226 files changed, 2169 insertions, 1074 deletions
@@ -64,6 +64,9 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com> Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com> Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com> Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> +Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com> +Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com> +Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com> Domen Puncer <domen@coderock.org> Douglas Gilbert <dougg@torque.net> Ed L. Cashin <ecashin@coraid.com> @@ -160,6 +163,8 @@ Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.co Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com> Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting> Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com> +Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com> +Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com> Mayuresh Janorkar <mayur@ti.com> Michael Buesch <m@bues.ch> Michel Dänzer <michel@tungstengraphics.com> diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt index 5e8429b6f9ca..95e91e84151c 100644 --- a/Documentation/devicetree/bindings/net/dsa/ksz.txt +++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt @@ -15,6 +15,7 @@ Required properties: - "microchip,ksz8565" - "microchip,ksz9893" - "microchip,ksz9563" + - "microchip,ksz8563" Optional properties: diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index 63c73fafe26d..0b61a90f1592 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -15,10 +15,10 @@ Required properties: Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs. Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC. - Use "sifive,fu540-macb" for SiFive FU540-C000 SoC. + Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC. Or the generic form: "cdns,emac". - reg: Address and length of the register set for the device - For "sifive,fu540-macb", second range is required to specify the + For "sifive,fu540-c000-gem", second range is required to specify the address and length of the registers for GEMGXL Management block. - interrupts: Should contain macb interrupt - phy-mode: See ethernet.txt file in the same directory. diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst new file mode 100644 index 000000000000..d37cbc502936 --- /dev/null +++ b/Documentation/process/embargoed-hardware-issues.rst @@ -0,0 +1,279 @@ +Embargoed hardware issues +========================= + +Scope +----- + +Hardware issues which result in security problems are a different category +of security bugs than pure software bugs which only affect the Linux +kernel. + +Hardware issues like Meltdown, Spectre, L1TF etc. must be treated +differently because they usually affect all Operating Systems ("OS") and +therefore need coordination across different OS vendors, distributions, +hardware vendors and other parties. For some of the issues, software +mitigations can depend on microcode or firmware updates, which need further +coordination. + +.. _Contact: + +Contact +------- + +The Linux kernel hardware security team is separate from the regular Linux +kernel security team. + +The team only handles the coordination of embargoed hardware security +issues. 
Reports of pure software security bugs in the Linux kernel are not +handled by this team and the reporter will be guided to contact the regular +Linux kernel security team (:ref:`Documentation/admin-guide/ +<securitybugs>`) instead. + +The team can be contacted by email at <hardware-security@kernel.org>. This +is a private list of security officers who will help you to coordinate an +issue according to our documented process. + +The list is encrypted and email to the list can be sent by either PGP or +S/MIME encrypted and must be signed with the reporter's PGP key or S/MIME +certificate. The list's PGP key and S/MIME certificate are available from +https://www.kernel.org/.... + +While hardware security issues are often handled by the affected hardware +vendor, we welcome contact from researchers or individuals who have +identified a potential hardware flaw. + +Hardware security officers +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The current team of hardware security officers: + + - Linus Torvalds (Linux Foundation Fellow) + - Greg Kroah-Hartman (Linux Foundation Fellow) + - Thomas Gleixner (Linux Foundation Fellow) + +Operation of mailing-lists +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The encrypted mailing-lists which are used in our process are hosted on +Linux Foundation's IT infrastructure. By providing this service Linux +Foundation's director of IT Infrastructure security technically has the +ability to access the embargoed information, but is obliged to +confidentiality by his employment contract. Linux Foundation's director of +IT Infrastructure security is also responsible for the kernel.org +infrastructure. + +The Linux Foundation's current director of IT Infrastructure security is +Konstantin Ryabitsev. + + +Non-disclosure agreements +------------------------- + +The Linux kernel hardware security team is not a formal body and therefore +unable to enter into any non-disclosure agreements. The kernel community +is aware of the sensitive nature of such issues and offers a Memorandum of +Understanding instead. + + +Memorandum of Understanding +--------------------------- + +The Linux kernel community has a deep understanding of the requirement to +keep hardware security issues under embargo for coordination between +different OS vendors, distributors, hardware vendors and other parties. + +The Linux kernel community has successfully handled hardware security +issues in the past and has the necessary mechanisms in place to allow +community compliant development under embargo restrictions. + +The Linux kernel community has a dedicated hardware security team for +initial contact, which oversees the process of handling such issues under +embargo rules. + +The hardware security team identifies the developers (domain experts) who +will form the initial response team for a particular issue. The initial +response team can bring in further developers (domain experts) to address +the issue in the best technical way. + +All involved developers pledge to adhere to the embargo rules and to keep +the received information confidential. Violation of the pledge will lead to +immediate exclusion from the current issue and removal from all related +mailing-lists. In addition, the hardware security team will also exclude +the offender from future issues. The impact of this consequence is a highly +effective deterrent in our community. In case a violation happens the +hardware security team will inform the involved parties immediately. 
If you +or anyone becomes aware of a potential violation, please report it +immediately to the Hardware security officers. + + +Process +^^^^^^^ + +Due to the globally distributed nature of Linux kernel development, +face-to-face meetings are almost impossible to address hardware security +issues. Phone conferences are hard to coordinate due to time zones and +other factors and should be only used when absolutely necessary. Encrypted +email has been proven to be the most effective and secure communication +method for these types of issues. + +Start of Disclosure +""""""""""""""""""" + +Disclosure starts by contacting the Linux kernel hardware security team by +email. This initial contact should contain a description of the problem and +a list of any known affected hardware. If your organization builds or +distributes the affected hardware, we encourage you to also consider what +other hardware could be affected. + +The hardware security team will provide an incident-specific encrypted +mailing-list which will be used for initial discussion with the reporter, +further disclosure and coordination. + +The hardware security team will provide the disclosing party a list of +developers (domain experts) who should be informed initially about the +issue after confirming with the developers that they will adhere to this +Memorandum of Understanding and the documented process. These developers +form the initial response team and will be responsible for handling the +issue after initial contact. The hardware security team is supporting the +response team, but is not necessarily involved in the mitigation +development process. + +While individual developers might be covered by a non-disclosure agreement +via their employer, they cannot enter individual non-disclosure agreements +in their role as Linux kernel developers. They will, however, agree to +adhere to this documented process and the Memorandum of Understanding. + + +Disclosure +"""""""""" + +The disclosing party provides detailed information to the initial response +team via the specific encrypted mailing-list. + +From our experience the technical documentation of these issues is usually +a sufficient starting point and further technical clarification is best +done via email. + +Mitigation development +"""""""""""""""""""""" + +The initial response team sets up an encrypted mailing-list or repurposes +an existing one if appropriate. The disclosing party should provide a list +of contacts for all other parties who have already been, or should be +informed about the issue. The response team contacts these parties so they +can name experts who should be subscribed to the mailing-list. + +Using a mailing-list is close to the normal Linux development process and +has been successfully used in developing mitigations for various hardware +security issues in the past. + +The mailing-list operates in the same way as normal Linux development. +Patches are posted, discussed and reviewed and if agreed on applied to a +non-public git repository which is only accessible to the participating +developers via a secure connection. The repository contains the main +development branch against the mainline kernel and backport branches for +stable kernel versions as necessary. + +The initial response team will identify further experts from the Linux +kernel developer community as needed and inform the disclosing party about +their participation. Bringing in experts can happen at any time of the +development process and often needs to be handled in a timely manner. 
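As a rough sketch of the workflow described above — the host, repository path, and branch names here are hypothetical, not something prescribed by the process — a participating developer's view of such a non-public repository might look like::

    # Clone the private repository over an authenticated, encrypted transport.
    git clone ssh://git@git.example.org/embargoed/issue.git
    cd issue
    # The main development branch targets the mainline kernel ...
    git checkout main
    # ... and backport branches carry the mitigation for stable series.
    git branch -r
    #   origin/main
    #   origin/backport-5.2
    #   origin/backport-4.19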
+ +Coordinated release +""""""""""""""""""" + +The involved parties will negotiate the date and time where the embargo +ends. At that point the prepared mitigations are integrated into the +relevant kernel trees and published. + +While we understand that hardware security issues need coordinated embargo +time, the embargo time should be constrained to the minimum time which is +required for all involved parties to develop, test and prepare the +mitigations. Extending embargo time artificially to meet conference talk +dates or other non-technical reasons is creating more work and burden for +the involved developers and response teams as the patches need to be kept +up to date in order to follow the ongoing upstream kernel development, +which might create conflicting changes. + +CVE assignment +"""""""""""""" + +Neither the hardware security team nor the initial response team assign +CVEs, nor are CVEs required for the development process. If CVEs are +provided by the disclosing party they can be used for documentation +purposes. + +Process ambassadors +------------------- + +For assistance with this process we have established ambassadors in various +organizations, who can answer questions about or provide guidance on the +reporting process and further handling. Ambassadors are not involved in the +disclosure of a particular issue, unless requested by a response team or by +an involved disclosed party. The current ambassadors list: + + ============= ======================================================== + ARM + AMD + IBM + Intel + Qualcomm + + Microsoft + VMware + XEN + + Canonical Tyler Hicks <tyhicks@canonical.com> + Debian Ben Hutchings <ben@decadent.org.uk> + Oracle Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> + Red Hat Josh Poimboeuf <jpoimboe@redhat.com> + SUSE Jiri Kosina <jkosina@suse.cz> + + Amazon + Google + ============== ======================================================== + +If you want your organization to be added to the ambassadors list, please +contact the hardware security team. The nominated ambassador has to +understand and support our process fully and is ideally well connected in +the Linux kernel community. + +Encrypted mailing-lists +----------------------- + +We use encrypted mailing-lists for communication. The operating principle +of these lists is that email sent to the list is encrypted either with the +list's PGP key or with the list's S/MIME certificate. The mailing-list +software decrypts the email and re-encrypts it individually for each +subscriber with the subscriber's PGP key or S/MIME certificate. Details +about the mailing-list software and the setup which is used to ensure the +security of the lists and protection of the data can be found here: +https://www.kernel.org/.... + +List keys +^^^^^^^^^ + +For initial contact see :ref:`Contact`. For incident specific mailing-lists +the key and S/MIME certificate are conveyed to the subscribers by email +sent from the specific list. + +Subscription to incident specific lists +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Subscription is handled by the response teams. Disclosed parties who want +to participate in the communication send a list of potential subscribers to +the response team so the response team can validate subscription requests. + +Each subscriber needs to send a subscription request to the response team +by email. The email must be signed with the subscriber's PGP key or S/MIME +certificate. 
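As a rough sketch of that step — the key ID, file name and keyserver below are placeholders, not values prescribed by the process — preparing a PGP-signed subscription request with GnuPG might look like::

    # Clear-sign the subscription request with your own PGP key.
    gpg --local-user 0x12345678 --clearsign subscription-request.txt
    # Publish the public key so the response team can verify the signature.
    gpg --keyserver hkps://keyserver.example.org --send-keys 0x12345678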
If a PGP key is used, it must be available from a public key +server and is ideally connected to the Linux kernel's PGP web of trust. See +also: https://www.kernel.org/signature.html. + +The response team verifies that the subscriber request is valid and adds +the subscriber to the list. After subscription the subscriber will receive +email from the mailing-list which is signed either with the list's PGP key +or the list's S/MIME certificate. The subscriber's email client can extract +the PGP key or the S/MIME certificate from the signature so the subscriber +can send encrypted email to the list. + diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst index 878ebfda7eef..e2c9ffc682c5 100644 --- a/Documentation/process/index.rst +++ b/Documentation/process/index.rst @@ -45,6 +45,7 @@ Other guides to the community that are of interest to most developers are: submit-checklist kernel-docs deprecated + embargoed-hardware-issues These are some overall technical guides that have been put here for now for lack of a better place. diff --git a/MAINTAINERS b/MAINTAINERS index 68b48de8765b..a081c477d1d1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -683,7 +683,7 @@ S: Maintained F: drivers/crypto/sunxi-ss/ ALLWINNER VPU DRIVER -M: Maxime Ripard <maxime.ripard@bootlin.com> +M: Maxime Ripard <mripard@kernel.org> M: Paul Kocialkowski <paul.kocialkowski@bootlin.com> L: linux-media@vger.kernel.org S: Maintained @@ -1416,7 +1416,7 @@ S: Maintained F: drivers/clk/sunxi/ ARM/Allwinner sunXi SoC support -M: Maxime Ripard <maxime.ripard@bootlin.com> +M: Maxime Ripard <mripard@kernel.org> M: Chen-Yu Tsai <wens@csie.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -3586,7 +3586,7 @@ F: Documentation/filesystems/caching/cachefiles.txt F: fs/cachefiles/ CADENCE MIPI-CSI2 BRIDGES -M: Maxime Ripard <maxime.ripard@bootlin.com> +M: Maxime Ripard <mripard@kernel.org> L: linux-media@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/media/cdns,*.txt @@ -5309,7 +5309,7 @@ F: include/linux/vga* DRM DRIVERS AND MISC GPU PATCHES M: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> -M: Maxime Ripard <maxime.ripard@bootlin.com> +M: Maxime Ripard <mripard@kernel.org> M: Sean Paul <sean@poorly.run> W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html S: Maintained @@ -5322,7 +5322,7 @@ F: include/uapi/drm/drm* F: include/linux/vga* DRM DRIVERS FOR ALLWINNER A10 -M: Maxime Ripard <maxime.ripard@bootlin.com> +M: Maxime Ripard <mripard@kernel.org> L: dri-devel@lists.freedesktop.org S: Supported F: drivers/gpu/drm/sun4i/ @@ -7528,7 +7528,7 @@ I2C MV64XXX MARVELL AND ALLWINNER DRIVER M: Gregory CLEMENT <gregory.clement@bootlin.com> L: linux-i2c@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt +F: Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml F: drivers/i2c/busses/i2c-mv64xxx.c I2C OVER PARALLEL PORT @@ -9244,6 +9244,18 @@ F: include/linux/nd.h F: include/linux/libnvdimm.h F: include/uapi/linux/ndctl.h +LICENSES and SPDX stuff +M: Thomas Gleixner <tglx@linutronix.de> +M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +L: linux-spdx@vger.kernel.org +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/spdx.git +F: COPYING +F: Documentation/process/license-rules.rst +F: LICENSES/ +F: scripts/spdxcheck-test.sh +F: scripts/spdxcheck.py + LIGHTNVM PLATFORM SUPPORT M: Matias Bjorling <mb@lightnvm.io> W: http://github/OpenChannelSSD @@ -2,7 +2,7 @@ VERSION 
= 5 PATCHLEVEL = 3 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Bobtail Squid # *DOCUMENTATION* diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index 445e4d702f43..dc05a63516f5 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -572,6 +572,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end, #else BUILD_BUG_ON(sizeof(u32) != sizeof(value)); #endif + /* Fall through */ case DW_EH_PE_native: if (end < (const void *)(ptr.pul + 1)) return 0; diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index ced1a19d5f89..46849d6ecb3e 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi @@ -185,7 +185,7 @@ uart0: serial@0 { compatible = "ti,am3352-uart", "ti,omap3-uart"; clock-frequency = <48000000>; - reg = <0x0 0x2000>; + reg = <0x0 0x1000>; interrupts = <72>; status = "disabled"; dmas = <&edma 26 0>, <&edma 27 0>; @@ -934,7 +934,7 @@ uart1: serial@0 { compatible = "ti,am3352-uart", "ti,omap3-uart"; clock-frequency = <48000000>; - reg = <0x0 0x2000>; + reg = <0x0 0x1000>; interrupts = <73>; status = "disabled"; dmas = <&edma 28 0>, <&edma 29 0>; @@ -966,7 +966,7 @@ uart2: serial@0 { compatible = "ti,am3352-uart", "ti,omap3-uart"; clock-frequency = <48000000>; - reg = <0x0 0x2000>; + reg = <0x0 0x1000>; interrupts = <74>; status = "disabled"; dmas = <&edma 30 0>, <&edma 31 0>; @@ -1614,7 +1614,7 @@ uart3: serial@0 { compatible = "ti,am3352-uart", "ti,omap3-uart"; clock-frequency = <48000000>; - reg = <0x0 0x2000>; + reg = <0x0 0x1000>; interrupts = <44>; status = "disabled"; }; @@ -1644,7 +1644,7 @@ uart4: serial@0 { compatible = "ti,am3352-uart", "ti,omap3-uart"; clock-frequency = <48000000>; - reg = <0x0 0x2000>; + reg = <0x0 0x1000>; interrupts = <45>; status = "disabled"; }; @@ -1674,7 +1674,7 @@ uart5: serial@0 { compatible = "ti,am3352-uart", "ti,omap3-uart"; clock-frequency = <48000000>; - reg = <0x0 0x2000>; + reg = <0x0 0x1000>; interrupts = <46>; status = "disabled"; }; @@ -1758,6 +1758,8 @@ target-module@cc000 { /* 0x481cc000, ap 60 46.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; + reg = <0xcc020 0x4>; + reg-names = "rev"; ti,hwmods = "d_can0"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>, @@ -1780,6 +1782,8 @@ target-module@d0000 { /* 0x481d0000, ap 62 42.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; + reg = <0xd0020 0x4>; + reg-names = "rev"; ti,hwmods = "d_can1"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>, diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index e5c2f71a7c77..fb6b8aa12cc5 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -234,13 +234,33 @@ interrupt-names = "edma3_tcerrint"; }; - mmc3: mmc@47810000 { - compatible = "ti,omap4-hsmmc"; + target-module@47810000 { + compatible = "ti,sysc-omap2", "ti,sysc"; ti,hwmods = "mmc3"; - ti,needs-special-reset; - interrupts = <29>; - reg = <0x47810000 0x1000>; - status = "disabled"; + reg = <0x478102fc 0x4>, + <0x47810110 0x4>, + <0x47810114 0x4>; + reg-names = "rev", "sysc", "syss"; + ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY | + SYSC_OMAP2_ENAWAKEUP | + SYSC_OMAP2_SOFTRESET | + SYSC_OMAP2_AUTOIDLE)>; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; + ti,syss-mask = <1>; + clocks = <&l3s_clkctrl AM3_L3S_MMC3_CLKCTRL 0>; + clock-names = "fck"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x47810000 
0x1000>; + + mmc3: mmc@0 { + compatible = "ti,omap4-hsmmc"; + ti,needs-special-reset; + interrupts = <29>; + reg = <0x0 0x1000>; + }; }; usb: usb@47400000 { diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index 55aff4db9c7c..848e2a8884e2 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -228,13 +228,33 @@ interrupt-names = "edma3_tcerrint"; }; - mmc3: mmc@47810000 { - compatible = "ti,omap4-hsmmc"; - reg = <0x47810000 0x1000>; + target-module@47810000 { + compatible = "ti,sysc-omap2", "ti,sysc"; ti,hwmods = "mmc3"; - ti,needs-special-reset; - interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; - status = "disabled"; + reg = <0x478102fc 0x4>, + <0x47810110 0x4>, + <0x47810114 0x4>; + reg-names = "rev", "sysc", "syss"; + ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY | + SYSC_OMAP2_ENAWAKEUP | + SYSC_OMAP2_SOFTRESET | + SYSC_OMAP2_AUTOIDLE)>; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, + <SYSC_IDLE_SMART>; + ti,syss-mask = <1>; + clocks = <&l3s_clkctrl AM4_L3S_MMC3_CLKCTRL 0>; + clock-names = "fck"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x47810000 0x1000>; + + mmc3: mmc@0 { + compatible = "ti,omap4-hsmmc"; + ti,needs-special-reset; + interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; + reg = <0x0 0x1000>; + }; }; sham: sham@53100000 { diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi index 989cb60b9029..04bee4ff9dcb 100644 --- a/arch/arm/boot/dts/am437x-l4.dtsi +++ b/arch/arm/boot/dts/am437x-l4.dtsi @@ -1574,6 +1574,8 @@ target-module@cc000 { /* 0x481cc000, ap 50 46.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; + reg = <0xcc020 0x4>; + reg-names = "rev"; ti,hwmods = "d_can0"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>; @@ -1593,6 +1595,8 @@ target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; + reg = <0xd0020 0x4>; + reg-names = "rev"; ti,hwmods = "d_can1"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>; diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts index 1d5e99964bbf..0aaacea1d887 100644 --- a/arch/arm/boot/dts/am571x-idk.dts +++ b/arch/arm/boot/dts/am571x-idk.dts @@ -175,14 +175,9 @@ }; &mmc1 { - pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-names = "default", "hs"; pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; pinctrl-1 = <&mmc1_pins_hs>; - pinctrl-2 = <&mmc1_pins_sdr12>; - pinctrl-3 = <&mmc1_pins_sdr25>; - pinctrl-4 = <&mmc1_pins_sdr50>; - pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>; - pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; }; &mmc2 { diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts index c65d7f6d3b5a..ea1c119feaa5 100644 --- a/arch/arm/boot/dts/am572x-idk.dts +++ b/arch/arm/boot/dts/am572x-idk.dts @@ -16,14 +16,9 @@ }; &mmc1 { - pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-names = "default", "hs"; pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; pinctrl-1 = <&mmc1_pins_hs>; - pinctrl-2 = <&mmc1_pins_sdr12>; - pinctrl-3 = <&mmc1_pins_sdr25>; - pinctrl-4 = <&mmc1_pins_sdr50>; - pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>; - pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; }; &mmc2 { diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts index dc5141c35610..7935d70874ce 100644 --- 
a/arch/arm/boot/dts/am574x-idk.dts +++ b/arch/arm/boot/dts/am574x-idk.dts @@ -24,14 +24,9 @@ }; &mmc1 { - pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-names = "default", "hs"; pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; pinctrl-1 = <&mmc1_pins_hs>; - pinctrl-2 = <&mmc1_pins_default>; - pinctrl-3 = <&mmc1_pins_hs>; - pinctrl-4 = <&mmc1_pins_sdr50>; - pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_conf>; - pinctrl-6 = <&mmc1_pins_ddr50 &mmc1_iodelay_sdr104_conf>; }; &mmc2 { diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi index d02f5fa61e5f..bc76f1705c0f 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi +++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi @@ -379,7 +379,7 @@ }; }; -&gpio7 { +&gpio7_target { ti,no-reset-on-init; ti,no-idle-on-init; }; @@ -430,6 +430,7 @@ bus-width = <4>; cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */ + no-1-8-v; }; &mmc2 { diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts index a374b5cd6db0..7b113b52c3fb 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts @@ -16,14 +16,9 @@ }; &mmc1 { - pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-names = "default", "hs"; pinctrl-0 = <&mmc1_pins_default>; pinctrl-1 = <&mmc1_pins_hs>; - pinctrl-2 = <&mmc1_pins_sdr12>; - pinctrl-3 = <&mmc1_pins_sdr25>; - pinctrl-4 = <&mmc1_pins_sdr50>; - pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>; - pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>; vmmc-supply = <&vdd_3v3>; vqmmc-supply = <&ldo1_reg>; }; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts index 4badd2144db9..30c500b15b21 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts @@ -16,14 +16,9 @@ }; &mmc1 { - pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; + pinctrl-names = "default", "hs"; pinctrl-0 = <&mmc1_pins_default>; pinctrl-1 = <&mmc1_pins_hs>; - pinctrl-2 = <&mmc1_pins_sdr12>; - pinctrl-3 = <&mmc1_pins_sdr25>; - pinctrl-4 = <&mmc1_pins_sdr50>; - pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>; - pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; vmmc-supply = <&vdd_3v3>; vqmmc-supply = <&ldo1_reg>; }; diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 714e971b912a..de7f85efaa51 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -498,7 +498,7 @@ phy-supply = <&ldousb_reg>; }; -&gpio7 { +&gpio7_target { ti,no-reset-on-init; ti,no-idle-on-init; }; diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi index 23faedec08ab..21e5914fdd62 100644 --- a/arch/arm/boot/dts/dra7-l4.dtsi +++ b/arch/arm/boot/dts/dra7-l4.dtsi @@ -1261,7 +1261,7 @@ }; }; - target-module@51000 { /* 0x48051000, ap 45 2e.0 */ + gpio7_target: target-module@51000 { /* 0x48051000, ap 45 2e.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; ti,hwmods = "gpio7"; reg = <0x51000 0x4>, @@ -3025,7 +3025,7 @@ target-module@80000 { /* 0x48480000, ap 31 16.0 */ compatible = "ti,sysc-omap4", "ti,sysc"; - reg = <0x80000 0x4>; + reg = <0x80020 0x4>; reg-names = "rev"; clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>; clock-names = "fck"; @@ -4577,7 +4577,7 @@ target-module@c000 { /* 0x4ae3c000, ap 30 04.0 */ 
compatible = "ti,sysc-omap4", "ti,sysc"; - reg = <0xc000 0x4>; + reg = <0xc020 0x4>; reg-names = "rev"; clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>; clock-names = "fck"; diff --git a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi index 28ebb4eb884a..214b9e6de2c3 100644 --- a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi +++ b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi @@ -32,7 +32,7 @@ * * Datamanual Revisions: * - * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016 + * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019 * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016 * */ @@ -229,45 +229,45 @@ mmc3_pins_default: mmc3_pins_default { pinctrl-single,pins = < - DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ - DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ - DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ - DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ - DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ - DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; mmc3_pins_hs: mmc3_pins_hs { pinctrl-single,pins = < - DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ - DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ - DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ - DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ - DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ - DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; mmc3_pins_sdr12: mmc3_pins_sdr12 { pinctrl-single,pins = < - DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ - DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ - DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ - DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ - DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ - DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP 
| MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; mmc3_pins_sdr25: mmc3_pins_sdr25 { pinctrl-single,pins = < - DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ - DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ - DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ - DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ - DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ - DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ + DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ + DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ + DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ + DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ + DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ >; }; diff --git a/arch/arm/boot/dts/vf610-bk4.dts b/arch/arm/boot/dts/vf610-bk4.dts index 3fa0cbe456db..0f3870d3b099 100644 --- a/arch/arm/boot/dts/vf610-bk4.dts +++ b/arch/arm/boot/dts/vf610-bk4.dts @@ -246,13 +246,13 @@ reg = <0>; }; - n25q128a13_2: flash@1 { + n25q128a13_2: flash@2 { compatible = "n25q128a13", "jedec,spi-nor"; #address-cells = <1>; #size-cells = <1>; spi-max-frequency = <66000000>; spi-rx-bus-width = <2>; - reg = <1>; + reg = <2>; }; }; diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S index 1d5210eb4776..582925238d65 100644 --- a/arch/arm/lib/backtrace.S +++ b/arch/arm/lib/backtrace.S @@ -66,7 +66,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions 1003: ldr r2, [sv_pc, #-4] @ if stmfd sp!, {args} exists, ldr r3, .Ldsi+4 @ adjust saved 'pc' back one - teq r3, r2, lsr #10 @ instruction + teq r3, r2, lsr #11 @ instruction subne r0, sv_pc, #4 @ allow for mov subeq r0, sv_pc, #8 @ allow for mov + stmia diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S index 81159af44862..14a6c3eb3298 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S +++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S @@ -126,6 +126,8 @@ restart: orr r11, r11, r13 @ mask all requested interrupts str r11, [r12, #OMAP1510_GPIO_INT_MASK] + str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts + ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set? 
beq hksw @ no - try next source @@ -133,7 +135,6 @@ restart: @@@@@@@@@@@@@@@@@@@@@@ @ Keyboard clock FIQ mode interrupt handler @ r10 now contains KEYBRD_CLK_MASK, use it - str r10, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack the interrupt bic r11, r11, r10 @ unmask it str r11, [r12, #OMAP1510_GPIO_INT_MASK] diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c index 43899fa56674..0254eb9cf8c6 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq.c +++ b/arch/arm/mach-omap1/ams-delta-fiq.c @@ -70,9 +70,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id) * interrupts default to since commit 80ac93c27441 * requires interrupt already acked and unmasked. */ - if (irq_chip->irq_ack) - irq_chip->irq_ack(d); - if (irq_chip->irq_unmask) + if (!WARN_ON_ONCE(!irq_chip->irq_unmask)) irq_chip->irq_unmask(d); } for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++) diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index f9c02f9f1c92..5c3845730dbf 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -127,6 +127,9 @@ static int __init omap4_sram_init(void) struct device_node *np; struct gen_pool *sram_pool; + if (!soc_is_omap44xx() && !soc_is_omap54xx()) + return 0; + np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu"); if (!np) pr_warn("%s:Unable to allocate sram needed to handle errata I688\n", diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 4a5b4aee6615..1ec21e9ba1e9 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -379,7 +379,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = { static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = { .rev_offs = 0x0, .sysc_offs = 0x4, - .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET, + .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | + SYSC_HAS_RESET_STATUS, .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type2, }; diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 16d373d587c4..b4be3baa83d4 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -175,6 +175,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low, #ifdef CONFIG_HAVE_ARCH_PFN_VALID int pfn_valid(unsigned long pfn) { + phys_addr_t addr = __pfn_to_phys(pfn); + + if (__phys_to_pfn(addr) != pfn) + return 0; + return memblock_is_map_memory(__pfn_to_phys(pfn)); } EXPORT_SYMBOL(pfn_valid); @@ -628,7 +633,8 @@ static void update_sections_early(struct section_perm perms[], int n) if (t->flags & PF_KTHREAD) continue; for_each_thread(t, s) - set_section_perms(perms, n, true, s->mm); + if (s->mm) + set_section_perms(perms, n, true, s->mm); } set_section_perms(perms, n, true, current->active_mm); set_section_perms(perms, n, true, &init_mm); diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts index c7a87368850b..12aa7eaeaf68 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts @@ -339,6 +339,12 @@ pinctrl-names = "default"; }; +&ir { + status = "okay"; + pinctrl-0 = <&remote_input_ao_pins>; + pinctrl-names = "default"; +}; + &pwm_ef { status = "okay"; pinctrl-0 = <&pwm_e_pins>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi index f8d43e3dcf20..1785552d450c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi +++ 
b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi @@ -2386,6 +2386,7 @@ clocks = <&clkc CLKID_USB1_DDR_BRIDGE>; clock-names = "ddr"; phys = <&usb2_phy1>; + phy-names = "usb2-phy"; dr_mode = "peripheral"; g-rx-fifo-size = <192>; g-np-tx-fifo-size = <128>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts index 81780ffcc7f0..4e916e1f71f7 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts @@ -53,6 +53,7 @@ gpio = <&gpio_ao GPIOAO_8 GPIO_ACTIVE_HIGH>; enable-active-high; + regulator-always-on; }; tf_io: gpio-regulator-tf_io { diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c index fe61513982b4..330b19fcd990 100644 --- a/arch/nds32/kernel/signal.c +++ b/arch/nds32/kernel/signal.c @@ -316,6 +316,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->uregs[0] = -EINTR; break; } + /* Else, fall through */ case -ERESTARTNOINTR: regs->uregs[0] = regs->orig_r0; regs->ipc -= 4; @@ -360,6 +361,7 @@ static void do_signal(struct pt_regs *regs) switch (regs->uregs[0]) { case -ERESTART_RESTARTBLOCK: regs->uregs[15] = __NR_restart_syscall; + /* Fall through */ case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index e99a14798ab0..c4b606fe73eb 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -660,8 +660,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, } tce = be64_to_cpu(tce); - if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) - return H_PARAMETER; + if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) { + ret = H_PARAMETER; + goto unlock_exit; + } list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index f50bbeedfc66..b4f20f13b860 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -556,8 +556,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); ua = 0; - if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) - return H_PARAMETER; + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) { + ret = H_PARAMETER; + goto unlock_exit; + } list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h index 9c66033c3a54..161f28d04a07 100644 --- a/arch/riscv/include/asm/fixmap.h +++ b/arch/riscv/include/asm/fixmap.h @@ -30,10 +30,6 @@ enum fixed_addresses { __end_of_fixed_addresses }; -#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE) -#define FIXADDR_TOP (VMALLOC_START) -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) - #define FIXMAP_PAGE_IO PAGE_KERNEL #define __early_set_fixmap __set_fixmap diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index a364aba23d55..c24a083b3e12 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -420,14 +420,22 @@ static inline void pgtable_cache_init(void) #define VMALLOC_END (PAGE_OFFSET - 1) #define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) +#define FIXADDR_TOP VMALLOC_START +#ifdef CONFIG_64BIT +#define FIXADDR_SIZE PMD_SIZE +#else +#define FIXADDR_SIZE PGDIR_SIZE +#endif +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + /* - * Task 
size is 0x4000000000 for RV64 or 0xb800000 for RV32. + * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32. * Note that PGDIR_SIZE must evenly divide TASK_SIZE. */ #ifdef CONFIG_64BIT #define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2) #else -#define TASK_SIZE VMALLOC_START +#define TASK_SIZE FIXADDR_START #endif #include <asm-generic/pgtable.h> diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 56e748a7679f..94df0868804b 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector) +REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4)) export REALMODE_CFLAGS diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index 2faddeb0398a..c8862696a47b 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -72,7 +72,7 @@ static unsigned long find_trampoline_placement(void) /* Find the first usable memory region under bios_start. */ for (i = boot_params->e820_entries - 1; i >= 0; i--) { - unsigned long new; + unsigned long new = bios_start; entry = &boot_params->e820_table[i]; diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 62f317c9113a..5b35b7ea5d72 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -661,10 +661,17 @@ fail: throttle = perf_event_overflow(event, &data, ®s); out: - if (throttle) + if (throttle) { perf_ibs_stop(event, 0); - else - perf_ibs_enable_event(perf_ibs, hwc, period >> 4); + } else { + period >>= 4; + + if ((ibs_caps & IBS_CAPS_RDWROPCNT) && + (*config & IBS_OP_CNT_CTL)) + period |= *config & IBS_OP_CUR_CNT_RAND; + + perf_ibs_enable_event(perf_ibs, hwc, period); + } perf_event_update_userpage(event); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 648260b5f367..e4c2cb65ea50 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3572,6 +3572,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left) return left; } +static u64 nhm_limit_period(struct perf_event *event, u64 left) +{ + return max(left, 32ULL); +} + PMU_FORMAT_ATTR(event, "config:0-7" ); PMU_FORMAT_ATTR(umask, "config:8-15" ); PMU_FORMAT_ATTR(edge, "config:18" ); @@ -4606,6 +4611,7 @@ __init int intel_pmu_init(void) x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.extra_regs = intel_nehalem_extra_regs; + x86_pmu.limit_period = nhm_limit_period; mem_attr = nhm_mem_events_attrs; diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 287f1f7b2e52..c38a66661576 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -16,7 +16,6 @@ #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR #ifndef __ASSEMBLY__ -extern void mcount(void); extern atomic_t modifying_ftrace_code; extern void __fentry__(void); diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 1392d5e6e8d6..ee26e9215f18 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -252,16 +252,20 @@ struct pebs_lbr { #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) #define IBSCTL_LVT_OFFSET_MASK 0x0F -/* ibs fetch 
bits/masks */ +/* IBS fetch bits/masks */ #define IBS_FETCH_RAND_EN (1ULL<<57) #define IBS_FETCH_VAL (1ULL<<49) #define IBS_FETCH_ENABLE (1ULL<<48) #define IBS_FETCH_CNT 0xFFFF0000ULL #define IBS_FETCH_MAX_CNT 0x0000FFFFULL -/* ibs op bits/masks */ -/* lower 4 bits of the current count are ignored: */ -#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32) +/* + * IBS op bits/masks + * The lower 7 bits of the current count are random bits + * preloaded by hardware and ignored in software + */ +#define IBS_OP_CUR_CNT (0xFFF80ULL<<32) +#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32) #define IBS_OP_CNT_CTL (1ULL<<19) #define IBS_OP_VAL (1ULL<<18) #define IBS_OP_ENABLE (1ULL<<17) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index aa5495d0f478..dba2828b779a 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1179,6 +1179,10 @@ void clear_local_APIC(void) apic_write(APIC_LVT0, v | APIC_LVT_MASKED); v = apic_read(APIC_LVT1); apic_write(APIC_LVT1, v | APIC_LVT_MASKED); + if (!x2apic_enabled()) { + v = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + apic_write(APIC_LDR, v); + } if (maxlvt >= 4) { v = apic_read(APIC_LVTPC); apic_write(APIC_LVTPC, v | APIC_LVT_MASKED); diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index afee386ff711..caedd8d60d36 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -38,32 +38,12 @@ static int bigsmp_early_logical_apicid(int cpu) return early_per_cpu(x86_cpu_to_apicid, cpu); } -static inline unsigned long calculate_ldr(int cpu) -{ - unsigned long val, id; - - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - id = per_cpu(x86_bios_cpu_apicid, cpu); - val |= SET_APIC_LOGICAL_ID(id); - - return val; -} - /* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... + * bigsmp enables physical destination mode + * and doesn't use LDR and DFR */ static void bigsmp_init_apic_ldr(void) { - unsigned long val; - int cpu = smp_processor_id(); - - apic_write(APIC_DFR, APIC_DFR_FLAT); - val = calculate_ldr(cpu); - apic_write(APIC_LDR, val); } static void bigsmp_setup_apic_routing(void) diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index c7bb6c69f21c..d6af97fd170a 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2438,7 +2438,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from) * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use * gsi_top if ioapic_dynirq_base hasn't been initialized yet. */ - return ioapic_initialized ? ioapic_dynirq_base : gsi_top; + if (!ioapic_initialized) + return gsi_top; + /* + * For DT enabled machines ioapic_dynirq_base is irrelevant and not + * updated. So simply return @from if ioapic_dynirq_base == 0. + */ + return ioapic_dynirq_base ? : from; } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index d8359ebeea70..8cd745ef8c7b 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -508,9 +508,12 @@ struct uprobe_xol_ops { void (*abort)(struct arch_uprobe *, struct pt_regs *); }; -static inline int sizeof_long(void) +static inline int sizeof_long(struct pt_regs *regs) { - return in_ia32_syscall() ? 4 : 8; + /* + * Check registers for mode as in_xxx_syscall() does not apply here. + */ + return user_64bit_mode(regs) ? 
8 : 4; } static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) @@ -521,9 +524,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) static int emulate_push_stack(struct pt_regs *regs, unsigned long val) { - unsigned long new_sp = regs->sp - sizeof_long(); + unsigned long new_sp = regs->sp - sizeof_long(regs); - if (copy_to_user((void __user *)new_sp, &val, sizeof_long())) + if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs))) return -EFAULT; regs->sp = new_sp; @@ -556,7 +559,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs long correction = utask->vaddr - utask->xol_vaddr; regs->ip += correction; } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) { - regs->sp += sizeof_long(); /* Pop incorrect return address */ + regs->sp += sizeof_long(regs); /* Pop incorrect return address */ if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen)) return -ERESTART; } @@ -675,7 +678,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) * "call" insn was executed out-of-line. Just restore ->sp and restart. * We could also restore ->ip and try to call branch_emulate_op() again. */ - regs->sp += sizeof_long(); + regs->sp += sizeof_long(regs); return -ERESTART; } @@ -1056,7 +1059,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs) { - int rasize = sizeof_long(), nleft; + int rasize = sizeof_long(regs), nleft; unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */ if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize)) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index c10a8b10b203..fff790a3f4ee 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1781,7 +1781,7 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args) int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { - uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu); + uint16_t evmcs_ver = 0; struct kvm_cpuid_entry2 cpuid_entries[] = { { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS }, { .function = HYPERV_CPUID_INTERFACE }, @@ -1793,6 +1793,9 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, }; int i, nent = ARRAY_SIZE(cpuid_entries); + if (kvm_x86_ops->nested_get_evmcs_version) + evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu); + /* Skip NESTED_FEATURES if eVMCS is not supported */ if (!evmcs_ver) --nent; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e3d3b2128f2b..e0368076a1ef 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -7128,12 +7128,6 @@ failed: return ret; } -static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu) -{ - /* Not supported */ - return 0; -} - static int nested_enable_evmcs(struct kvm_vcpu *vcpu, uint16_t *vmcs_version) { @@ -7332,7 +7326,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .mem_enc_unreg_region = svm_unregister_enc_region, .nested_enable_evmcs = nested_enable_evmcs, - .nested_get_evmcs_version = nested_get_evmcs_version, + .nested_get_evmcs_version = NULL, .need_emulation_on_page_fault = svm_need_emulation_on_page_fault, }; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 42ed3faa6af8..c030c96fc81a 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ 
b/arch/x86/kvm/vmx/vmx.c @@ -7797,6 +7797,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .set_nested_state = NULL, .get_vmcs12_pages = NULL, .nested_enable_evmcs = NULL, + .nested_get_evmcs_version = NULL, .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault, }; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 93b0bd45ac73..290c3c3efb87 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6594,12 +6594,13 @@ restart: unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; - kvm_rip_write(vcpu, ctxt->eip); - if (r == EMULATE_DONE && ctxt->tf) - kvm_vcpu_do_singlestep(vcpu, &r); if (!ctxt->have_exception || - exception_type(ctxt->exception.vector) == EXCPT_TRAP) + exception_type(ctxt->exception.vector) == EXCPT_TRAP) { + kvm_rip_write(vcpu, ctxt->eip); + if (r == EMULATE_DONE && ctxt->tf) + kvm_vcpu_do_singlestep(vcpu, &r); __kvm_set_rflags(vcpu, ctxt->eflags); + } /* * For STI, interrupts are shadowed; so KVM_REQ_EVENT will diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 6a9a77a403c9..e14e95ea7338 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -516,7 +516,7 @@ static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val, */ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start, unsigned long pfn, unsigned long npg, - int warnlvl) + unsigned long lpsize, int warnlvl) { pgprotval_t forbidden, res; unsigned long end; @@ -535,9 +535,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start, check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX"); forbidden = res; - res = protect_kernel_text_ro(start, end); - check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO"); - forbidden |= res; + /* + * Special case to preserve a large page. If the change spawns the + * full large page mapping then there is no point to split it + * up. Happens with ftrace and is going to be removed once ftrace + * switched to text_poke(). + */ + if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) { + res = protect_kernel_text_ro(start, end); + check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO"); + forbidden |= res; + } /* Check the PFN directly */ res = protect_pci_bios(pfn, pfn + npg - 1); @@ -819,7 +827,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address, * extra conditional required here. */ chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages, - CPA_CONFLICT); + psize, CPA_CONFLICT); if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) { /* @@ -855,7 +863,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address, * protection requirement in the large page. */ new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages, - CPA_DETECT); + psize, CPA_DETECT); /* * If there is a conflict, split the large page. 
@@ -906,7 +914,8 @@ static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn, if (!cpa->force_static_prot) goto set; - prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT); + /* Hand in lpsize = 0 to enforce the protection mechanism */ + prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT); if (pgprot_val(prot) == pgprot_val(ref_prot)) goto set; @@ -1503,7 +1512,8 @@ repeat: pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); cpa_inc_4k_install(); - new_prot = static_protections(new_prot, address, pfn, 1, + /* Hand in lpsize = 0 to enforce the protection mechanism */ + new_prot = static_protections(new_prot, address, pfn, 1, 0, CPA_PROTECT); new_prot = pgprot_clear_protnone_bits(new_prot); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3327192bb71f..c8fb886aebd4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3038,6 +3038,17 @@ again: } return true; case RBD_OBJ_READ_PARENT: + /* + * The parent image is read only up to the overlap -- zero-fill + * from the overlap to the end of the request. + */ + if (!*result) { + u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req); + + if (obj_overlap < obj_req->ex.oe_len) + rbd_obj_zero_range(obj_req, obj_overlap, + obj_req->ex.oe_len - obj_overlap); + } return true; default: BUG(); diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c index 19d7b6ff2f17..20c957185af2 100644 --- a/drivers/bus/hisi_lpc.c +++ b/drivers/bus/hisi_lpc.c @@ -456,6 +456,17 @@ struct hisi_lpc_acpi_cell { size_t pdata_size; }; +static void hisi_lpc_acpi_remove(struct device *hostdev) +{ + struct acpi_device *adev = ACPI_COMPANION(hostdev); + struct acpi_device *child; + + device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev); + + list_for_each_entry(child, &adev->children, node) + acpi_device_clear_enumerated(child); +} + /* * hisi_lpc_acpi_probe - probe children for ACPI FW * @hostdev: LPC host device pointer @@ -555,8 +566,7 @@ static int hisi_lpc_acpi_probe(struct device *hostdev) return 0; fail: - device_for_each_child(hostdev, NULL, - hisi_lpc_acpi_remove_subdev); + hisi_lpc_acpi_remove(hostdev); return ret; } @@ -569,6 +579,10 @@ static int hisi_lpc_acpi_probe(struct device *dev) { return -ENODEV; } + +static void hisi_lpc_acpi_remove(struct device *hostdev) +{ +} #endif // CONFIG_ACPI /* @@ -606,24 +620,27 @@ static int hisi_lpc_probe(struct platform_device *pdev) range->fwnode = dev->fwnode; range->flags = LOGIC_PIO_INDIRECT; range->size = PIO_INDIRECT_SIZE; + range->hostdata = lpcdev; + range->ops = &hisi_lpc_ops; + lpcdev->io_host = range; ret = logic_pio_register_range(range); if (ret) { dev_err(dev, "register IO range failed (%d)!\n", ret); return ret; } - lpcdev->io_host = range; /* register the LPC host PIO resources */ if (acpi_device) ret = hisi_lpc_acpi_probe(dev); else ret = of_platform_populate(dev->of_node, NULL, NULL, dev); - if (ret) + if (ret) { + logic_pio_unregister_range(range); return ret; + } - lpcdev->io_host->hostdata = lpcdev; - lpcdev->io_host->ops = &hisi_lpc_ops; + dev_set_drvdata(dev, lpcdev); io_end = lpcdev->io_host->io_start + lpcdev->io_host->size; dev_info(dev, "registered range [%pa - %pa]\n", @@ -632,6 +649,23 @@ static int hisi_lpc_probe(struct platform_device *pdev) return ret; } +static int hisi_lpc_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct acpi_device *acpi_device = ACPI_COMPANION(dev); + struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev); + struct logic_pio_hwaddr *range = lpcdev->io_host; + 
+ if (acpi_device) + hisi_lpc_acpi_remove(dev); + else + of_platform_depopulate(dev); + + logic_pio_unregister_range(range); + + return 0; +} + static const struct of_device_id hisi_lpc_of_match[] = { { .compatible = "hisilicon,hip06-lpc", }, { .compatible = "hisilicon,hip07-lpc", }, @@ -645,5 +679,6 @@ static struct platform_driver hisi_lpc_driver = { .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match), }, .probe = hisi_lpc_probe, + .remove = hisi_lpc_remove, }; builtin_platform_driver(hisi_lpc_driver); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index e6deabd8305d..2db474ab4c6b 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -949,7 +949,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode) *best_mode = SYSC_IDLE_SMART_WKUP; else if (idlemodes & BIT(SYSC_IDLE_SMART)) *best_mode = SYSC_IDLE_SMART; - else if (idlemodes & SYSC_IDLE_FORCE) + else if (idlemodes & BIT(SYSC_IDLE_FORCE)) *best_mode = SYSC_IDLE_FORCE; else return -EINVAL; @@ -1267,7 +1267,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, 0xffff00f0, 0), - SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0), + SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0), + SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0), SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0), SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0), SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0), @@ -1692,10 +1693,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata) if (error) return 0; - if (val) - ddata->cfg.sysc_val = val & ddata->cap->sysc_mask; - else - ddata->cfg.sysc_val = ddata->cap->sysc_mask; + ddata->cfg.sysc_val = val & ddata->cap->sysc_mask; return 0; } @@ -2385,27 +2383,27 @@ static int sysc_probe(struct platform_device *pdev) error = sysc_init_dts_quirks(ddata); if (error) - goto unprepare; + return error; error = sysc_map_and_check_registers(ddata); if (error) - goto unprepare; + return error; error = sysc_init_sysc_mask(ddata); if (error) - goto unprepare; + return error; error = sysc_init_idlemodes(ddata); if (error) - goto unprepare; + return error; error = sysc_init_syss_mask(ddata); if (error) - goto unprepare; + return error; error = sysc_init_pdata(ddata); if (error) - goto unprepare; + return error; sysc_init_early_quirks(ddata); @@ -2415,7 +2413,7 @@ static int sysc_probe(struct platform_device *pdev) error = sysc_init_resets(ddata); if (error) - return error; + goto unprepare; error = sysc_init_module(ddata); if (error) diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index f79eede71c62..edefa669153f 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -540,6 +540,10 @@ int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) unsigned long flags; unsigned int i; + /* If there's no device there's nothing to do */ + if (!ccp) + return 0; + spin_lock_irqsave(&ccp->cmd_lock, flags); ccp->suspending = 1; @@ -564,6 +568,10 @@ int ccp_dev_resume(struct sp_device *sp) unsigned long flags; unsigned int i; + /* If there's no device there's nothing to do */ + if (!ccp) + return 0; + spin_lock_irqsave(&ccp->cmd_lock, flags); ccp->suspending = 0; diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index a13f224303c6..0221dee8dd4c 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -210,7 
+210,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr, return -EIO; } - if (!IS_ERR(conf->confd)) { + if (conf->confd) { if (!gpiod_get_raw_value_cansleep(conf->confd)) { dev_err(&mgr->dev, "CONF_DONE is inactive!\n"); return -EIO; @@ -289,10 +289,13 @@ static int altera_ps_probe(struct spi_device *spi) return PTR_ERR(conf->status); } - conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN); + conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN); if (IS_ERR(conf->confd)) { - dev_warn(&spi->dev, "Not using confd gpio: %ld\n", - PTR_ERR(conf->confd)); + dev_err(&spi->dev, "Failed to get confd gpio: %ld\n", + PTR_ERR(conf->confd)); + return PTR_ERR(conf->confd); + } else if (!conf->confd) { + dev_warn(&spi->dev, "Not using confd gpio"); } /* Register manager with unique name */ diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c index 343153d47e5b..004dc03ccf09 100644 --- a/drivers/fsi/fsi-scom.c +++ b/drivers/fsi/fsi-scom.c @@ -38,8 +38,7 @@ #define SCOM_STATUS_PIB_RESP_MASK 0x00007000 #define SCOM_STATUS_PIB_RESP_SHIFT 12 -#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_ERR_SUMMARY | \ - SCOM_STATUS_PROTECTION | \ +#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \ SCOM_STATUS_PARITY | \ SCOM_STATUS_PIB_ABORT | \ SCOM_STATUS_PIB_RESP_MASK) @@ -251,11 +250,6 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status) /* Return -EBUSY on PIB abort to force a retry */ if (status & SCOM_STATUS_PIB_ABORT) return -EBUSY; - if (status & SCOM_STATUS_ERR_SUMMARY) { - fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy, - sizeof(uint32_t)); - return -EIO; - } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 9b384a94d2f3..3e35a8f2c5e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -574,6 +574,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0, 0, 0, 0, 0 }, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index f539a2a92774..7398b4850649 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -534,21 +534,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity) { struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); - unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1); - struct dma_fence *other = centity->fences[idx]; + struct dma_fence *other; + unsigned idx; + long r; - if (other) { - signed long r; - r = dma_fence_wait(other, true); - if (r < 0) { - if (r != -ERESTARTSYS) - DRM_ERROR("Error (%ld) waiting for fence!\n", r); + spin_lock(&ctx->ring_lock); + idx = centity->sequence & (amdgpu_sched_jobs - 1); + other = dma_fence_get(centity->fences[idx]); + spin_unlock(&ctx->ring_lock); - return r; - } - } + if (!other) + return 0; - return 0; + r = dma_fence_wait(other, true); + if (r < 0 && r != -ERESTARTSYS) + DRM_ERROR("Error (%ld) waiting for fence!\n", r); + + dma_fence_put(other); + return r; } void amdgpu_ctx_mgr_init(struct 
amdgpu_ctx_mgr *mgr) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 4ea67f94cae2..c066e1d3f981 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -596,14 +596,14 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) case CHIP_VEGA20: break; case CHIP_RAVEN: - if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) - break; - if ((adev->gfx.rlc_fw_version != 106 && - adev->gfx.rlc_fw_version < 531) || - (adev->gfx.rlc_fw_version == 53815) || - (adev->gfx.rlc_feature_version < 1) || - !adev->gfx.rlc.is_rlc_v2_1) + if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) + &&((adev->gfx.rlc_fw_version != 106 && + adev->gfx.rlc_fw_version < 531) || + (adev->gfx.rlc_fw_version == 53815) || + (adev->gfx.rlc_feature_version < 1) || + !adev->gfx.rlc.is_rlc_v2_1)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + if (adev->pm.pp_feature & PP_GFXOFF_MASK) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_CP | diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index f27c6fbb192e..90c4e87ac5ad 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -2101,7 +2101,11 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, if (ret) return ret; - *query = metrics_table.CurrSocketPower << 8; + /* For the 40.46 release, they changed the value name */ + if (hwmgr->smu_version == 0x282e00) + *query = metrics_table.AverageSocketPower << 8; + else + *query = metrics_table.CurrSocketPower << 8; return ret; } @@ -2349,12 +2353,16 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr) data->dpm_table.soc_table.dpm_state.soft_max_level = data->dpm_table.soc_table.dpm_levels[soft_level].value; - ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); + ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | + FEATURE_DPM_UCLK_MASK | + FEATURE_DPM_SOCCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to highest!", return ret); - ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); + ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | + FEATURE_DPM_UCLK_MASK | + FEATURE_DPM_SOCCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload dpm max level to highest!", return ret); @@ -2387,12 +2395,16 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) data->dpm_table.soc_table.dpm_state.soft_max_level = data->dpm_table.soc_table.dpm_levels[soft_level].value; - ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); + ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | + FEATURE_DPM_UCLK_MASK | + FEATURE_DPM_SOCCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to highest!", return ret); - ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); + ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | + FEATURE_DPM_UCLK_MASK | + FEATURE_DPM_SOCCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload dpm max level to highest!", return ret); @@ -2403,14 +2415,54 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) { + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + uint32_t soft_min_level, soft_max_level; int ret = 0; - ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); + /* gfxclk soft min/max settings */ + soft_min_level = + vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + 
soft_max_level = + vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; + + /* uclk soft min/max settings */ + soft_min_level = + vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + soft_max_level = + vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); + + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->dpm_table.mem_table.dpm_levels[soft_min_level].value; + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->dpm_table.mem_table.dpm_levels[soft_max_level].value; + + /* socclk soft min/max settings */ + soft_min_level = + vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); + soft_max_level = + vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); + + data->dpm_table.soc_table.dpm_state.soft_min_level = + data->dpm_table.soc_table.dpm_levels[soft_min_level].value; + data->dpm_table.soc_table.dpm_state.soft_max_level = + data->dpm_table.soc_table.dpm_levels[soft_max_level].value; + + ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | + FEATURE_DPM_UCLK_MASK | + FEATURE_DPM_SOCCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload DPM Bootup Levels!", return ret); - ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); + ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | + FEATURE_DPM_UCLK_MASK | + FEATURE_DPM_SOCCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload DPM Max Levels!", return ret); diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index dd6fd1c8bf24..6a14497257e4 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -3050,6 +3050,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu, static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value) { + uint32_t smu_version; int ret = 0; SmuMetrics_t metrics; @@ -3060,7 +3061,15 @@ static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value) if (ret) return ret; - *value = metrics.CurrSocketPower << 8; + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) + return ret; + + /* For the 40.46 release, they changed the value name */ + if (smu_version == 0x282e00) + *value = metrics.AverageSocketPower << 8; + else + *value = metrics.CurrSocketPower << 8; return 0; } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c index a0eabc134dd6..9d4d5075cc64 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c @@ -127,7 +127,7 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np) pipe->of_output_port = of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT); - pipe->of_node = np; + pipe->of_node = of_node_get(np); return 0; } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index d50e75f0b2bd..69d9e26c60c8 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -14,8 +14,8 @@ #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_irq.h> -#include <drm/drm_vblank.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> #include "komeda_dev.h" #include 
"komeda_framebuffer.h" @@ -147,7 +147,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc, struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st); struct komeda_plane_state *kplane_st; struct drm_plane_state *plane_st; - struct drm_framebuffer *fb; struct drm_plane *plane; struct list_head zorder_list; int order = 0, err; @@ -173,7 +172,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc, list_for_each_entry(kplane_st, &zorder_list, zlist_node) { plane_st = &kplane_st->base; - fb = plane_st->fb; plane = plane_st->plane; plane_st->normalized_zpos = order++; @@ -206,7 +204,7 @@ static int komeda_kms_check(struct drm_device *dev, struct drm_atomic_state *state) { struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_st, *new_crtc_st; + struct drm_crtc_state *new_crtc_st; int i, err; err = drm_atomic_helper_check_modeset(dev, state); @@ -217,7 +215,7 @@ static int komeda_kms_check(struct drm_device *dev, * so need to add all affected_planes (even unchanged) to * drm_atomic_state. */ - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) { + for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) { err = drm_atomic_add_affected_planes(state, crtc); if (err) return err; @@ -308,11 +306,11 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) komeda_kms_irq_handler, IRQF_SHARED, drm->driver->name, drm); if (err) - goto cleanup_mode_config; + goto free_component_binding; err = mdev->funcs->enable_irq(mdev); if (err) - goto cleanup_mode_config; + goto free_component_binding; drm->irq_enabled = true; @@ -320,15 +318,21 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) err = drm_dev_register(drm, 0); if (err) - goto cleanup_mode_config; + goto free_interrupts; return kms; -cleanup_mode_config: +free_interrupts: drm_kms_helper_poll_fini(drm); drm->irq_enabled = false; + mdev->funcs->disable_irq(mdev); +free_component_binding: + component_unbind_all(mdev->dev, drm); +cleanup_mode_config: drm_mode_config_cleanup(drm); komeda_kms_cleanup_private_objs(kms); + drm->dev_private = NULL; + drm_dev_put(drm); free_kms: kfree(kms); return ERR_PTR(err); @@ -339,13 +343,14 @@ void komeda_kms_detach(struct komeda_kms_dev *kms) struct drm_device *drm = &kms->base; struct komeda_dev *mdev = drm->dev_private; - drm->irq_enabled = false; - mdev->funcs->disable_irq(mdev); drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); + drm->irq_enabled = false; + mdev->funcs->disable_irq(mdev); component_unbind_all(mdev->dev, drm); - komeda_kms_cleanup_private_objs(kms); drm_mode_config_cleanup(drm); + komeda_kms_cleanup_private_objs(kms); drm->dev_private = NULL; drm_dev_put(drm); } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h index a90bcbb3cb23..14b683164544 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h @@ -480,6 +480,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe, struct seq_file *sf); /* component APIs */ +extern __printf(10, 11) struct komeda_component * komeda_component_add(struct komeda_pipeline *pipe, size_t comp_sz, u32 id, u32 hw_id, diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c index 617e1f7b8472..2851cac94d86 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c @@ 
-148,7 +148,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms, if (!kcrtc->master->wb_layer) return 0; - kwb_conn = kzalloc(sizeof(*wb_conn), GFP_KERNEL); + kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL); if (!kwb_conn) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 60652ebbdf61..18e4cba76720 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -539,7 +539,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); - drm_connector_attach_max_bpc_property(connector, 6, 12); + + /* + * Reuse the prop from the SST connector because we're + * not allowed to create new props after device registration. + */ + connector->max_bpc_property = + intel_dp->attached_connector->base.max_bpc_property; + if (connector->max_bpc_property) + drm_connector_attach_max_bpc_property(connector, 6, 12); return connector; diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index ffec807b8960..f413904a3e96 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -541,7 +541,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); DRM_INFO("PPS2 = 0x%08x\n", pps_val); - if (encoder->type == INTEL_OUTPUT_EDP) { + if (cpu_transcoder == TRANSCODER_EDP) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f62e3397d936..bac1ee94f63f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1598,6 +1598,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) pci_set_master(pdev); + /* + * We don't have a max segment size, so set it to the max so sg's + * debugging layer doesn't complain + */ + dma_set_max_seg_size(&pdev->dev, UINT_MAX); + /* overlay on gen2 is broken and can't address above 1G */ if (IS_GEN(dev_priv, 2)) { ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 94d3992b599d..724627afdedc 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -101,6 +101,9 @@ static struct _balloon_info_ bl_info; static void vgt_deballoon_space(struct i915_ggtt *ggtt, struct drm_mm_node *node) { + if (!drm_mm_node_allocated(node)) + return; + DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n", node->start, node->start + node->size, diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index de0f882f0f7b..14b41de44ebc 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -4,6 +4,7 @@ * Author: Archit Taneja <archit@ti.com> */ +#include <linux/bitops.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -20,7 +21,8 @@ int omapdss_device_init_output(struct omap_dss_device *out) { struct device_node *remote_node; - remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0); + remote_node = of_graph_get_remote_node(out->dev->of_node, + ffs(out->of_ports) - 1, 0); if (!remote_node) { 
dev_dbg(out->dev, "failed to find video sink\n"); return 0; diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index f33e349c4ec5..952201c6d821 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -59,6 +59,11 @@ module_param_named(num_heads, qxl_num_crtc, int, 0400); static struct drm_driver qxl_driver; static struct pci_driver qxl_pci_driver; +static bool is_vga(struct pci_dev *pdev) +{ + return pdev->class == PCI_CLASS_DISPLAY_VGA << 8; +} + static int qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -83,9 +88,17 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto disable_pci; + if (is_vga(pdev)) { + ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO); + if (ret) { + DRM_ERROR("can't get legacy vga ioports\n"); + goto disable_pci; + } + } + ret = qxl_device_init(qdev, &qxl_driver, pdev); if (ret) - goto disable_pci; + goto put_vga; ret = qxl_modeset_init(qdev); if (ret) @@ -105,6 +118,9 @@ modeset_cleanup: qxl_modeset_fini(qdev); unload: qxl_device_fini(qdev); +put_vga: + if (is_vga(pdev)) + vga_put(pdev, VGA_RSRC_LEGACY_IO); disable_pci: pci_disable_device(pdev); free_dev: @@ -122,6 +138,8 @@ qxl_pci_remove(struct pci_dev *pdev) qxl_modeset_fini(qdev); qxl_device_fini(qdev); + if (is_vga(pdev)) + vga_put(pdev, VGA_RSRC_LEGACY_IO); dev->dev_private = NULL; kfree(qdev); diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index b2da31310d24..09b526518f5a 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -204,6 +204,7 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev, .interruptible = false, .no_wait_gpu = false }; + size_t max_segment; /* wtf swapping */ if (bo->pages) @@ -215,8 +216,13 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev, if (!bo->pages) goto out; - ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, GFP_KERNEL); + max_segment = virtio_max_dma_size(qdev->vdev); + max_segment &= PAGE_MASK; + if (max_segment > SCATTERLIST_MAX_SEGMENT) + max_segment = SCATTERLIST_MAX_SEGMENT; + ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, + nr_pages << PAGE_SHIFT, + max_segment, GFP_KERNEL); if (ret) goto out; return 0; diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index c0378c3de9a4..91dfeba62485 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -165,6 +165,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { .driver_data = (kernel_ulong_t)0, }, { + /* Lewisburg PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226), + .driver_data = (kernel_ulong_t)0, + }, + { /* Gemini Lake */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), .driver_data = (kernel_ulong_t)&intel_th_2x, @@ -199,6 +204,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Tiger Lake PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index e55b902560de..181e7ff1ec4f 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -1276,7 +1276,6 @@ int stm_source_register_device(struct device *parent, err: put_device(&src->dev); - kfree(src); return err; } diff --git 
a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index d7fd76baec92..19ef2b0c682a 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -790,7 +790,10 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter, static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap) { - u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; + u32 val; + + /* We do not support the SMBUS Quick command */ + val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); if (adap->algo->reg_slave) val |= I2C_FUNC_SLAVE; diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index e7f9305b2dd9..f5f001738df5 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave) dev->disable_int(dev); dev->disable(dev); + synchronize_irq(dev->irq); dev->slave = NULL; pm_runtime_put(dev->dev); diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index f2956936c3f2..2e08b4722dc4 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1194,19 +1194,28 @@ static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle, int i; status = acpi_get_object_info(obj_handle, &info); - if (!ACPI_SUCCESS(status) || !(info->valid & ACPI_VALID_HID)) + if (ACPI_FAILURE(status)) return AE_OK; + if (!(info->valid & ACPI_VALID_HID)) + goto smo88xx_not_found; + hid = info->hardware_id.string; if (!hid) - return AE_OK; + goto smo88xx_not_found; i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid); if (i < 0) - return AE_OK; + goto smo88xx_not_found; + + kfree(info); *((bool *)return_value) = true; return AE_CTRL_TERMINATE; + +smo88xx_not_found: + kfree(info); + return AE_OK; } static bool is_dell_system_with_lis3lv02d(void) diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 252edb433fdf..29eae1bf4f86 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -234,6 +234,10 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = { .max_num_msgs = 255, }; +static const struct i2c_adapter_quirks mt8183_i2c_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN, +}; + static const struct mtk_i2c_compatible mt2712_compat = { .regs = mt_i2c_regs_v1, .pmic_i2c = 0, @@ -298,6 +302,7 @@ static const struct mtk_i2c_compatible mt8173_compat = { }; static const struct mtk_i2c_compatible mt8183_compat = { + .quirks = &mt8183_i2c_quirks, .regs = mt_i2c_regs_v2, .pmic_i2c = 0, .dcm = 0, @@ -870,7 +875,11 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id) static u32 mtk_i2c_functionality(struct i2c_adapter *adap) { - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; + if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN) + return I2C_FUNC_I2C | + (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); + else + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm mtk_i2c_algorithm = { diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index c46c4bddc7ca..cba325eb852f 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -91,7 +91,7 @@ #define SB800_PIIX4_PORT_IDX_MASK 0x06 #define SB800_PIIX4_PORT_IDX_SHIFT 1 -/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ +/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ #define SB800_PIIX4_PORT_IDX_KERNCZ 0x02 #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18 #define 
SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3 @@ -358,18 +358,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, /* Find which register is used for port selection */ if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD || PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) { - switch (PIIX4_dev->device) { - case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS: + if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS || + (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS && + PIIX4_dev->revision >= 0x1F)) { piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ; piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ; piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ; - break; - case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS: - default: + } else { piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; - break; } } else { if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index f26ed495d384..9c440fa6a3dd 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -832,7 +832,7 @@ EXPORT_SYMBOL_GPL(i2c_new_device); */ void i2c_unregister_device(struct i2c_client *client) { - if (!client) + if (IS_ERR_OR_NULL(client)) return; if (client->dev.of_node) { diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index 1db5ad3d9580..8c1931a57f4a 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -1962,6 +1962,10 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) struct sockaddr_in s_laddr, *s_raddr; const struct in_ifaddr *ifa; + if (!in_dev) { + rv = -ENODEV; + goto out; + } memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr)); s_raddr = (struct sockaddr_in *)&id->remote_addr; @@ -1991,22 +1995,27 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr), *s_raddr = &to_sockaddr_in6(id->remote_addr); + if (!in6_dev) { + rv = -ENODEV; + goto out; + } siw_dbg(id->device, "laddr %pI6:%d, raddr %pI6:%d\n", &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port), &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port)); - read_lock_bh(&in6_dev->lock); + rtnl_lock(); list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { - struct sockaddr_in6 bind_addr; - + if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) + continue; if (ipv6_addr_any(&s_laddr->sin6_addr) || ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) { - bind_addr.sin6_family = AF_INET6; - bind_addr.sin6_port = s_laddr->sin6_port; - bind_addr.sin6_flowinfo = 0; - bind_addr.sin6_addr = ifp->addr; - bind_addr.sin6_scope_id = dev->ifindex; + struct sockaddr_in6 bind_addr = { + .sin6_family = AF_INET6, + .sin6_port = s_laddr->sin6_port, + .sin6_flowinfo = 0, + .sin6_addr = ifp->addr, + .sin6_scope_id = dev->ifindex }; rv = siw_listen_address(id, backlog, (struct sockaddr *)&bind_addr, @@ -2015,12 +2024,12 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) listeners++; } } - read_unlock_bh(&in6_dev->lock); - + rtnl_unlock(); in6_dev_put(in6_dev); } else { - return -EAFNOSUPPORT; + rv = -EAFNOSUPPORT; } +out: if (listeners) rv = 0; else if (!rv) diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index 1606658b9b7e..24245ccdba72 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -22,7 +22,7 @@ struct lkdtm_list { * recurse past the end of THREAD_SIZE by default. 
*/ #if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0) -#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2) +#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2) #else #define REC_STACK_SIZE (THREAD_SIZE / 8) #endif @@ -91,7 +91,7 @@ void lkdtm_LOOP(void) void lkdtm_EXHAUST_STACK(void) { - pr_info("Calling function with %d frame size to depth %d ...\n", + pr_info("Calling function with %lu frame size to depth %d ...\n", REC_STACK_SIZE, recur_count); recursive_loop(recur_count); pr_info("FAIL: survived without exhausting stack?!\n"); diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 6c0173772162..77f7dff7098d 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -81,6 +81,8 @@ #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ +#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */ + #define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */ #define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 57cb68f5cc64..541538eff8b1 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -98,6 +98,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)}, diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 8840299420e0..5e6be1527571 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -691,7 +691,6 @@ static int vmballoon_alloc_page_list(struct vmballoon *b, } if (page) { - vmballoon_mark_page_offline(page, ctl->page_size); /* Success. Add the page to the list and continue. */ list_add(&page->lru, &ctl->pages); continue; @@ -930,7 +929,6 @@ static void vmballoon_release_page_list(struct list_head *page_list, list_for_each_entry_safe(page, tmp, page_list, lru) { list_del(&page->lru); - vmballoon_mark_page_online(page, page_size); __free_pages(page, vmballoon_page_order(page_size)); } @@ -1005,6 +1003,7 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b, enum vmballoon_page_size_type page_size) { unsigned long flags; + struct page *page; if (page_size == VMW_BALLOON_4K_PAGE) { balloon_page_list_enqueue(&b->b_dev_info, pages); @@ -1014,6 +1013,11 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b, * for the balloon compaction mechanism. 
*/ spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); + + list_for_each_entry(page, pages, lru) { + vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE); + } + list_splice_init(pages, &b->huge_pages); __count_vm_events(BALLOON_INFLATE, *n_pages * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE)); @@ -1056,6 +1060,8 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b, /* 2MB pages */ spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) { + vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE); + list_move(&page->lru, pages); if (++i == n_req_pages) break; diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c index bad89b6e0802..345addd9306d 100644 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c @@ -310,7 +310,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle) entry = container_of(resource, struct dbell_entry, resource); if (entry->run_delayed) { - schedule_work(&entry->work); + if (!schedule_work(&entry->work)) + vmci_resource_put(resource); } else { entry->notify_cb(entry->client_data); vmci_resource_put(resource); @@ -361,7 +362,8 @@ static void dbell_fire_entries(u32 notify_idx) atomic_read(&dbell->active) == 1) { if (dbell->run_delayed) { vmci_resource_get(&dbell->resource); - schedule_work(&dbell->work); + if (!schedule_work(&dbell->work)) + vmci_resource_put(&dbell->resource); } else { dbell->notify_cb(dbell->client_data); } diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index d681e8aaca83..fe914ff5f5d6 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -1292,6 +1292,12 @@ int mmc_attach_sd(struct mmc_host *host) goto err; } + /* + * Some SD cards claims an out of spec VDD voltage range. Let's treat + * these bits as being in-valid and especially also bit7. 
+ */ + ocr &= ~0x7FFF; + rocr = mmc_select_voltage(host, ocr); /* diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index 163d1cf4367e..44139fceac24 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -369,6 +369,7 @@ static int sdhci_cdns_probe(struct platform_device *pdev) host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning; host->mmc_host_ops.hs400_enhanced_strobe = sdhci_cdns_hs400_enhanced_strobe; + sdhci_enable_v4_mode(host); sdhci_get_of_property(pdev); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index d4e7e8b7be77..e7d1920729fb 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -357,6 +357,9 @@ static int sdhci_at91_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(&pdev->dev, 50); pm_runtime_use_autosuspend(&pdev->dev); + /* HS200 is broken at this moment */ + host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200; + ret = sdhci_add_host(host); if (ret) goto pm_runtime_disable; diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index 83a4767ca680..d07b9793380f 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -217,10 +217,11 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host, struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); u32 div, val, mask; - div = sdhci_sprd_calc_div(sprd_host->base_rate, clk); + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); - clk |= ((div & 0x300) >> 2) | ((div & 0xFF) << 8); - sdhci_enable_clk(host, clk); + div = sdhci_sprd_calc_div(sprd_host->base_rate, clk); + div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8); + sdhci_enable_clk(host, div); /* enable auto gate sdhc_enable_auto_gate */ val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); @@ -373,6 +374,11 @@ static unsigned int sdhci_sprd_get_max_timeout_count(struct sdhci_host *host) return 1 << 31; } +static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host) +{ + return 0; +} + static struct sdhci_ops sdhci_sprd_ops = { .read_l = sdhci_sprd_readl, .write_l = sdhci_sprd_writel, @@ -385,6 +391,7 @@ static struct sdhci_ops sdhci_sprd_ops = { .set_uhs_signaling = sdhci_sprd_set_uhs_signaling, .hw_reset = sdhci_sprd_hw_reset, .get_max_timeout_count = sdhci_sprd_get_max_timeout_count, + .get_ro = sdhci_sprd_get_ro, }; static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq) @@ -501,9 +508,12 @@ static void sdhci_sprd_phy_param_parse(struct sdhci_sprd_host *sprd_host, } static const struct sdhci_pltfm_data sdhci_sprd_pdata = { - .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_MISSING_CAPS, .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 | - SDHCI_QUIRK2_USE_32BIT_BLK_CNT, + SDHCI_QUIRK2_USE_32BIT_BLK_CNT | + SDHCI_QUIRK2_PRESET_VALUE_BROKEN, .ops = &sdhci_sprd_ops, }; @@ -605,6 +615,16 @@ static int sdhci_sprd_probe(struct platform_device *pdev) sdhci_enable_v4_mode(host); + /* + * Supply the existing CAPS, but clear the UHS-I modes. This + * will allow these modes to be specified only by device + * tree properties through mmc_of_parse(). 
+ */ + host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); + host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); + host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | + SDHCI_SUPPORT_DDR50); + ret = sdhci_setup_host(host); if (ret) goto pm_runtime_disable; diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index f4d4761cf20a..02d8f524bb9e 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -258,6 +258,16 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg) } } +static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) +{ + /* + * Write-enable shall be assumed if GPIO is missing in a board's + * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on + * Tegra. + */ + return mmc_gpio_get_ro(host->mmc); +} + static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -1224,6 +1234,7 @@ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = { }; static const struct sdhci_ops tegra_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, .set_clock = tegra_sdhci_set_clock, @@ -1279,6 +1290,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = { }; static const struct sdhci_ops tegra114_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_w = tegra_sdhci_writew, .write_l = tegra_sdhci_writel, @@ -1332,6 +1344,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = { }; static const struct sdhci_ops tegra210_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_w = tegra210_sdhci_writew, .write_l = tegra_sdhci_writel, @@ -1366,6 +1379,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = { }; static const struct sdhci_ops tegra186_sdhci_ops = { + .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, .set_clock = tegra_sdhci_set_clock, diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig index b4e3caf7d799..a4d8968d133d 100644 --- a/drivers/mtd/hyperbus/Kconfig +++ b/drivers/mtd/hyperbus/Kconfig @@ -1,5 +1,6 @@ menuconfig MTD_HYPERBUS tristate "HyperBus support" + depends on HAS_IOMEM select MTD_CFI select MTD_MAP_BANK_WIDTH_2 select MTD_CFI_AMDSTD diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index a226b389e12d..2e402e4d866f 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -80,6 +80,7 @@ static const struct of_device_id ksz9477_dt_ids[] = { { .compatible = "microchip,ksz9897" }, { .compatible = "microchip,ksz9893" }, { .compatible = "microchip,ksz9563" }, + { .compatible = "microchip,ksz8563" }, {}, }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index c44a8d23d973..13d027baaa8b 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -299,6 +299,7 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset, #define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \ { \ + .name = #width, \ .val_bits = (width), \ .reg_stride = (width) / 8, \ .reg_bits = (regbits) + (regalign), \ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index b41f23679a08..7ce9c69e9c44 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c 
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void) ret = xgbe_platform_init(); if (ret) - return ret; + goto err_platform_init; ret = xgbe_pci_init(); if (ret) - return ret; + goto err_pci_init; return 0; + +err_pci_init: + xgbe_platform_exit(); +err_platform_init: + unregister_netdevice_notifier(&xgbe_netdev_notifier); + return ret; } static void __exit xgbe_mod_exit(void) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c index 440690b18734..aee827f07c16 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c @@ -431,7 +431,8 @@ int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id) if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) break; } - if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { + if (rule && rule->type == aq_rx_filter_vlan && + be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { struct ethtool_rxnfc cmd; cmd.fs.location = rule->aq_fsp.location; @@ -843,7 +844,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic) return err; if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { - if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) { + if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) { err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, !(aq_nic->packet_filter & IFF_PROMISC)); aq_nic->aq_nic_cfg.is_vlan_force_promisc = false; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 100722ad5c2d..b4a0fb281e69 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -61,6 +61,10 @@ static int aq_ndev_open(struct net_device *ndev) if (err < 0) goto err_exit; + err = aq_filters_vlans_update(aq_nic); + if (err < 0) + goto err_exit; + err = aq_nic_start(aq_nic); if (err < 0) goto err_exit; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index e1392766e21e..8f66e7817811 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -393,7 +393,7 @@ int aq_nic_start(struct aq_nic_s *self) self->aq_nic_cfg.link_irq_vec); err = request_threaded_irq(irqvec, NULL, aq_linkstate_threaded_isr, - IRQF_SHARED, + IRQF_SHARED | IRQF_ONESHOT, self->ndev->name, self); if (err < 0) goto err_exit; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 715685aa48c3..28892b8acd0e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -86,6 +86,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) } } +err_exit: if (!was_tx_cleaned) work_done = budget; @@ -95,7 +96,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) 1U << self->aq_ring_param.vec_idx); } } -err_exit: + return work_done; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 1586316eb6f1..12cb77ef1081 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1124,6 +1124,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = { .set_coalesce = bcmgenet_set_coalesce, .get_link_ksettings = bcmgenet_get_link_ksettings, .set_link_ksettings = bcmgenet_set_link_ksettings, + .get_ts_info = 
ethtool_op_get_ts_info, }; /* Power down the unimac, based on mode. */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 5ca17e62dc3e..35b59b5edf0f 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4154,7 +4154,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,emac", .data = &emac_config }, { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, { .compatible = "cdns,zynq-gem", .data = &zynq_config }, - { .compatible = "sifive,fu540-macb", .data = &fu540_c000_config }, + { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, macb_dt_ids); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c index 2fd2586e42bf..bc594892507a 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -82,7 +82,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev, n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); if (n != 1) { err = -EPERM; - goto err_irq; + goto err_irq_vectors; } ptp_qoriq->irq = pci_irq_vector(pdev, 0); @@ -107,6 +107,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev, err_no_clock: free_irq(ptp_qoriq->irq, ptp_qoriq); err_irq: + pci_free_irq_vectors(pdev); +err_irq_vectors: iounmap(base); err_ioremap: kfree(ptp_qoriq); @@ -125,6 +127,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev) enetc_phc_index = -1; ptp_qoriq_free(ptp_qoriq); + pci_free_irq_vectors(pdev); kfree(ptp_qoriq); pci_release_mem_regions(pdev); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 07efa2b40003..4f83f97ffe8b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1983,6 +1983,10 @@ static void __ibmvnic_reset(struct work_struct *work) rwi = get_next_rwi(adapter); while (rwi) { + if (adapter->state == VNIC_REMOVING || + adapter->state == VNIC_REMOVED) + goto out; + if (adapter->force_reset_recovery) { adapter->force_reset_recovery = false; rc = do_hard_reset(adapter, rwi, reset_state); @@ -2007,7 +2011,7 @@ static void __ibmvnic_reset(struct work_struct *work) netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); } - +out: adapter->resetting = false; if (we_lock_rtnl) rtnl_unlock(); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index c2e00bb587cd..5f56ee83e3b1 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4931,6 +4931,13 @@ static const struct dmi_system_id msi_blacklist[] = { DMI_MATCH(DMI_BOARD_NAME, "P6T"), }, }, + { + .ident = "ASUS P6X", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P6X"), + }, + }, {} }; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 4054b70d7719..5afcb3c4c2ef 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1163,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool clr_gpr, lmem_step step) { s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off; - bool first = true, last; + bool first = true, narrow_ld, last; bool needs_inc = false; swreg stack_off_reg; u8 prev_gpr = 255; @@ -1209,13 +1209,22 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 
needs_inc = true; } + + narrow_ld = clr_gpr && size < 8; + if (lm3) { + unsigned int nop_cnt; + emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); - /* For size < 4 one slot will be filled by zeroing of upper. */ - wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3); + /* For size < 4 one slot will be filled by zeroing of upper, + * but be careful, that zeroing could be eliminated by zext + * optimization. + */ + nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3; + wrp_nops(nfp_prog, nop_cnt); } - if (clr_gpr && size < 8) + if (narrow_ld) wrp_zext(nfp_prog, meta, gpr); while (size) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index b0708460e342..987ae221f6be 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1579,9 +1579,10 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, struct nfp_flower_priv *priv = app->priv; struct flow_block_cb *block_cb; - if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && - !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && - nfp_flower_internal_port_can_offload(app, netdev))) + if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && + !nfp_flower_internal_port_can_offload(app, netdev)) || + (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && + nfp_flower_internal_port_can_offload(app, netdev))) return -EOPNOTSUPP; switch (f->command) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index def8c198b016..2600ce476d6b 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -348,13 +348,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, flow.daddr = *(__be32 *)n->primary_key; - /* Only concerned with route changes for representors. */ - if (!nfp_netdev_is_nfp_repr(n->dev)) - return NOTIFY_DONE; - app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); app = app_priv->app; + if (!nfp_netdev_is_nfp_repr(n->dev) && + !nfp_flower_internal_port_can_offload(app, n->dev)) + return NOTIFY_DONE; + /* Only concerned with changes to routes already added to NFP. 
*/ if (!nfp_tun_has_route(app, flow.daddr)) return NOTIFY_DONE; diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 7a5e6c5abb57..276c7cae7cee 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -794,15 +794,16 @@ static int sgiseeq_probe(struct platform_device *pdev) printk(KERN_ERR "Sgiseeq: Cannot register net device, " "aborting.\n"); err = -ENODEV; - goto err_out_free_page; + goto err_out_free_attrs; } printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); return 0; -err_out_free_page: - free_page((unsigned long) sp->srings); +err_out_free_attrs: + dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings, + sp->srings_dma, DMA_ATTR_NON_CONSISTENT); err_out_free_dev: free_netdev(dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 4644b2aeeba1..e2e469c37a4d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1194,10 +1194,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable) int ret; struct device *dev = &bsp_priv->pdev->dev; - if (!ldo) { - dev_err(dev, "no regulator found\n"); - return -1; - } + if (!ldo) + return 0; if (enable) { ret = regulator_enable(ldo); diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 58bb25e4af10..7935593debb1 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -523,6 +523,32 @@ int genphy_c45_read_status(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(genphy_c45_read_status); +/** + * genphy_c45_config_aneg - restart auto-negotiation or forced setup + * @phydev: target phy_device struct + * + * Description: If auto-negotiation is enabled, we configure the + * advertising, and then restart auto-negotiation. If it is not + * enabled, then we force a configuration. 
+ */ +int genphy_c45_config_aneg(struct phy_device *phydev) +{ + bool changed = false; + int ret; + + if (phydev->autoneg == AUTONEG_DISABLE) + return genphy_c45_pma_setup_forced(phydev); + + ret = genphy_c45_an_config_aneg(phydev); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + return genphy_c45_check_and_restart_aneg(phydev, changed); +} +EXPORT_SYMBOL_GPL(genphy_c45_config_aneg); + /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 0acd5b49f450..35d29a823af8 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -507,7 +507,7 @@ static int phy_config_aneg(struct phy_device *phydev) * allowed to call genphy_config_aneg() */ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) - return -EOPNOTSUPP; + return genphy_c45_config_aneg(phydev); return genphy_config_aneg(phydev); } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index c6fa0c17c13d..778d27d1fb15 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -4215,8 +4215,7 @@ static int rtl8152_close(struct net_device *netdev) unregister_pm_notifier(&tp->pm_notifier); #endif tasklet_disable(&tp->tx_tl); - if (!test_bit(RTL8152_UNPLUG, &tp->flags)) - napi_disable(&tp->napi); + napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); @@ -5604,7 +5603,6 @@ static int rtl8152_probe(struct usb_interface *intf, return 0; out1: - netif_napi_del(&tp->napi); tasklet_kill(&tp->tx_tl); usb_set_intfdata(intf, NULL); out: @@ -5620,7 +5618,6 @@ static void rtl8152_disconnect(struct usb_interface *intf) if (tp) { rtl_set_unplug(tp); - netif_napi_del(&tp->napi); unregister_netdev(tp->netdev); tasklet_kill(&tp->tx_tl); cancel_delayed_work_sync(&tp->hw_phy_work); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 1f500cddb3a7..55b713255b8e 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -556,6 +556,30 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; +const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = { + .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", + .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = { + .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", + .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. 
+ */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + const struct iwl_cfg iwl22000_2ax_cfg_jf = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 1c1bf1b281cd..6c04f8223aff 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -577,6 +577,8 @@ extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr; extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr; extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; +extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0; +extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg killer1650x_2ax_cfg; extern const struct iwl_cfg killer1650w_2ax_cfg; extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 7c5aaeaf7fe5..d9ed53b7c768 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1062,6 +1062,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0; else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; + else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) + iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0; + else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) + iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0; } /* same thing for QuZ... */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 935e35dafce5..db62c8314603 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3602,12 +3602,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && - ((trans->cfg != &iwl_ax200_cfg_cc && - trans->cfg != &iwl_ax201_cfg_qu_hr && - trans->cfg != &killer1650x_2ax_cfg && - trans->cfg != &killer1650w_2ax_cfg && - trans->cfg != &iwl_ax201_cfg_quz_hr) || - trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { + trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) { u32 hw_status; hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig index de2e62c3310a..e3eb19b85fa4 100644 --- a/drivers/soc/ixp4xx/Kconfig +++ b/drivers/soc/ixp4xx/Kconfig @@ -1,4 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only +if ARCH_IXP4XX || COMPILE_TEST + menu "IXP4xx SoC drivers" config IXP4XX_QMGR @@ -15,3 +17,5 @@ config IXP4XX_NPE and is automatically selected by Ethernet and HSS drivers. 
endmenu + +endif diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c index bb77c220b6f8..ccc6d53fe788 100644 --- a/drivers/soc/ti/pm33xx.c +++ b/drivers/soc/ti/pm33xx.c @@ -141,7 +141,7 @@ static int __init am43xx_map_gic(void) } #ifdef CONFIG_SUSPEND -struct wkup_m3_wakeup_src rtc_wake_src(void) +static struct wkup_m3_wakeup_src rtc_wake_src(void) { u32 i; @@ -157,7 +157,7 @@ struct wkup_m3_wakeup_src rtc_wake_src(void) return rtc_ext_wakeup; } -int am33xx_rtc_only_idle(unsigned long wfi_flags) +static int am33xx_rtc_only_idle(unsigned long wfi_flags) { omap_rtc_power_off_program(&omap_rtc->dev); am33xx_do_wfi_sram(wfi_flags); @@ -252,7 +252,7 @@ static int am33xx_pm_begin(suspend_state_t state) if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) { nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0"); - if (nvmem) + if (!IS_ERR(nvmem)) nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, (void *)&rtc_magic_val); rtc_only_idle = 1; @@ -278,9 +278,12 @@ static void am33xx_pm_end(void) struct nvmem_device *nvmem; nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0"); + if (IS_ERR(nvmem)) + return; + m3_ipc->ops->finish_low_power(m3_ipc); if (rtc_only_idle) { - if (retrigger_irq) + if (retrigger_irq) { /* * 32 bits of Interrupt Set-Pending correspond to 32 * 32 interrupts. Compute the bit offset of the @@ -291,8 +294,10 @@ static void am33xx_pm_end(void) writel_relaxed(1 << (retrigger_irq & 31), gic_dist_base + GIC_INT_SET_PENDING_BASE + retrigger_irq / 32 * 4); - nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, - (void *)&val); + } + + nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, + (void *)&val); } rtc_only_idle = 0; @@ -415,7 +420,7 @@ static int am33xx_pm_rtc_setup(void) nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0"); - if (nvmem) { + if (!IS_ERR(nvmem)) { nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, (void *)&rtc_magic_val); if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC) diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 6a5ee8e6da10..67ad40b0a05b 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -709,12 +709,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget) struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget); unsigned long flags; - spin_lock_irqsave(&ci->lock, flags); - ci->gadget.speed = USB_SPEED_UNKNOWN; - ci->remote_wakeup = 0; - ci->suspended = 0; - spin_unlock_irqrestore(&ci->lock, flags); - /* flush all endpoints */ gadget_for_each_ep(ep, gadget) { usb_ep_fifo_flush(ep); @@ -732,6 +726,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget) ci->status = NULL; } + spin_lock_irqsave(&ci->lock, flags); + ci->gadget.speed = USB_SPEED_UNKNOWN; + ci->remote_wakeup = 0; + ci->suspended = 0; + spin_unlock_irqrestore(&ci->lock, flags); + return 0; } @@ -1303,6 +1303,10 @@ static int ep_disable(struct usb_ep *ep) return -EBUSY; spin_lock_irqsave(hwep->lock, flags); + if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) { + spin_unlock_irqrestore(hwep->lock, flags); + return 0; + } /* only internal SW should disable ctrl endpts */ @@ -1392,6 +1396,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req, return -EINVAL; spin_lock_irqsave(hwep->lock, flags); + if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) { + spin_unlock_irqrestore(hwep->lock, flags); + return 0; + } retval = _ep_queue(ep, req, gfp_flags); spin_unlock_irqrestore(hwep->lock, flags); return retval; @@ -1415,8 +1423,8 @@ static 
int ep_dequeue(struct usb_ep *ep, struct usb_request *req) return -EINVAL; spin_lock_irqsave(hwep->lock, flags); - - hw_ep_flush(hwep->ci, hwep->num, hwep->dir); + if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN) + hw_ep_flush(hwep->ci, hwep->num, hwep->dir); list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { dma_pool_free(hwep->td_pool, node->ptr, node->dma); @@ -1487,6 +1495,10 @@ static void ep_fifo_flush(struct usb_ep *ep) } spin_lock_irqsave(hwep->lock, flags); + if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) { + spin_unlock_irqrestore(hwep->lock, flags); + return; + } hw_ep_flush(hwep->ci, hwep->num, hwep->dir); @@ -1559,6 +1571,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget) int ret = 0; spin_lock_irqsave(&ci->lock, flags); + if (ci->gadget.speed == USB_SPEED_UNKNOWN) { + spin_unlock_irqrestore(&ci->lock, flags); + return 0; + } if (!ci->remote_wakeup) { ret = -EOPNOTSUPP; goto out; diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index a7824a51f86d..70afb2ca1eab 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -587,10 +587,20 @@ static int wdm_flush(struct file *file, fl_owner_t id) { struct wdm_device *desc = file->private_data; - wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags)); + wait_event(desc->wait, + /* + * needs both flags. We cannot do with one + * because resetting it would cause a race + * with write() yet we need to signal + * a disconnect + */ + !test_bit(WDM_IN_USE, &desc->flags) || + test_bit(WDM_DISCONNECTING, &desc->flags)); /* cannot dereference desc->intf if WDM_DISCONNECTING */ - if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags)) + if (test_bit(WDM_DISCONNECTING, &desc->flags)) + return -ENODEV; + if (desc->werr < 0) dev_err(&desc->intf->dev, "Error in flush path: %d\n", desc->werr); @@ -974,8 +984,6 @@ static void wdm_disconnect(struct usb_interface *intf) spin_lock_irqsave(&desc->iuspin, flags); set_bit(WDM_DISCONNECTING, &desc->flags); set_bit(WDM_READ, &desc->flags); - /* to terminate pending flushes */ - clear_bit(WDM_IN_USE, &desc->flags); spin_unlock_irqrestore(&desc->iuspin, flags); wake_up_all(&desc->wait); mutex_lock(&desc->rlock); diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 4942122b2346..36858ddd8d9b 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -2362,8 +2362,11 @@ static int usbtmc_probe(struct usb_interface *intf, goto err_put; } + retcode = -EINVAL; data->bulk_in = bulk_in->bEndpointAddress; data->wMaxPacketSize = usb_endpoint_maxp(bulk_in); + if (!data->wMaxPacketSize) + goto err_put; dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in); data->bulk_out = bulk_out->bEndpointAddress; diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 03432467b05f..7537681355f6 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -216,17 +216,18 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) /* EHCI, OHCI */ hcd->rsrc_start = pci_resource_start(dev, 0); hcd->rsrc_len = pci_resource_len(dev, 0); - if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, - driver->description)) { + if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start, + hcd->rsrc_len, driver->description)) { dev_dbg(&dev->dev, "controller already in use\n"); retval = -EBUSY; goto put_hcd; } - hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len); + hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start, + hcd->rsrc_len); if (hcd->regs == 
NULL) { dev_dbg(&dev->dev, "error mapping memory\n"); retval = -EFAULT; - goto release_mem_region; + goto put_hcd; } } else { @@ -240,8 +241,8 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) hcd->rsrc_start = pci_resource_start(dev, region); hcd->rsrc_len = pci_resource_len(dev, region); - if (request_region(hcd->rsrc_start, hcd->rsrc_len, - driver->description)) + if (devm_request_region(&dev->dev, hcd->rsrc_start, + hcd->rsrc_len, driver->description)) break; } if (region == PCI_ROM_RESOURCE) { @@ -275,20 +276,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) } if (retval != 0) - goto unmap_registers; + goto put_hcd; device_wakeup_enable(hcd->self.controller); if (pci_dev_run_wake(dev)) pm_runtime_put_noidle(&dev->dev); return retval; -unmap_registers: - if (driver->flags & HCD_MEMORY) { - iounmap(hcd->regs); -release_mem_region: - release_mem_region(hcd->rsrc_start, hcd->rsrc_len); - } else - release_region(hcd->rsrc_start, hcd->rsrc_len); put_hcd: usb_put_hcd(hcd); disable_pci: @@ -347,14 +341,6 @@ void usb_hcd_pci_remove(struct pci_dev *dev) dev_set_drvdata(&dev->dev, NULL); up_read(&companions_rwsem); } - - if (hcd->driver->flags & HCD_MEMORY) { - iounmap(hcd->regs); - release_mem_region(hcd->rsrc_start, hcd->rsrc_len); - } else { - release_region(hcd->rsrc_start, hcd->rsrc_len); - } - usb_put_hcd(hcd); pci_disable_device(dev); } diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index 5f1b14f3e5a0..bb6af6b5ac97 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -2265,7 +2265,7 @@ static void udc_handle_ep0_setup(struct lpc32xx_udc *udc) default: break; } - + break; case USB_REQ_SET_ADDRESS: if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) { diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index b457fdaff297..1fe3deec35cf 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -419,8 +419,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci) * other cases where the next software may expect clean state from the * "firmware". this is bus-neutral, unlike shutdown() methods. 
*/ -static void -ohci_shutdown (struct usb_hcd *hcd) +static void _ohci_shutdown(struct usb_hcd *hcd) { struct ohci_hcd *ohci; @@ -436,6 +435,16 @@ ohci_shutdown (struct usb_hcd *hcd) ohci->rh_state = OHCI_RH_HALTED; } +static void ohci_shutdown(struct usb_hcd *hcd) +{ + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + unsigned long flags; + + spin_lock_irqsave(&ohci->lock, flags); + _ohci_shutdown(hcd); + spin_unlock_irqrestore(&ohci->lock, flags); +} + /*-------------------------------------------------------------------------* * HC functions *-------------------------------------------------------------------------*/ @@ -760,7 +769,7 @@ static void io_watchdog_func(struct timer_list *t) died: usb_hc_died(ohci_to_hcd(ohci)); ohci_dump(ohci); - ohci_shutdown(ohci_to_hcd(ohci)); + _ohci_shutdown(ohci_to_hcd(ohci)); goto done; } else { /* No write back because the done queue was empty */ diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index 8616c52849c6..2b0ccd150209 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -104,7 +104,7 @@ static int xhci_rcar_is_gen2(struct device *dev) return of_device_is_compatible(node, "renesas,xhci-r8a7790") || of_device_is_compatible(node, "renesas,xhci-r8a7791") || of_device_is_compatible(node, "renesas,xhci-r8a7793") || - of_device_is_compatible(node, "renensas,rcar-gen2-xhci"); + of_device_is_compatible(node, "renesas,rcar-gen2-xhci"); } static int xhci_rcar_is_gen3(struct device *dev) diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index dafc65911fc0..2ff7c911fbd0 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1194,6 +1194,16 @@ static int tegra_xusb_probe(struct platform_device *pdev) tegra_xusb_config(tegra, regs); + /* + * The XUSB Falcon microcontroller can only address 40 bits, so set + * the DMA mask accordingly. 
+ */ + err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40)); + if (err < 0) { + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); + goto put_rpm; + } + err = tegra_xusb_load_firmware(tegra); if (err < 0) { dev_err(&pdev->dev, "failed to load firmware: %d\n", err); diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index cc794e25a0b6..1d9ce9cbc831 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -38,7 +38,7 @@ MODULE_LICENSE("GPL"); static int auto_delink_en = 1; module_param(auto_delink_en, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(auto_delink_en, "enable auto delink"); +MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])"); #ifdef CONFIG_REALTEK_AUTOPM static int ss_en = 1; @@ -996,12 +996,15 @@ static int init_realtek_cr(struct us_data *us) goto INIT_FAIL; } - if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) || - CHECK_FW_VER(chip, 0x5901)) - SET_AUTO_DELINK(chip); - if (STATUS_LEN(chip) == 16) { - if (SUPPORT_AUTO_DELINK(chip)) + if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) || + CHECK_PID(chip, 0x0159)) { + if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) || + CHECK_FW_VER(chip, 0x5901)) SET_AUTO_DELINK(chip); + if (STATUS_LEN(chip) == 16) { + if (SUPPORT_AUTO_DELINK(chip)) + SET_AUTO_DELINK(chip); + } } #ifdef CONFIG_REALTEK_AUTOPM if (ss_en) diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index ea0d27a94afe..1cd9b6305b06 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2100,7 +2100,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201, US_FL_IGNORE_RESIDUE ), /* Reported by Michael Büsch <m@bues.ch> */ -UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116, +UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117, "JMicron", "USB to ATA/ATAPI Bridge", USB_SC_DEVICE, USB_PR_DEVICE, NULL, diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 15abe1d9958f..bcfdb55fd198 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -1446,7 +1446,7 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo, else if ((pdo_min_voltage(pdo[i]) == pdo_min_voltage(pdo[i - 1])) && (pdo_max_voltage(pdo[i]) == - pdo_min_voltage(pdo[i - 1]))) + pdo_max_voltage(pdo[i - 1]))) return PDO_ERR_DUPE_PDO; break; /* diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 4b21a90015a9..99caf77df4a2 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -152,5 +152,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "2.21" +#define CIFS_VERSION "2.22" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index e23234207fc2..592a6cea2b79 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -579,6 +579,7 @@ extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page, unsigned int *len, unsigned int *offset); void extract_unc_hostname(const char *unc, const char **h, size_t *len); +int copy_path_name(char *dst, const char *src); #ifdef CONFIG_CIFS_DFS_UPCALL static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index e2f95965065d..3907653e63c7 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -942,10 +942,8 @@ PsxDelete: PATH_MAX, nls_codepage, remap); name_len++; /* 
trailing null */ name_len *= 2; - } else { /* BB add path length overrun check */ - name_len = strnlen(fileName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, fileName, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, fileName); } params = 6 + name_len; @@ -1015,10 +1013,8 @@ DelFileRetry: remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve check for buffer overruns BB */ - name_len = strnlen(name, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->fileName, name, name_len); + } else { + name_len = copy_path_name(pSMB->fileName, name); } pSMB->SearchAttributes = cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM); @@ -1062,10 +1058,8 @@ RmDirRetry: remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve check for buffer overruns BB */ - name_len = strnlen(name, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->DirName, name, name_len); + } else { + name_len = copy_path_name(pSMB->DirName, name); } pSMB->BufferFormat = 0x04; @@ -1107,10 +1101,8 @@ MkDirRetry: remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve check for buffer overruns BB */ - name_len = strnlen(name, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->DirName, name, name_len); + } else { + name_len = copy_path_name(pSMB->DirName, name); } pSMB->BufferFormat = 0x04; @@ -1157,10 +1149,8 @@ PsxCreat: PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(name, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, name, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, name); } params = 6 + name_len; @@ -1324,11 +1314,9 @@ OldOpenRetry: fileName, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve check for buffer overruns BB */ + } else { count = 0; /* no pad */ - name_len = strnlen(fileName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->fileName, fileName, name_len); + name_len = copy_path_name(pSMB->fileName, fileName); } if (*pOplock & REQ_OPLOCK) pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK); @@ -1442,11 +1430,8 @@ openRetry: /* BB improve check for buffer overruns BB */ /* no pad */ count = 0; - name_len = strnlen(path, PATH_MAX); - /* trailing null */ - name_len++; + name_len = copy_path_name(req->fileName, path); req->NameLength = cpu_to_le16(name_len); - strncpy(req->fileName, path, name_len); } if (*oplock & REQ_OPLOCK) @@ -2812,15 +2797,10 @@ renameRetry: remap); name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; name_len2 *= 2; /* convert to bytes */ - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(from_name, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->OldFileName, from_name, name_len); - name_len2 = strnlen(to_name, PATH_MAX); - name_len2++; /* trailing null */ + } else { + name_len = copy_path_name(pSMB->OldFileName, from_name); + name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name); pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ - strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2); - name_len2++; /* trailing null */ name_len2++; /* signature byte */ } @@ -2962,15 +2942,10 @@ copyRetry: toName, PATH_MAX, nls_codepage, remap); name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; name_len2 *= 2; /* convert to bytes */ - } else { /* BB improve the check for 
buffer overruns BB */ - name_len = strnlen(fromName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->OldFileName, fromName, name_len); - name_len2 = strnlen(toName, PATH_MAX); - name_len2++; /* trailing null */ + } else { + name_len = copy_path_name(pSMB->OldFileName, fromName); pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ - strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2); - name_len2++; /* trailing null */ + name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, toName); name_len2++; /* signature byte */ } @@ -3021,10 +2996,8 @@ createSymLinkRetry: name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(fromName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, fromName, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, fromName); } params = 6 + name_len; pSMB->MaxSetupCount = 0; @@ -3044,10 +3017,8 @@ createSymLinkRetry: PATH_MAX, nls_codepage, remap); name_len_target++; /* trailing null */ name_len_target *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len_target = strnlen(toName, PATH_MAX); - name_len_target++; /* trailing null */ - strncpy(data_offset, toName, name_len_target); + } else { + name_len_target = copy_path_name(data_offset, toName); } pSMB->MaxParameterCount = cpu_to_le16(2); @@ -3109,10 +3080,8 @@ createHardLinkRetry: name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(toName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, toName, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, toName); } params = 6 + name_len; pSMB->MaxSetupCount = 0; @@ -3131,10 +3100,8 @@ createHardLinkRetry: PATH_MAX, nls_codepage, remap); name_len_target++; /* trailing null */ name_len_target *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len_target = strnlen(fromName, PATH_MAX); - name_len_target++; /* trailing null */ - strncpy(data_offset, fromName, name_len_target); + } else { + name_len_target = copy_path_name(data_offset, fromName); } pSMB->MaxParameterCount = cpu_to_le16(2); @@ -3213,15 +3180,10 @@ winCreateHardLinkRetry: remap); name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; name_len2 *= 2; /* convert to bytes */ - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(from_name, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->OldFileName, from_name, name_len); - name_len2 = strnlen(to_name, PATH_MAX); - name_len2++; /* trailing null */ + } else { + name_len = copy_path_name(pSMB->OldFileName, from_name); pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ - strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2); - name_len2++; /* trailing null */ + name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name); name_len2++; /* signature byte */ } @@ -3271,10 +3233,8 @@ querySymLinkRetry: remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(searchName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, searchName, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, searchName); } params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; @@ -3691,10 +3651,8 @@ queryAclRetry: name_len *= 2; pSMB->FileName[name_len] = 0; pSMB->FileName[name_len+1] = 0; - } else { /* BB improve the 
check for buffer overruns BB */ - name_len = strnlen(searchName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, searchName, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, searchName); } params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; @@ -3776,10 +3734,8 @@ setAclRetry: PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(fileName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, fileName, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, fileName); } params = 6 + name_len; pSMB->MaxParameterCount = cpu_to_le16(2); @@ -4184,9 +4140,7 @@ QInfRetry: name_len++; /* trailing null */ name_len *= 2; } else { - name_len = strnlen(search_name, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, search_name, name_len); + name_len = copy_path_name(pSMB->FileName, search_name); } pSMB->BufferFormat = 0x04; name_len++; /* account for buffer type byte */ @@ -4321,10 +4275,8 @@ QPathInfoRetry: PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(search_name, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, search_name, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, search_name); } params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; @@ -4490,10 +4442,8 @@ UnixQPathInfoRetry: PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(searchName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, searchName, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, searchName); } params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; @@ -4593,17 +4543,16 @@ findFirstRetry: pSMB->FileName[name_len+1] = 0; name_len += 2; } - } else { /* BB add check for overrun of SMB buf BB */ - name_len = strnlen(searchName, PATH_MAX); -/* BB fix here and in unicode clause above ie - if (name_len > buffersize-header) - free buffer exit; BB */ - strncpy(pSMB->FileName, searchName, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, searchName); if (msearch) { - pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb); - pSMB->FileName[name_len+1] = '*'; - pSMB->FileName[name_len+2] = 0; - name_len += 3; + if (WARN_ON_ONCE(name_len > PATH_MAX-2)) + name_len = PATH_MAX-2; + /* overwrite nul byte */ + pSMB->FileName[name_len-1] = CIFS_DIR_SEP(cifs_sb); + pSMB->FileName[name_len] = '*'; + pSMB->FileName[name_len+1] = 0; + name_len += 2; } } @@ -4898,10 +4847,8 @@ GetInodeNumberRetry: remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(search_name, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, search_name, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, search_name); } params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; @@ -5008,9 +4955,7 @@ getDFSRetry: name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(search_name, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->RequestFileName, search_name, name_len); + name_len = copy_path_name(pSMB->RequestFileName, 
search_name); } if (ses->server->sign) @@ -5663,10 +5608,8 @@ SetEOFRetry: PATH_MAX, cifs_sb->local_nls, remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(file_name, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, file_name, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, file_name); } params = 6 + name_len; data_count = sizeof(struct file_end_of_file_info); @@ -5959,10 +5902,8 @@ SetTimesRetry: PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(fileName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, fileName, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, fileName); } params = 6 + name_len; @@ -6040,10 +5981,8 @@ SetAttrLgcyRetry: PATH_MAX, nls_codepage); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(fileName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->fileName, fileName, name_len); + } else { + name_len = copy_path_name(pSMB->fileName, fileName); } pSMB->attr = cpu_to_le16(dos_attrs); pSMB->BufferFormat = 0x04; @@ -6203,10 +6142,8 @@ setPermsRetry: PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(file_name, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, file_name, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, file_name); } params = 6 + name_len; @@ -6298,10 +6235,8 @@ QAllEAsRetry: PATH_MAX, nls_codepage, remap); list_len++; /* trailing null */ list_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - list_len = strnlen(searchName, PATH_MAX); - list_len++; /* trailing null */ - strncpy(pSMB->FileName, searchName, list_len); + } else { + list_len = copy_path_name(pSMB->FileName, searchName); } params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */; @@ -6480,10 +6415,8 @@ SetEARetry: PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(fileName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, fileName, name_len); + } else { + name_len = copy_path_name(pSMB->FileName, fileName); } params = 6 + name_len; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 1795e80cbdf7..5299effa6f7d 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2981,6 +2981,7 @@ static int cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) { int rc = 0; + int is_domain = 0; const char *delim, *payload; char *desc; ssize_t len; @@ -3028,6 +3029,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) rc = PTR_ERR(key); goto out_err; } + is_domain = 1; } down_read(&key->sem); @@ -3085,6 +3087,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) goto out_key_put; } + /* + * If we have a domain key then we must set the domainName in the + * for the request. 
+ */ + if (is_domain && ses->domainName) { + vol->domainname = kstrndup(ses->domainName, + strlen(ses->domainName), + GFP_KERNEL); + if (!vol->domainname) { + cifs_dbg(FYI, "Unable to allocate %zd bytes for " + "domain\n", len); + rc = -ENOMEM; + kfree(vol->username); + vol->username = NULL; + kzfree(vol->password); + vol->password = NULL; + goto out_key_put; + } + } + out_key_put: up_read(&key->sem); key_put(key); @@ -4209,16 +4231,19 @@ build_unc_path_to_root(const struct smb_vol *vol, strlen(vol->prepath) + 1 : 0; unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1); + if (unc_len > MAX_TREE_SIZE) + return ERR_PTR(-EINVAL); + full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL); if (full_path == NULL) return ERR_PTR(-ENOMEM); - strncpy(full_path, vol->UNC, unc_len); + memcpy(full_path, vol->UNC, unc_len); pos = full_path + unc_len; if (pplen) { *pos = CIFS_DIR_SEP(cifs_sb); - strncpy(pos + 1, vol->prepath, pplen); + memcpy(pos + 1, vol->prepath, pplen); pos += pplen; } diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index f26a48dd2e39..be424e81e3ad 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -69,11 +69,10 @@ cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb, return full_path; if (dfsplen) - strncpy(full_path, tcon->treeName, dfsplen); + memcpy(full_path, tcon->treeName, dfsplen); full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb); - strncpy(full_path + dfsplen + 1, vol->prepath, pplen); + memcpy(full_path + dfsplen + 1, vol->prepath, pplen); convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb)); - full_path[dfsplen + pplen] = 0; /* add trailing null */ return full_path; } diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index f383877a6511..5ad83bdb9bea 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -1011,3 +1011,25 @@ void extract_unc_hostname(const char *unc, const char **h, size_t *len) *h = unc; *len = end - unc; } + +/** + * copy_path_name - copy src path to dst, possibly truncating + * + * returns number of bytes written (including trailing nul) + */ +int copy_path_name(char *dst, const char *src) +{ + int name_len; + + /* + * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it + * will truncate and strlen(dst) will be PATH_MAX-1 + */ + name_len = strscpy(dst, src, PATH_MAX); + if (WARN_ON_ONCE(name_len < 0)) + name_len = PATH_MAX-1; + + /* we count the trailing nul */ + name_len++; + return name_len; +} diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index dcd49ad60c83..4c764ff7edd2 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -159,13 +159,16 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, const struct nls_table *nls_cp) { char *bcc_ptr = *pbcc_area; + int len; /* copy user */ /* BB what about null user mounts - check that we do this BB */ /* copy user */ if (ses->user_name != NULL) { - strncpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN); - bcc_ptr += strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN); + len = strscpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN); + if (WARN_ON_ONCE(len < 0)) + len = CIFS_MAX_USERNAME_LEN - 1; + bcc_ptr += len; } /* else null user mount */ *bcc_ptr = 0; @@ -173,8 +176,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, /* copy domain */ if (ses->domainName != NULL) { - strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); - bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); + len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); + if (WARN_ON_ONCE(len < 0)) + len = CIFS_MAX_DOMAINNAME_LEN - 1; + bcc_ptr += len; } /* else we will 
send a null domain name so the server will default to its own domain */ *bcc_ptr = 0; @@ -242,9 +247,10 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft, kfree(ses->serverOS); - ses->serverOS = kzalloc(len + 1, GFP_KERNEL); + ses->serverOS = kmalloc(len + 1, GFP_KERNEL); if (ses->serverOS) { - strncpy(ses->serverOS, bcc_ptr, len); + memcpy(ses->serverOS, bcc_ptr, len); + ses->serverOS[len] = 0; if (strncmp(ses->serverOS, "OS/2", 4) == 0) cifs_dbg(FYI, "OS/2 server\n"); } @@ -258,9 +264,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft, kfree(ses->serverNOS); - ses->serverNOS = kzalloc(len + 1, GFP_KERNEL); - if (ses->serverNOS) - strncpy(ses->serverNOS, bcc_ptr, len); + ses->serverNOS = kmalloc(len + 1, GFP_KERNEL); + if (ses->serverNOS) { + memcpy(ses->serverNOS, bcc_ptr, len); + ses->serverNOS[len] = 0; + } bcc_ptr += len + 1; bleft -= len + 1; diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h index cbd9d8495690..88e1e6304a71 100644 --- a/include/linux/logic_pio.h +++ b/include/linux/logic_pio.h @@ -117,6 +117,7 @@ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode); unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode, resource_size_t hw_addr, resource_size_t size); int logic_pio_register_range(struct logic_pio_hwaddr *newrange); +void logic_pio_unregister_range(struct logic_pio_hwaddr *range); resource_size_t logic_pio_to_hwaddr(unsigned long pio); unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index d77d717c620c..3f38c30d2f13 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -215,8 +215,9 @@ enum node_stat_item { NR_INACTIVE_FILE, /* " " " " " */ NR_ACTIVE_FILE, /* " " " " " */ NR_UNEVICTABLE, /* " " " " " */ - NR_SLAB_RECLAIMABLE, - NR_SLAB_UNRECLAIMABLE, + NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */ + NR_SLAB_UNRECLAIMABLE, /* and this one without looking at + * memcg_flush_percpu_vmstats() first. 
*/ NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ WORKINGSET_NODES, diff --git a/include/linux/phy.h b/include/linux/phy.h index d26779f1fb6b..a7ecbe0e55aa 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1117,6 +1117,7 @@ int genphy_c45_an_disable_aneg(struct phy_device *phydev); int genphy_c45_read_mdix(struct phy_device *phydev); int genphy_c45_pma_read_abilities(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); +int genphy_c45_config_aneg(struct phy_device *phydev); /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev); diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 5150436783e8..30a8cdcfd4a4 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -548,6 +548,7 @@ extern int trace_event_get_offsets(struct trace_event_call *call); #define is_signed_type(type) (((type)(-1)) < (type)1) +int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); int trace_set_clr_event(const char *system, const char *event, int set); /* diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h index f37d12877754..adcc6a97db61 100644 --- a/include/math-emu/op-common.h +++ b/include/math-emu/op-common.h @@ -308,6 +308,7 @@ do { \ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ R##_e = X##_e; \ + /* Fall through */ \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ @@ -318,6 +319,7 @@ do { \ \ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \ R##_e = Y##_e; \ + /* Fall through */ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ @@ -415,6 +417,7 @@ do { \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ R##_s = X##_s; \ + /* Fall through */ \ \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ @@ -428,6 +431,7 @@ do { \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ R##_s = Y##_s; \ + /* Fall through */ \ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ @@ -493,6 +497,7 @@ do { \ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ FP_SET_EXCEPTION(FP_EX_DIVZERO); \ + /* Fall through */ \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ R##_c = FP_CLS_INF; \ diff --git a/include/net/act_api.h b/include/net/act_api.h index c61a1bf4e3de..3a1a72990fce 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -15,6 +15,7 @@ struct tcf_idrinfo { struct mutex lock; struct idr action_idr; + struct net *net; }; struct tc_action_ops; @@ -108,7 +109,7 @@ struct tc_action_net { }; static inline -int tc_action_net_init(struct tc_action_net *tn, +int tc_action_net_init(struct net *net, struct tc_action_net *tn, const struct tc_action_ops *ops) { int err = 0; @@ -117,6 +118,7 @@ int tc_action_net_init(struct tc_action_net *tn, if (!tn->idrinfo) return -ENOMEM; tn->ops = ops; + tn->idrinfo->net = net; mutex_init(&tn->idrinfo->lock); idr_init(&tn->idrinfo->action_idr); return err; diff --git a/include/net/psample.h b/include/net/psample.h index 37a4df2325b2..6b578ce69cd8 100644 --- a/include/net/psample.h +++ 
b/include/net/psample.h @@ -11,6 +11,7 @@ struct psample_group { u32 group_num; u32 refcount; u32 seq; + struct rcu_head rcu; }; struct psample_group *psample_group_get(struct net *net, u32 group_num); diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index fa06b528c73c..a13a62db3565 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -23,20 +23,17 @@ #define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY enum rxrpc_skb_trace { - rxrpc_skb_rx_cleaned, - rxrpc_skb_rx_freed, - rxrpc_skb_rx_got, - rxrpc_skb_rx_lost, - rxrpc_skb_rx_purged, - rxrpc_skb_rx_received, - rxrpc_skb_rx_rotated, - rxrpc_skb_rx_seen, - rxrpc_skb_tx_cleaned, - rxrpc_skb_tx_freed, - rxrpc_skb_tx_got, - rxrpc_skb_tx_new, - rxrpc_skb_tx_rotated, - rxrpc_skb_tx_seen, + rxrpc_skb_cleaned, + rxrpc_skb_freed, + rxrpc_skb_got, + rxrpc_skb_lost, + rxrpc_skb_new, + rxrpc_skb_purged, + rxrpc_skb_received, + rxrpc_skb_rotated, + rxrpc_skb_seen, + rxrpc_skb_unshared, + rxrpc_skb_unshared_nomem, }; enum rxrpc_local_trace { @@ -228,20 +225,17 @@ enum rxrpc_tx_point { * Declare tracing information enums and their string mappings for display. */ #define rxrpc_skb_traces \ - EM(rxrpc_skb_rx_cleaned, "Rx CLN") \ - EM(rxrpc_skb_rx_freed, "Rx FRE") \ - EM(rxrpc_skb_rx_got, "Rx GOT") \ - EM(rxrpc_skb_rx_lost, "Rx *L*") \ - EM(rxrpc_skb_rx_purged, "Rx PUR") \ - EM(rxrpc_skb_rx_received, "Rx RCV") \ - EM(rxrpc_skb_rx_rotated, "Rx ROT") \ - EM(rxrpc_skb_rx_seen, "Rx SEE") \ - EM(rxrpc_skb_tx_cleaned, "Tx CLN") \ - EM(rxrpc_skb_tx_freed, "Tx FRE") \ - EM(rxrpc_skb_tx_got, "Tx GOT") \ - EM(rxrpc_skb_tx_new, "Tx NEW") \ - EM(rxrpc_skb_tx_rotated, "Tx ROT") \ - E_(rxrpc_skb_tx_seen, "Tx SEE") + EM(rxrpc_skb_cleaned, "CLN") \ + EM(rxrpc_skb_freed, "FRE") \ + EM(rxrpc_skb_got, "GOT") \ + EM(rxrpc_skb_lost, "*L*") \ + EM(rxrpc_skb_new, "NEW") \ + EM(rxrpc_skb_purged, "PUR") \ + EM(rxrpc_skb_received, "RCV") \ + EM(rxrpc_skb_rotated, "ROT") \ + EM(rxrpc_skb_seen, "SEE") \ + EM(rxrpc_skb_unshared, "UNS") \ + E_(rxrpc_skb_unshared_nomem, "US0") #define rxrpc_local_traces \ EM(rxrpc_local_got, "GOT") \ @@ -643,13 +637,14 @@ TRACE_EVENT(rxrpc_call, TRACE_EVENT(rxrpc_skb, TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op, - int usage, int mod_count, const void *where), + int usage, int mod_count, u8 flags, const void *where), - TP_ARGS(skb, op, usage, mod_count, where), + TP_ARGS(skb, op, usage, mod_count, flags, where), TP_STRUCT__entry( __field(struct sk_buff *, skb ) __field(enum rxrpc_skb_trace, op ) + __field(u8, flags ) __field(int, usage ) __field(int, mod_count ) __field(const void *, where ) @@ -657,14 +652,16 @@ TRACE_EVENT(rxrpc_skb, TP_fast_assign( __entry->skb = skb; + __entry->flags = flags; __entry->op = op; __entry->usage = usage; __entry->mod_count = mod_count; __entry->where = where; ), - TP_printk("s=%p %s u=%d m=%d p=%pSR", + TP_printk("s=%p %cx %s u=%d m=%d p=%pSR", __entry->skb, + __entry->flags & RXRPC_SKB_TX_BUFFER ? 
'T' : 'R', __print_symbolic(__entry->op, rxrpc_skb_traces), __entry->usage, __entry->mod_count, diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 8191a7db2777..66088a9e9b9e 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -890,7 +890,8 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog, static int bpf_jit_blind_insn(const struct bpf_insn *from, const struct bpf_insn *aux, - struct bpf_insn *to_buff) + struct bpf_insn *to_buff, + bool emit_zext) { struct bpf_insn *to = to_buff; u32 imm_rnd = get_random_int(); @@ -1005,6 +1006,8 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from, case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */ *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); + if (emit_zext) + *to++ = BPF_ZEXT_REG(BPF_REG_AX); *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); break; @@ -1088,7 +1091,8 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) insn[1].code == 0) memcpy(aux, insn, sizeof(aux)); - rewritten = bpf_jit_blind_insn(insn, aux, insn_buff); + rewritten = bpf_jit_blind_insn(insn, aux, insn_buff, + clone->aux->verifier_zext); if (!rewritten) continue; diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 95a260f9214b..136ce049c4ad 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -263,8 +263,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, { char namebuf[KSYM_NAME_LEN]; - if (is_ksym_addr(addr)) - return !!get_symbol_pos(addr, symbolsize, offset); + if (is_ksym_addr(addr)) { + get_symbol_pos(addr, symbolsize, offset); + return 1; + } return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) || !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); } diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index eca34503f178..f9821a3374e9 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -3095,6 +3095,14 @@ t_probe_next(struct seq_file *m, loff_t *pos) hnd = &iter->probe_entry->hlist; hash = iter->probe->ops.func_hash->filter_hash; + + /* + * A probe being registered may temporarily have an empty hash + * and it's at the end of the func_probes list. + */ + if (!hash || hash == EMPTY_HASH) + return NULL; + size = 1 << hash->size_bits; retry: @@ -4320,12 +4328,21 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr, mutex_unlock(&ftrace_lock); + /* + * Note, there's a small window here that the func_hash->filter_hash + * may be NULL or empty. Need to be carefule when reading the loop. + */ mutex_lock(&probe->ops.func_hash->regex_lock); orig_hash = &probe->ops.func_hash->filter_hash; old_hash = *orig_hash; hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); + if (!hash) { + ret = -ENOMEM; + goto out; + } + ret = ftrace_match_records(hash, glob, strlen(glob)); /* Nothing found? */ diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 525a97fbbc60..563e80f9006a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1567,9 +1567,9 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, /** * update_max_tr_single - only copy one trace over, and reset the rest - * @tr - tracer - * @tsk - task with the latency - * @cpu - the cpu of the buffer to copy. + * @tr: tracer + * @tsk: task with the latency + * @cpu: the cpu of the buffer to copy. * * Flip the trace of a single CPU buffer between the @tr and the max_tr. 
*/ @@ -1767,7 +1767,7 @@ static void __init apply_trace_boot_options(void); /** * register_tracer - register a tracer with the ftrace system. - * @type - the plugin for the tracer + * @type: the plugin for the tracer * * Register a new plugin tracer. */ @@ -2230,9 +2230,9 @@ static bool tracing_record_taskinfo_skip(int flags) /** * tracing_record_taskinfo - record the task info of a task * - * @task - task to record - * @flags - TRACE_RECORD_CMDLINE for recording comm - * - TRACE_RECORD_TGID for recording tgid + * @task: task to record + * @flags: TRACE_RECORD_CMDLINE for recording comm + * TRACE_RECORD_TGID for recording tgid */ void tracing_record_taskinfo(struct task_struct *task, int flags) { @@ -2258,10 +2258,10 @@ void tracing_record_taskinfo(struct task_struct *task, int flags) /** * tracing_record_taskinfo_sched_switch - record task info for sched_switch * - * @prev - previous task during sched_switch - * @next - next task during sched_switch - * @flags - TRACE_RECORD_CMDLINE for recording comm - * TRACE_RECORD_TGID for recording tgid + * @prev: previous task during sched_switch + * @next: next task during sched_switch + * @flags: TRACE_RECORD_CMDLINE for recording comm + * TRACE_RECORD_TGID for recording tgid */ void tracing_record_taskinfo_sched_switch(struct task_struct *prev, struct task_struct *next, int flags) @@ -3072,7 +3072,9 @@ static void trace_printk_start_stop_comm(int enabled) /** * trace_vbprintk - write binary msg to tracing buffer - * + * @ip: The address of the caller + * @fmt: The string format to write to the buffer + * @args: Arguments for @fmt */ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index c7506bc81b75..648930823b57 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -787,7 +787,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, return ret; } -static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) +int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) { char *event = NULL, *sub = NULL, *match; int ret; diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index dbef0d135075..fb6bfbc5bf86 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -895,7 +895,8 @@ void trace_probe_cleanup(struct trace_probe *tp) for (i = 0; i < tp->nr_args; i++) traceprobe_free_probe_arg(&tp->args[i]); - kfree(call->class->system); + if (call->class) + kfree(call->class->system); kfree(call->name); kfree(call->print_fmt); } diff --git a/lib/kfifo.c b/lib/kfifo.c index 117ad0e7fbf4..70dab9ac7827 100644 --- a/lib/kfifo.c +++ b/lib/kfifo.c @@ -68,7 +68,8 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer, { size /= esize; - size = roundup_pow_of_two(size); + if (!is_power_of_2(size)) + size = rounddown_pow_of_two(size); fifo->in = 0; fifo->out = 0; diff --git a/lib/logic_pio.c b/lib/logic_pio.c index feea48fd1a0d..905027574e5d 100644 --- a/lib/logic_pio.c +++ b/lib/logic_pio.c @@ -35,7 +35,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range) struct logic_pio_hwaddr *range; resource_size_t start; resource_size_t end; - resource_size_t mmio_sz = 0; + resource_size_t mmio_end = 0; resource_size_t iio_sz = MMIO_UPPER_LIMIT; int ret = 0; @@ -46,7 +46,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range) end = new_range->hw_start + new_range->size; mutex_lock(&io_range_mutex); - list_for_each_entry_rcu(range, &io_range_list, 
list) { + list_for_each_entry(range, &io_range_list, list) { if (range->fwnode == new_range->fwnode) { /* range already there */ goto end_register; @@ -56,7 +56,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range) /* for MMIO ranges we need to check for overlap */ if (start >= range->hw_start + range->size || end < range->hw_start) { - mmio_sz += range->size; + mmio_end = range->io_start + range->size; } else { ret = -EFAULT; goto end_register; @@ -69,16 +69,16 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range) /* range not registered yet, check for available space */ if (new_range->flags == LOGIC_PIO_CPU_MMIO) { - if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) { + if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) { /* if it's too big check if 64K space can be reserved */ - if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) { + if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) { ret = -E2BIG; goto end_register; } new_range->size = SZ_64K; pr_warn("Requested IO range too big, new size set to 64K\n"); } - new_range->io_start = mmio_sz; + new_range->io_start = mmio_end; } else if (new_range->flags == LOGIC_PIO_INDIRECT) { if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) { ret = -E2BIG; @@ -99,6 +99,20 @@ end_register: } /** + * logic_pio_unregister_range - unregister a logical PIO range for a host + * @range: pointer to the IO range which has been already registered. + * + * Unregister a previously-registered IO range node. + */ +void logic_pio_unregister_range(struct logic_pio_hwaddr *range) +{ + mutex_lock(&io_range_mutex); + list_del_rcu(&range->list); + mutex_unlock(&io_range_mutex); + synchronize_rcu(); +} + +/** * find_io_range_by_fwnode - find logical PIO range for given FW node * @fwnode: FW node handle associated with logical PIO range * @@ -108,26 +122,38 @@ end_register: */ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode) { - struct logic_pio_hwaddr *range; + struct logic_pio_hwaddr *range, *found_range = NULL; + rcu_read_lock(); list_for_each_entry_rcu(range, &io_range_list, list) { - if (range->fwnode == fwnode) - return range; + if (range->fwnode == fwnode) { + found_range = range; + break; + } } - return NULL; + rcu_read_unlock(); + + return found_range; } /* Return a registered range given an input PIO token */ static struct logic_pio_hwaddr *find_io_range(unsigned long pio) { - struct logic_pio_hwaddr *range; + struct logic_pio_hwaddr *range, *found_range = NULL; + rcu_read_lock(); list_for_each_entry_rcu(range, &io_range_list, list) { - if (in_range(pio, range->io_start, range->size)) - return range; + if (in_range(pio, range->io_start, range->size)) { + found_range = range; + break; + } } - pr_err("PIO entry token %lx invalid\n", pio); - return NULL; + rcu_read_unlock(); + + if (!found_range) + pr_err("PIO entry token 0x%lx invalid\n", pio); + + return found_range; } /** @@ -180,14 +206,23 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr) { struct logic_pio_hwaddr *range; + rcu_read_lock(); list_for_each_entry_rcu(range, &io_range_list, list) { if (range->flags != LOGIC_PIO_CPU_MMIO) continue; - if (in_range(addr, range->hw_start, range->size)) - return addr - range->hw_start + range->io_start; + if (in_range(addr, range->hw_start, range->size)) { + unsigned long cpuaddr; + + cpuaddr = addr - range->hw_start + range->io_start; + + rcu_read_unlock(); + return cpuaddr; + } } - pr_err("addr %llx not registered in io_range_list\n", - (unsigned long long) addr); + rcu_read_unlock(); + + pr_err("addr 
%pa not registered in io_range_list\n", &addr); + return ~0UL; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 26e2999af608..9ec5e12486a7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -752,15 +752,13 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, /* Update memcg */ __mod_memcg_state(memcg, idx, val); + /* Update lruvec */ + __this_cpu_add(pn->lruvec_stat_local->count[idx], val); + x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { struct mem_cgroup_per_node *pi; - /* - * Batch local counters to keep them in sync with - * the hierarchical ones. - */ - __this_cpu_add(pn->lruvec_stat_local->count[idx], x); for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) atomic_long_add(x, &pi->lruvec_stat[idx]); x = 0; @@ -3260,37 +3258,49 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, } } -static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) +static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only) { unsigned long stat[MEMCG_NR_STAT]; struct mem_cgroup *mi; int node, cpu, i; + int min_idx, max_idx; - for (i = 0; i < MEMCG_NR_STAT; i++) + if (slab_only) { + min_idx = NR_SLAB_RECLAIMABLE; + max_idx = NR_SLAB_UNRECLAIMABLE; + } else { + min_idx = 0; + max_idx = MEMCG_NR_STAT; + } + + for (i = min_idx; i < max_idx; i++) stat[i] = 0; for_each_online_cpu(cpu) - for (i = 0; i < MEMCG_NR_STAT; i++) - stat[i] += raw_cpu_read(memcg->vmstats_percpu->stat[i]); + for (i = min_idx; i < max_idx; i++) + stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) - for (i = 0; i < MEMCG_NR_STAT; i++) + for (i = min_idx; i < max_idx; i++) atomic_long_add(stat[i], &mi->vmstats[i]); + if (!slab_only) + max_idx = NR_VM_NODE_STAT_ITEMS; + for_each_node(node) { struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; struct mem_cgroup_per_node *pi; - for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) + for (i = min_idx; i < max_idx; i++) stat[i] = 0; for_each_online_cpu(cpu) - for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) - stat[i] += raw_cpu_read( - pn->lruvec_stat_cpu->count[i]); + for (i = min_idx; i < max_idx; i++) + stat[i] += per_cpu( + pn->lruvec_stat_cpu->count[i], cpu); for (pi = pn; pi; pi = parent_nodeinfo(pi, node)) - for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) + for (i = min_idx; i < max_idx; i++) atomic_long_add(stat[i], &pi->lruvec_stat[i]); } } @@ -3306,8 +3316,8 @@ static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg) for_each_online_cpu(cpu) for (i = 0; i < NR_VM_EVENT_ITEMS; i++) - events[i] += raw_cpu_read( - memcg->vmstats_percpu->events[i]); + events[i] += per_cpu(memcg->vmstats_percpu->events[i], + cpu); for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) for (i = 0; i < NR_VM_EVENT_ITEMS; i++) @@ -3363,7 +3373,14 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) if (!parent) parent = root_mem_cgroup; + /* + * Deactivate and reparent kmem_caches. Then flush percpu + * slab statistics to have precise values at the parent and + * all ancestor levels. It's required to keep slab stats + * accurate after the reparenting of kmem_caches. + */ memcg_deactivate_kmem_caches(memcg, parent); + memcg_flush_percpu_vmstats(memcg, true); kmemcg_id = memcg->kmemcg_id; BUG_ON(kmemcg_id < 0); @@ -4740,7 +4757,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) * Flush percpu vmstats and vmevents to guarantee the value correctness * on parent's and all ancestor levels. 
*/ - memcg_flush_percpu_vmstats(memcg); + memcg_flush_percpu_vmstats(memcg, false); memcg_flush_percpu_vmevents(memcg); for_each_node(node) free_mem_cgroup_per_node_info(memcg, node); diff --git a/mm/vmscan.c b/mm/vmscan.c index c77d1e3761a7..a6c5d0b28321 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3220,6 +3220,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, #ifdef CONFIG_MEMCG +/* Only used by soft limit reclaim. Do not reuse for anything else. */ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, gfp_t gfp_mask, bool noswap, pg_data_t *pgdat, @@ -3235,7 +3236,8 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, }; unsigned long lru_pages; - set_task_reclaim_state(current, &sc.reclaim_state); + WARN_ON_ONCE(!current->reclaim_state); + sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); @@ -3253,7 +3255,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); - set_task_reclaim_state(current, NULL); *nr_scanned = sc.nr_scanned; return sc.nr_reclaimed; diff --git a/mm/z3fold.c b/mm/z3fold.c index e31cd9bd4ed5..75b7962439ff 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -1406,6 +1406,7 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) * should freak out. */ WARN(1, "Z3fold is experiencing kref problems\n"); + z3fold_page_unlock(zhdr); return false; } z3fold_page_unlock(zhdr); diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 08def3a0d200..e98bb6ab4f7e 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -2412,7 +2412,9 @@ struct zs_pool *zs_create_pool(const char *name) if (!pool->name) goto err; +#ifdef CONFIG_COMPACTION init_waitqueue_head(&pool->migration_wait); +#endif if (create_cache(pool)) goto err; diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 240ed70912d6..d78938e3e008 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -277,17 +277,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv) * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached * @buff_pos: current position in the skb * @packet_len: total length of the skb - * @tvlv_len: tvlv length of the previously considered OGM + * @ogm_packet: potential OGM in buffer * * Return: true if there is enough space for another OGM, false otherwise. 
*/ -static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, - __be16 tvlv_len) +static bool +batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, + const struct batadv_ogm_packet *ogm_packet) { int next_buff_pos = 0; - next_buff_pos += buff_pos + BATADV_OGM_HLEN; - next_buff_pos += ntohs(tvlv_len); + /* check if there is enough space for the header */ + next_buff_pos += buff_pos + sizeof(*ogm_packet); + if (next_buff_pos > packet_len) + return false; + + /* check if there is enough space for the optional TVLV */ + next_buff_pos += ntohs(ogm_packet->tvlv_len); return (next_buff_pos <= packet_len) && (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); @@ -315,7 +321,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, /* adjust all flags and log packets */ while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, - batadv_ogm_packet->tvlv_len)) { + batadv_ogm_packet)) { /* we might have aggregated direct link packets with an * ordinary base packet */ @@ -1704,7 +1710,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, /* unpack the aggregated packets and process them one by one */ while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb), - ogm_packet->tvlv_len)) { + ogm_packet)) { batadv_iv_ogm_process(skb, ogm_offset, if_incoming); ogm_offset += BATADV_OGM_HLEN; diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 319249f0f85f..dc4f7430cb5a 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -806,17 +806,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv, * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated * @buff_pos: current position in the skb * @packet_len: total length of the skb - * @tvlv_len: tvlv length of the previously considered OGM + * @ogm2_packet: potential OGM2 in buffer * * Return: true if there is enough space for another OGM, false otherwise. 
*/ -static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, - __be16 tvlv_len) +static bool +batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, + const struct batadv_ogm2_packet *ogm2_packet) { int next_buff_pos = 0; - next_buff_pos += buff_pos + BATADV_OGM2_HLEN; - next_buff_pos += ntohs(tvlv_len); + /* check if there is enough space for the header */ + next_buff_pos += buff_pos + sizeof(*ogm2_packet); + if (next_buff_pos > packet_len) + return false; + + /* check if there is enough space for the optional TVLV */ + next_buff_pos += ntohs(ogm2_packet->tvlv_len); return (next_buff_pos <= packet_len) && (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); @@ -993,7 +999,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, ogm_packet = (struct batadv_ogm2_packet *)skb->data; while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb), - ogm_packet->tvlv_len)) { + ogm_packet)) { batadv_v_ogm_process(skb, ogm_offset, if_incoming); ogm_offset += BATADV_OGM2_HLEN; diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c index 1804e867f715..7c9e92b2f806 100644 --- a/net/bridge/netfilter/nft_meta_bridge.c +++ b/net/bridge/netfilter/nft_meta_bridge.c @@ -53,7 +53,7 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr, goto err; br_vlan_get_proto(br_dev, &p_proto); - nft_reg_store16(dest, p_proto); + nft_reg_store16(dest, htons(p_proto)); return; } default: diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c index 5d6724cee38f..4f75df40fb12 100644 --- a/net/ceph/crypto.c +++ b/net/ceph/crypto.c @@ -136,8 +136,10 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key) if (key) { kfree(key->key); key->key = NULL; - crypto_free_sync_skcipher(key->tfm); - key->tfm = NULL; + if (key->tfm) { + crypto_free_sync_skcipher(key->tfm); + key->tfm = NULL; + } } } diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 2cf27da1baeb..849380a622ef 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work) txq = netdev_get_tx_queue(dev, q_index); HARD_TX_LOCK(dev, txq, smp_processor_id()); if (netif_xmit_frozen_or_stopped(txq) || - netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) { + !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) { skb_queue_head(&npinfo->txq, skb); HARD_TX_UNLOCK(dev, txq); local_irq_restore(flags); @@ -335,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, HARD_TX_UNLOCK(dev, txq); - if (status == NETDEV_TX_OK) + if (dev_xmit_complete(status)) break; } @@ -352,7 +352,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, } - if (status != NETDEV_TX_OK) { + if (!dev_xmit_complete(status)) { skb_queue_tail(&npinfo->txq, skb); schedule_delayed_work(&npinfo->tx_work,0); } diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index e44e6275b0a1..9c1cc2482b68 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -28,6 +28,7 @@ * * RSV - VID[9]: * To be used for further expansion of SWITCH_ID or for other purposes. + * Must be transmitted as zero and ignored on receive. * * SWITCH_ID - VID[8:6]: * Index of switch within DSA tree. Must be between 0 and @@ -35,6 +36,7 @@ * * RSV - VID[5:4]: * To be used for further expansion of PORT or for other purposes. + * Must be transmitted as zero and ignored on receive. * * PORT - VID[3:0]: * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1. 
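The tag_8021q comment above lays out how DSA encodes forwarding information inside the 802.1Q VID: VID[9] and VID[5:4] are reserved (now documented as transmitted as zero and ignored on receive), VID[8:6] carries the switch index within the DSA tree, and VID[3:0] carries the port index. As a rough, self-contained illustration of that bit layout only — the EX_VID_* names and helpers below are hypothetical, not the in-tree tag_8021q macros — packing and unpacking such a VID could look like this:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical masks mirroring the layout described in the comment:
	 * VID[9] and VID[5:4] reserved (sent as zero, ignored on receive),
	 * VID[8:6] switch index, VID[3:0] port index. Illustrative names only.
	 */
	#define EX_VID_SWITCH_ID_SHIFT	6
	#define EX_VID_SWITCH_ID_MASK	(0x7 << EX_VID_SWITCH_ID_SHIFT)	/* VID[8:6] */
	#define EX_VID_PORT_MASK	0xf				/* VID[3:0] */

	static uint16_t ex_vid_pack(unsigned int sw_id, unsigned int port)
	{
		/* Reserved bits VID[9] and VID[5:4] stay zero on transmit. */
		return ((sw_id << EX_VID_SWITCH_ID_SHIFT) & EX_VID_SWITCH_ID_MASK) |
		       (port & EX_VID_PORT_MASK);
	}

	static void ex_vid_unpack(uint16_t vid, unsigned int *sw_id, unsigned int *port)
	{
		/* Reserved bits are simply ignored on receive. */
		*sw_id = (vid & EX_VID_SWITCH_ID_MASK) >> EX_VID_SWITCH_ID_SHIFT;
		*port  = vid & EX_VID_PORT_MASK;
	}

	int main(void)
	{
		unsigned int sw_id, port;
		uint16_t vid = ex_vid_pack(2, 5);	/* switch 2, port 5 -> 0x085 */

		ex_vid_unpack(vid, &sw_id, &port);
		printf("vid=0x%03x switch=%u port=%u\n", vid, sw_id, port);
		return 0;
	}

Keeping the reserved bits zero on transmit and masking them off on receive is what lets those bits be repurposed later (as the comment notes, "for further expansion") without breaking existing receivers.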
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 051ef10374f6..94df48bcecc2 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -935,6 +935,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) return mss_now; } +/* In some cases, both sendpage() and sendmsg() could have added + * an skb to the write queue, but failed adding payload on it. + * We need to remove it to consume less memory, but more + * importantly be able to generate EPOLLOUT for Edge Trigger epoll() + * users. + */ +static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb) +{ + if (skb && !skb->len) { + tcp_unlink_write_queue(skb, sk); + if (tcp_write_queue_empty(sk)) + tcp_chrono_stop(sk, TCP_CHRONO_BUSY); + sk_wmem_free_skb(sk, skb); + } +} + ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, size_t size, int flags) { @@ -1064,6 +1080,7 @@ out: return copied; do_error: + tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk)); if (copied) goto out; out_err: @@ -1389,18 +1406,11 @@ out_nopush: sock_zerocopy_put(uarg); return copied + copied_syn; +do_error: + skb = tcp_write_queue_tail(sk); do_fault: - if (!skb->len) { - tcp_unlink_write_queue(skb, sk); - /* It is the one place in all of TCP, except connection - * reset, where we can be unlinking the send_head. - */ - if (tcp_write_queue_empty(sk)) - tcp_chrono_stop(sk, TCP_CHRONO_BUSY); - sk_wmem_free_skb(sk, skb); - } + tcp_remove_empty_skb(sk, skb); -do_error: if (copied + copied_syn) goto out; out_err: diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5c46bc4c7e8d..42abc9bd687a 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2053,7 +2053,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) if (len <= skb->len) break; - if (unlikely(TCP_SKB_CB(skb)->eor)) + if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb)) return false; len -= skb->len; @@ -2170,6 +2170,7 @@ static int tcp_mtu_probe(struct sock *sk) * we need to propagate it to the new skb. 
*/ TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; + tcp_skb_collapse_tstamp(nskb, skb); tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); } else { diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 7f3f13c37916..eaa4c2cc2fbb 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -787,14 +787,15 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) if (pmc) { im->idev = pmc->idev; if (im->mca_sfmode == MCAST_INCLUDE) { - im->mca_tomb = pmc->mca_tomb; - im->mca_sources = pmc->mca_sources; + swap(im->mca_tomb, pmc->mca_tomb); + swap(im->mca_sources, pmc->mca_sources); for (psf = im->mca_sources; psf; psf = psf->sf_next) psf->sf_crcount = idev->mc_qrv; } else { im->mca_crcount = idev->mc_qrv; } in6_dev_put(pmc->idev); + ip6_mc_clear_src(pmc); kfree(pmc); } spin_unlock_bh(&im->mca_lock); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 3c1ab870fefe..768d14c9a716 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2447,11 +2447,13 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) && sdata->control_port_over_nl80211)) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - bool noencrypt = status->flag & RX_FLAG_DECRYPTED; + bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED); cfg80211_rx_control_port(dev, skb, noencrypt); dev_kfree_skb(skb); } else { + memset(skb->cb, 0, sizeof(skb->cb)); + /* deliver to local stack */ if (rx->napi) napi_gro_receive(rx->napi, skb); @@ -2546,8 +2548,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) if (skb) { skb->protocol = eth_type_trans(skb, dev); - memset(skb->cb, 0, sizeof(skb->cb)); - ieee80211_deliver_skb_to_local_stack(skb, rx); } diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index c57d2348c505..9eca90414bb7 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c @@ -322,7 +322,7 @@ static int find_pattern(const char *data, size_t dlen, i++; } - pr_debug("Skipped up to `%c'!\n", skip); + pr_debug("Skipped up to 0x%hhx delimiter!\n", skip); *numoff = i; *numlen = getnum(data + i, dlen - i, cmd, term, numoff); diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index d97f4ea47cf3..88d4127df863 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -1035,9 +1035,14 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) table[NF_SYSCTL_CT_COUNT].data = &net->ct.count; table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum; table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid; + table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct; + table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper; #ifdef CONFIG_NF_CONNTRACK_EVENTS table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events; #endif +#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP + table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp; +#endif table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout; table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout; table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout; diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index d68c801dd614..b9e7dd6e60ce 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -228,7 +228,6 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb, { skb_orphan(skb); 
skb_dst_set_noref(skb, dst); - skb->tstamp = 0; dst_output(state->net, state->sk, skb); return NF_STOLEN; } @@ -284,6 +283,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; iph = ip_hdr(skb); ip_decrease_ttl(iph); + skb->tstamp = 0; if (unlikely(dst_xfrm(&rt->dst))) { memset(skb->cb, 0, sizeof(struct inet_skb_parm)); @@ -512,6 +512,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; ip6h = ipv6_hdr(skb); ip6h->hop_limit--; + skb->tstamp = 0; if (unlikely(dst_xfrm(&rt->dst))) { memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index ead7c6022208..b92b22ce8abd 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c @@ -101,11 +101,9 @@ static int physdev_mt_check(const struct xt_mtchk_param *par) if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) && (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || info->invert & XT_PHYSDEV_OP_BRIDGED) && - par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | - (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) { + par->hook_mask & (1 << NF_INET_LOCAL_OUT)) { pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n"); - if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) - return -EINVAL; + return -EINVAL; } if (!brnf_probed) { diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index d8da6477d6be..05249eb45082 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -525,6 +525,11 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key, return -EPFNOSUPPORT; } + /* The key extracted from the fragment that completed this datagram + * likely didn't have an L4 header, so regenerate it. + */ + ovs_flow_key_update_l3l4(skb, key); + key->ip.frag = OVS_FRAG_TYPE_NONE; skb_clear_hash(skb); skb->ignore_df = 1; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index bc89e16e0505..9d81d2c7bf82 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -523,78 +523,15 @@ static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key) } /** - * key_extract - extracts a flow key from an Ethernet frame. + * key_extract_l3l4 - extracts L3/L4 header information. * @skb: sk_buff that contains the frame, with skb->data pointing to the - * Ethernet header + * L3 header * @key: output flow key * - * The caller must ensure that skb->len >= ETH_HLEN. - * - * Returns 0 if successful, otherwise a negative errno value. - * - * Initializes @skb header fields as follows: - * - * - skb->mac_header: the L2 header. - * - * - skb->network_header: just past the L2 header, or just past the - * VLAN header, to the first byte of the L2 payload. - * - * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6 - * on output, then just past the IP header, if one is present and - * of a correct length, otherwise the same as skb->network_header. - * For other key->eth.type values it is left untouched. - * - * - skb->protocol: the type of the data starting at skb->network_header. - * Equals to key->eth.type. */ -static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) +static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key) { int error; - struct ethhdr *eth; - - /* Flags are always used as part of stats */ - key->tp.flags = 0; - - skb_reset_mac_header(skb); - - /* Link layer. 
*/ - clear_vlan(key); - if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) { - if (unlikely(eth_type_vlan(skb->protocol))) - return -EINVAL; - - skb_reset_network_header(skb); - key->eth.type = skb->protocol; - } else { - eth = eth_hdr(skb); - ether_addr_copy(key->eth.src, eth->h_source); - ether_addr_copy(key->eth.dst, eth->h_dest); - - __skb_pull(skb, 2 * ETH_ALEN); - /* We are going to push all headers that we pull, so no need to - * update skb->csum here. - */ - - if (unlikely(parse_vlan(skb, key))) - return -ENOMEM; - - key->eth.type = parse_ethertype(skb); - if (unlikely(key->eth.type == htons(0))) - return -ENOMEM; - - /* Multiple tagged packets need to retain TPID to satisfy - * skb_vlan_pop(), which will later shift the ethertype into - * skb->protocol. - */ - if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK)) - skb->protocol = key->eth.cvlan.tpid; - else - skb->protocol = key->eth.type; - - skb_reset_network_header(skb); - __skb_push(skb, skb->data - skb_mac_header(skb)); - } - skb_reset_mac_len(skb); /* Network layer. */ if (key->eth.type == htons(ETH_P_IP)) { @@ -623,6 +560,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) offset = nh->frag_off & htons(IP_OFFSET); if (offset) { key->ip.frag = OVS_FRAG_TYPE_LATER; + memset(&key->tp, 0, sizeof(key->tp)); return 0; } if (nh->frag_off & htons(IP_MF) || @@ -740,8 +678,10 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) return error; } - if (key->ip.frag == OVS_FRAG_TYPE_LATER) + if (key->ip.frag == OVS_FRAG_TYPE_LATER) { + memset(&key->tp, 0, sizeof(key->tp)); return 0; + } if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) key->ip.frag = OVS_FRAG_TYPE_FIRST; @@ -788,6 +728,92 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) return 0; } +/** + * key_extract - extracts a flow key from an Ethernet frame. + * @skb: sk_buff that contains the frame, with skb->data pointing to the + * Ethernet header + * @key: output flow key + * + * The caller must ensure that skb->len >= ETH_HLEN. + * + * Returns 0 if successful, otherwise a negative errno value. + * + * Initializes @skb header fields as follows: + * + * - skb->mac_header: the L2 header. + * + * - skb->network_header: just past the L2 header, or just past the + * VLAN header, to the first byte of the L2 payload. + * + * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6 + * on output, then just past the IP header, if one is present and + * of a correct length, otherwise the same as skb->network_header. + * For other key->eth.type values it is left untouched. + * + * - skb->protocol: the type of the data starting at skb->network_header. + * Equals to key->eth.type. + */ +static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) +{ + struct ethhdr *eth; + + /* Flags are always used as part of stats */ + key->tp.flags = 0; + + skb_reset_mac_header(skb); + + /* Link layer. */ + clear_vlan(key); + if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) { + if (unlikely(eth_type_vlan(skb->protocol))) + return -EINVAL; + + skb_reset_network_header(skb); + key->eth.type = skb->protocol; + } else { + eth = eth_hdr(skb); + ether_addr_copy(key->eth.src, eth->h_source); + ether_addr_copy(key->eth.dst, eth->h_dest); + + __skb_pull(skb, 2 * ETH_ALEN); + /* We are going to push all headers that we pull, so no need to + * update skb->csum here. 
+ */ + + if (unlikely(parse_vlan(skb, key))) + return -ENOMEM; + + key->eth.type = parse_ethertype(skb); + if (unlikely(key->eth.type == htons(0))) + return -ENOMEM; + + /* Multiple tagged packets need to retain TPID to satisfy + * skb_vlan_pop(), which will later shift the ethertype into + * skb->protocol. + */ + if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK)) + skb->protocol = key->eth.cvlan.tpid; + else + skb->protocol = key->eth.type; + + skb_reset_network_header(skb); + __skb_push(skb, skb->data - skb_mac_header(skb)); + } + + skb_reset_mac_len(skb); + + /* Fill out L3/L4 key info, if any */ + return key_extract_l3l4(skb, key); +} + +/* In the case of conntrack fragment handling it expects L3 headers, + * add a helper. + */ +int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key) +{ + return key_extract_l3l4(skb, key); +} + int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) { int res; diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index a5506e2d4b7a..b830d5ff7af4 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -270,6 +270,7 @@ void ovs_flow_stats_clear(struct sw_flow *); u64 ovs_flow_used_time(unsigned long flow_jiffies); int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key); +int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key); int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, struct sk_buff *skb, struct sw_flow_key *key); diff --git a/net/psample/psample.c b/net/psample/psample.c index 841f198ea1a8..66e4b61a350d 100644 --- a/net/psample/psample.c +++ b/net/psample/psample.c @@ -154,7 +154,7 @@ static void psample_group_destroy(struct psample_group *group) { psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP); list_del(&group->list); - kfree(group); + kfree_rcu(group, rcu); } static struct psample_group * diff --git a/net/rds/recv.c b/net/rds/recv.c index 7e451c82595b..c8404971d5ab 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -813,6 +813,7 @@ void rds6_inc_info_copy(struct rds_incoming *inc, minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence); minfo6.len = be32_to_cpu(inc->i_hdr.h_len); + minfo6.tos = inc->i_conn->c_tos; if (flip) { minfo6.laddr = *daddr; @@ -826,6 +827,8 @@ void rds6_inc_info_copy(struct rds_incoming *inc, minfo6.fport = inc->i_hdr.h_dport; } + minfo6.flags = 0; + rds_info_copy(iter, &minfo6, sizeof(minfo6)); } #endif diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 0dbbfd1b6487..d72ddb67bb74 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -862,7 +862,6 @@ static void rxrpc_sock_destructor(struct sock *sk) static int rxrpc_release_sock(struct sock *sk) { struct rxrpc_sock *rx = rxrpc_sk(sk); - struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk)); _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt)); @@ -898,8 +897,6 @@ static int rxrpc_release_sock(struct sock *sk) rxrpc_release_calls_on_socket(rx); flush_workqueue(rxrpc_workqueue); rxrpc_purge_queue(&sk->sk_receive_queue); - rxrpc_queue_work(&rxnet->service_conn_reaper); - rxrpc_queue_work(&rxnet->client_conn_reaper); rxrpc_unuse_local(rx->local); rx->local = NULL; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index fa5b030acaa8..1091bf35a199 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -185,11 +185,17 @@ struct rxrpc_host_header { * - max 48 bytes (struct sk_buff::cb) */ struct rxrpc_skb_priv { - union { - u8 nr_jumbo; /* Number of jumbo subpackets */ - }; + atomic_t nr_ring_pins; /* Number of rxtx ring pins */ + u8 nr_subpackets; /* Number of subpackets */ + u8 rx_flags; /* Received packet flags */ +#define RXRPC_SKB_INCL_LAST 0x01 /* - Includes last packet */ +#define RXRPC_SKB_TX_BUFFER 0x02 /* - Is transmit buffer */ union { int remain; /* amount of space remaining for next write */ + + /* List of requested ACKs on subpackets */ + unsigned long rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) / + BITS_PER_LONG]; }; struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */ @@ -617,8 +623,7 @@ struct rxrpc_call { #define RXRPC_TX_ANNO_LAST 0x04 #define RXRPC_TX_ANNO_RESENT 0x08 -#define RXRPC_RX_ANNO_JUMBO 0x3f /* Jumbo subpacket number + 1 if not zero */ -#define RXRPC_RX_ANNO_JLAST 0x40 /* Set if last element of a jumbo packet */ +#define RXRPC_RX_ANNO_SUBPACKET 0x3f /* Subpacket number in jumbogram */ #define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */ rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but * not hard-ACK'd packet follows this. 
@@ -909,6 +914,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *); void rxrpc_put_client_conn(struct rxrpc_connection *); void rxrpc_discard_expired_client_conns(struct work_struct *); void rxrpc_destroy_all_client_connections(struct rxrpc_net *); +void rxrpc_clean_up_local_conns(struct rxrpc_local *); /* * conn_event.c @@ -1109,6 +1115,7 @@ void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *); void rxrpc_packet_destructor(struct sk_buff *); void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace); +void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_purge_queue(struct sk_buff_head *); diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index c767679bfa5d..cedbbb3a7c2e 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) continue; skb = call->rxtx_buffer[ix]; - rxrpc_see_skb(skb, rxrpc_skb_tx_seen); + rxrpc_see_skb(skb, rxrpc_skb_seen); if (anno_type == RXRPC_TX_ANNO_UNACK) { if (ktime_after(skb->tstamp, max_age)) { @@ -255,18 +255,18 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) continue; skb = call->rxtx_buffer[ix]; - rxrpc_get_skb(skb, rxrpc_skb_tx_got); + rxrpc_get_skb(skb, rxrpc_skb_got); spin_unlock_bh(&call->lock); if (rxrpc_send_data_packet(call, skb, true) < 0) { - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); return; } if (rxrpc_is_client_call(call)) rxrpc_expose_client_call(call); - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); spin_lock_bh(&call->lock); /* We need to clear the retransmit state, but there are two diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 60cbc81dc461..32d8dc677142 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -422,6 +422,19 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) } /* + * Clean up the RxTx skb ring. + */ +static void rxrpc_cleanup_ring(struct rxrpc_call *call) +{ + int i; + + for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { + rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned); + call->rxtx_buffer[i] = NULL; + } +} + +/* * Detach a call from its owning socket. */ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) @@ -429,7 +442,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) const void *here = __builtin_return_address(0); struct rxrpc_connection *conn = call->conn; bool put = false; - int i; _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); @@ -481,13 +493,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) conn->security->free_call_crypto(call); } - for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { - rxrpc_free_skb(call->rxtx_buffer[i], - (call->tx_phase ? 
rxrpc_skb_tx_cleaned : - rxrpc_skb_rx_cleaned)); - call->rxtx_buffer[i] = NULL; - } - + rxrpc_cleanup_ring(call); _leave(""); } @@ -570,8 +576,6 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) */ void rxrpc_cleanup_call(struct rxrpc_call *call) { - int i; - _net("DESTROY CALL %d", call->debug_id); memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); @@ -582,13 +586,8 @@ void rxrpc_cleanup_call(struct rxrpc_call *call) ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); ASSERTCMP(call->conn, ==, NULL); - /* Clean up the Rx/Tx buffer */ - for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) - rxrpc_free_skb(call->rxtx_buffer[i], - (call->tx_phase ? rxrpc_skb_tx_cleaned : - rxrpc_skb_rx_cleaned)); - - rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned); + rxrpc_cleanup_ring(call); + rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned); call_rcu(&call->rcu, rxrpc_rcu_destroy_call); } diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index aea82f909c60..3f1da1b49f69 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -1162,3 +1162,47 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet) _leave(""); } + +/* + * Clean up the client connections on a local endpoint. + */ +void rxrpc_clean_up_local_conns(struct rxrpc_local *local) +{ + struct rxrpc_connection *conn, *tmp; + struct rxrpc_net *rxnet = local->rxnet; + unsigned int nr_active; + LIST_HEAD(graveyard); + + _enter(""); + + spin_lock(&rxnet->client_conn_cache_lock); + nr_active = rxnet->nr_active_client_conns; + + list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns, + cache_link) { + if (conn->params.local == local) { + ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE); + + trace_rxrpc_client(conn, -1, rxrpc_client_discard); + if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags)) + BUG(); + conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; + list_move(&conn->cache_link, &graveyard); + nr_active--; + } + } + + rxnet->nr_active_client_conns = nr_active; + spin_unlock(&rxnet->client_conn_cache_lock); + ASSERTCMP(nr_active, >=, 0); + + while (!list_empty(&graveyard)) { + conn = list_entry(graveyard.next, + struct rxrpc_connection, cache_link); + list_del_init(&conn->cache_link); + + rxrpc_put_connection(conn); + } + + _leave(" [culled]"); +} diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index df6624c140be..a1ceef4f5cd0 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -472,7 +472,7 @@ void rxrpc_process_connection(struct work_struct *work) /* go through the conn-level event packets, releasing the ref on this * connection that each one has when we've finished with it */ while ((skb = skb_dequeue(&conn->rx_queue))) { - rxrpc_see_skb(skb, rxrpc_skb_rx_seen); + rxrpc_see_skb(skb, rxrpc_skb_seen); ret = rxrpc_process_event(conn, skb, &abort_code); switch (ret) { case -EPROTO: @@ -484,7 +484,7 @@ void rxrpc_process_connection(struct work_struct *work) goto requeue_and_leave; case -ECONNABORTED: default: - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); break; } } @@ -501,6 +501,6 @@ requeue_and_leave: protocol_error: if (rxrpc_abort_connection(conn, ret, abort_code) < 0) goto requeue_and_leave; - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); goto out; } diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 434ef392212b..ed05b6922132 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -398,7 +398,7 @@ void rxrpc_service_connection_reaper(struct 
work_struct *work) if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) continue; - if (rxnet->live) { + if (rxnet->live && !conn->params.local->dead) { idle_timestamp = READ_ONCE(conn->idle_timestamp); expire_at = idle_timestamp + rxrpc_connection_expiry * HZ; if (conn->params.local->service_closed) diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index dd47d465d1d3..d122c53c8697 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -233,7 +233,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK; skb = call->rxtx_buffer[ix]; annotation = call->rxtx_annotations[ix]; - rxrpc_see_skb(skb, rxrpc_skb_tx_rotated); + rxrpc_see_skb(skb, rxrpc_skb_rotated); call->rxtx_buffer[ix] = NULL; call->rxtx_annotations[ix] = 0; skb->next = list; @@ -258,7 +258,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, skb = list; list = skb->next; skb_mark_not_on_list(skb); - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); } return rot_last; @@ -347,7 +347,7 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call) } /* - * Scan a jumbo packet to validate its structure and to work out how many + * Scan a data packet to validate its structure and to work out how many * subpackets it contains. * * A jumbo packet is a collection of consecutive packets glued together with @@ -358,16 +358,21 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call) * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any * size. */ -static bool rxrpc_validate_jumbo(struct sk_buff *skb) +static bool rxrpc_validate_data(struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); unsigned int offset = sizeof(struct rxrpc_wire_header); unsigned int len = skb->len; - int nr_jumbo = 1; u8 flags = sp->hdr.flags; - do { - nr_jumbo++; + for (;;) { + if (flags & RXRPC_REQUEST_ACK) + __set_bit(sp->nr_subpackets, sp->rx_req_ack); + sp->nr_subpackets++; + + if (!(flags & RXRPC_JUMBO_PACKET)) + break; + if (len - offset < RXRPC_JUMBO_SUBPKTLEN) goto protocol_error; if (flags & RXRPC_LAST_PACKET) @@ -376,9 +381,10 @@ static bool rxrpc_validate_jumbo(struct sk_buff *skb) if (skb_copy_bits(skb, offset, &flags, 1) < 0) goto protocol_error; offset += sizeof(struct rxrpc_jumbo_header); - } while (flags & RXRPC_JUMBO_PACKET); + } - sp->nr_jumbo = nr_jumbo; + if (flags & RXRPC_LAST_PACKET) + sp->rx_flags |= RXRPC_SKB_INCL_LAST; return true; protocol_error: @@ -399,10 +405,10 @@ protocol_error: * (that information is encoded in the ACK packet). */ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq, - u8 annotation, bool *_jumbo_bad) + bool is_jumbo, bool *_jumbo_bad) { /* Discard normal packets that are duplicates. */ - if (annotation == 0) + if (is_jumbo) return; /* Skip jumbo subpackets that are duplicates. When we've had three or @@ -416,29 +422,30 @@ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq, } /* - * Process a DATA packet, adding the packet to the Rx ring. + * Process a DATA packet, adding the packet to the Rx ring. The caller's + * packet ref must be passed on or discarded. 
*/ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); enum rxrpc_call_state state; - unsigned int offset = sizeof(struct rxrpc_wire_header); - unsigned int ix; + unsigned int j; rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0; - rxrpc_seq_t seq = sp->hdr.seq, hard_ack; - bool immediate_ack = false, jumbo_bad = false, queued; - u16 len; - u8 ack = 0, flags, annotation = 0; + rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack; + bool immediate_ack = false, jumbo_bad = false; + u8 ack = 0; _enter("{%u,%u},{%u,%u}", - call->rx_hard_ack, call->rx_top, skb->len, seq); + call->rx_hard_ack, call->rx_top, skb->len, seq0); - _proto("Rx DATA %%%u { #%u f=%02x }", - sp->hdr.serial, seq, sp->hdr.flags); + _proto("Rx DATA %%%u { #%u f=%02x n=%u }", + sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets); state = READ_ONCE(call->state); - if (state >= RXRPC_CALL_COMPLETE) + if (state >= RXRPC_CALL_COMPLETE) { + rxrpc_free_skb(skb, rxrpc_skb_freed); return; + } if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) { unsigned long timo = READ_ONCE(call->next_req_timo); @@ -463,137 +470,137 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) !rxrpc_receiving_reply(call)) goto unlock; - call->ackr_prev_seq = seq; - + call->ackr_prev_seq = seq0; hard_ack = READ_ONCE(call->rx_hard_ack); - if (after(seq, hard_ack + call->rx_winsize)) { - ack = RXRPC_ACK_EXCEEDS_WINDOW; - ack_serial = serial; - goto ack; - } - flags = sp->hdr.flags; - if (flags & RXRPC_JUMBO_PACKET) { + if (sp->nr_subpackets > 1) { if (call->nr_jumbo_bad > 3) { ack = RXRPC_ACK_NOSPACE; ack_serial = serial; goto ack; } - annotation = 1; } -next_subpacket: - queued = false; - ix = seq & RXRPC_RXTX_BUFF_MASK; - len = skb->len; - if (flags & RXRPC_JUMBO_PACKET) - len = RXRPC_JUMBO_DATALEN; - - if (flags & RXRPC_LAST_PACKET) { - if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && - seq != call->rx_top) { - rxrpc_proto_abort("LSN", call, seq); - goto unlock; - } - } else { - if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && - after_eq(seq, call->rx_top)) { - rxrpc_proto_abort("LSA", call, seq); - goto unlock; + for (j = 0; j < sp->nr_subpackets; j++) { + rxrpc_serial_t serial = sp->hdr.serial + j; + rxrpc_seq_t seq = seq0 + j; + unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK; + bool terminal = (j == sp->nr_subpackets - 1); + bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST); + u8 flags, annotation = j; + + _proto("Rx DATA+%u %%%u { #%x t=%u l=%u }", + j, serial, seq, terminal, last); + + if (last) { + if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && + seq != call->rx_top) { + rxrpc_proto_abort("LSN", call, seq); + goto unlock; + } + } else { + if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && + after_eq(seq, call->rx_top)) { + rxrpc_proto_abort("LSA", call, seq); + goto unlock; + } } - } - trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); - if (before_eq(seq, hard_ack)) { - ack = RXRPC_ACK_DUPLICATE; - ack_serial = serial; - goto skip; - } + flags = 0; + if (last) + flags |= RXRPC_LAST_PACKET; + if (!terminal) + flags |= RXRPC_JUMBO_PACKET; + if (test_bit(j, sp->rx_req_ack)) + flags |= RXRPC_REQUEST_ACK; + trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); - if (flags & RXRPC_REQUEST_ACK && !ack) { - ack = RXRPC_ACK_REQUESTED; - ack_serial = serial; - } - - if (call->rxtx_buffer[ix]) { - rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad); - if (ack != RXRPC_ACK_DUPLICATE) { + if (before_eq(seq, hard_ack)) { ack 
= RXRPC_ACK_DUPLICATE; ack_serial = serial; + continue; } - immediate_ack = true; - goto skip; - } - /* Queue the packet. We use a couple of memory barriers here as need - * to make sure that rx_top is perceived to be set after the buffer - * pointer and that the buffer pointer is set after the annotation and - * the skb data. - * - * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window() - * and also rxrpc_fill_out_ack(). - */ - rxrpc_get_skb(skb, rxrpc_skb_rx_got); - call->rxtx_annotations[ix] = annotation; - smp_wmb(); - call->rxtx_buffer[ix] = skb; - if (after(seq, call->rx_top)) { - smp_store_release(&call->rx_top, seq); - } else if (before(seq, call->rx_top)) { - /* Send an immediate ACK if we fill in a hole */ - if (!ack) { - ack = RXRPC_ACK_DELAY; - ack_serial = serial; + if (call->rxtx_buffer[ix]) { + rxrpc_input_dup_data(call, seq, sp->nr_subpackets > 1, + &jumbo_bad); + if (ack != RXRPC_ACK_DUPLICATE) { + ack = RXRPC_ACK_DUPLICATE; + ack_serial = serial; + } + immediate_ack = true; + continue; } - immediate_ack = true; - } - if (flags & RXRPC_LAST_PACKET) { - set_bit(RXRPC_CALL_RX_LAST, &call->flags); - trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq); - } else { - trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq); - } - queued = true; - if (after_eq(seq, call->rx_expect_next)) { - if (after(seq, call->rx_expect_next)) { - _net("OOS %u > %u", seq, call->rx_expect_next); - ack = RXRPC_ACK_OUT_OF_SEQUENCE; - ack_serial = serial; - } - call->rx_expect_next = seq + 1; - } - -skip: - offset += len; - if (flags & RXRPC_JUMBO_PACKET) { - if (skb_copy_bits(skb, offset, &flags, 1) < 0) { - rxrpc_proto_abort("XJF", call, seq); - goto unlock; - } - offset += sizeof(struct rxrpc_jumbo_header); - seq++; - serial++; - annotation++; - if (flags & RXRPC_JUMBO_PACKET) - annotation |= RXRPC_RX_ANNO_JLAST; if (after(seq, hard_ack + call->rx_winsize)) { ack = RXRPC_ACK_EXCEEDS_WINDOW; ack_serial = serial; - if (!jumbo_bad) { - call->nr_jumbo_bad++; - jumbo_bad = true; + if (flags & RXRPC_JUMBO_PACKET) { + if (!jumbo_bad) { + call->nr_jumbo_bad++; + jumbo_bad = true; + } } + goto ack; } - _proto("Rx DATA Jumbo %%%u", serial); - goto next_subpacket; - } + if (flags & RXRPC_REQUEST_ACK && !ack) { + ack = RXRPC_ACK_REQUESTED; + ack_serial = serial; + } + + /* Queue the packet. We use a couple of memory barriers here as need + * to make sure that rx_top is perceived to be set after the buffer + * pointer and that the buffer pointer is set after the annotation and + * the skb data. + * + * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window() + * and also rxrpc_fill_out_ack(). + */ + if (!terminal) + rxrpc_get_skb(skb, rxrpc_skb_got); + call->rxtx_annotations[ix] = annotation; + smp_wmb(); + call->rxtx_buffer[ix] = skb; + if (after(seq, call->rx_top)) { + smp_store_release(&call->rx_top, seq); + } else if (before(seq, call->rx_top)) { + /* Send an immediate ACK if we fill in a hole */ + if (!ack) { + ack = RXRPC_ACK_DELAY; + ack_serial = serial; + } + immediate_ack = true; + } + + if (terminal) { + /* From this point on, we're not allowed to touch the + * packet any longer as its ref now belongs to the Rx + * ring. 
+ */ + skb = NULL; + } - if (queued && flags & RXRPC_LAST_PACKET && !ack) { - ack = RXRPC_ACK_DELAY; - ack_serial = serial; + if (last) { + set_bit(RXRPC_CALL_RX_LAST, &call->flags); + if (!ack) { + ack = RXRPC_ACK_DELAY; + ack_serial = serial; + } + trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq); + } else { + trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq); + } + + if (after_eq(seq, call->rx_expect_next)) { + if (after(seq, call->rx_expect_next)) { + _net("OOS %u > %u", seq, call->rx_expect_next); + ack = RXRPC_ACK_OUT_OF_SEQUENCE; + ack_serial = serial; + } + call->rx_expect_next = seq + 1; + } } ack: @@ -606,13 +613,14 @@ ack: false, true, rxrpc_propose_ack_input_data); - if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) { + if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) { trace_rxrpc_notify_socket(call->debug_id, serial); rxrpc_notify_socket(call); } unlock: spin_unlock(&call->input_lock); + rxrpc_free_skb(skb, rxrpc_skb_freed); _leave(" [queued]"); } @@ -1021,7 +1029,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, switch (sp->hdr.type) { case RXRPC_PACKET_TYPE_DATA: rxrpc_input_data(call, skb); - break; + goto no_free; case RXRPC_PACKET_TYPE_ACK: rxrpc_input_ack(call, skb); @@ -1048,6 +1056,8 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, break; } + rxrpc_free_skb(skb, rxrpc_skb_freed); +no_free: _leave(""); } @@ -1109,7 +1119,7 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local, skb_queue_tail(&local->event_queue, skb); rxrpc_queue_local(local); } else { - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); } } @@ -1124,7 +1134,7 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) skb_queue_tail(&local->reject_queue, skb); rxrpc_queue_local(local); } else { - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); } } @@ -1188,7 +1198,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) if (skb->tstamp == 0) skb->tstamp = ktime_get_real(); - rxrpc_new_skb(skb, rxrpc_skb_rx_received); + rxrpc_new_skb(skb, rxrpc_skb_received); skb_pull(skb, sizeof(struct udphdr)); @@ -1205,7 +1215,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) static int lose; if ((lose++ & 7) == 7) { trace_rxrpc_rx_lose(sp); - rxrpc_free_skb(skb, rxrpc_skb_rx_lost); + rxrpc_free_skb(skb, rxrpc_skb_lost); return 0; } } @@ -1237,9 +1247,26 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) if (sp->hdr.callNumber == 0 || sp->hdr.seq == 0) goto bad_message; - if (sp->hdr.flags & RXRPC_JUMBO_PACKET && - !rxrpc_validate_jumbo(skb)) + if (!rxrpc_validate_data(skb)) goto bad_message; + + /* Unshare the packet so that it can be modified for in-place + * decryption. + */ + if (sp->hdr.securityIndex != 0) { + struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC); + if (!nskb) { + rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem); + goto out; + } + + if (nskb != skb) { + rxrpc_eaten_skb(skb, rxrpc_skb_received); + rxrpc_new_skb(skb, rxrpc_skb_unshared); + skb = nskb; + sp = rxrpc_skb(skb); + } + } break; case RXRPC_PACKET_TYPE_CHALLENGE: @@ -1373,11 +1400,14 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) mutex_unlock(&call->user_mutex); } + /* Process a call packet; this either discards or passes on the ref + * elsewhere. 
+ */ rxrpc_input_call_packet(call, skb); - goto discard; + goto out; discard: - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); out: trace_rxrpc_rx_done(0, 0); return 0; diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c index e93a78f7c05e..3ce6d628cd75 100644 --- a/net/rxrpc/local_event.c +++ b/net/rxrpc/local_event.c @@ -90,7 +90,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local) if (skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); - rxrpc_see_skb(skb, rxrpc_skb_rx_seen); + rxrpc_see_skb(skb, rxrpc_skb_seen); _debug("{%d},{%u}", local->debug_id, sp->hdr.type); switch (sp->hdr.type) { @@ -108,7 +108,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local) break; } - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); } _leave(""); diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 72a6e12a9304..36587260cabd 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -426,11 +426,14 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local) _enter("%d", local->debug_id); + local->dead = true; + mutex_lock(&rxnet->local_mutex); list_del_init(&local->link); mutex_unlock(&rxnet->local_mutex); - ASSERT(RB_EMPTY_ROOT(&local->client_conns)); + rxrpc_clean_up_local_conns(local); + rxrpc_service_connection_reaper(&rxnet->service_conn_reaper); ASSERT(!local->service); if (socket) { diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 369e516c4bdf..935bb60fff56 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -565,7 +565,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) memset(&whdr, 0, sizeof(whdr)); while ((skb = skb_dequeue(&local->reject_queue))) { - rxrpc_see_skb(skb, rxrpc_skb_rx_seen); + rxrpc_see_skb(skb, rxrpc_skb_seen); sp = rxrpc_skb(skb); switch (skb->mark) { @@ -581,7 +581,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) ioc = 2; break; default: - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); continue; } @@ -606,7 +606,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) rxrpc_tx_point_reject); } - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); } _leave(""); diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index 7666ec72d37e..c97ebdc043e4 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -163,11 +163,11 @@ void rxrpc_error_report(struct sock *sk) _leave("UDP socket errqueue empty"); return; } - rxrpc_new_skb(skb, rxrpc_skb_rx_received); + rxrpc_new_skb(skb, rxrpc_skb_received); serr = SKB_EXT_ERR(skb); if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { _leave("UDP empty message"); - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); return; } @@ -177,7 +177,7 @@ void rxrpc_error_report(struct sock *sk) peer = NULL; if (!peer) { rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); _leave(" [no peer]"); return; } @@ -189,7 +189,7 @@ void rxrpc_error_report(struct sock *sk) serr->ee.ee_code == ICMP_FRAG_NEEDED)) { rxrpc_adjust_mtu(peer, serr); rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); rxrpc_put_peer(peer); _leave(" [MTU update]"); return; @@ -197,7 +197,7 @@ void rxrpc_error_report(struct sock *sk) rxrpc_store_error(peer, serr); rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); rxrpc_put_peer(peer); _leave(""); diff 
--git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h index 99ce322d7caa..49bb972539aa 100644 --- a/net/rxrpc/protocol.h +++ b/net/rxrpc/protocol.h @@ -89,6 +89,15 @@ struct rxrpc_jumbo_header { #define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */ #define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) +/* + * The maximum number of subpackets that can possibly fit in a UDP packet is: + * + * ((max_IP - IP_hdr - UDP_hdr) / RXRPC_JUMBO_SUBPKTLEN) + 1 + * = ((65535 - 28 - 28) / 1416) + 1 + * = 46 non-terminal packets and 1 terminal packet. + */ +#define RXRPC_MAX_NR_JUMBO 47 + /*****************************************************************************/ /* * on-the-wire Rx ACK packet data payload diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 9a7e1bc9791d..3b0becb12041 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -177,7 +177,8 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) struct sk_buff *skb; rxrpc_serial_t serial; rxrpc_seq_t hard_ack, top; - u8 flags; + bool last = false; + u8 subpacket; int ix; _enter("%d", call->debug_id); @@ -189,23 +190,25 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) hard_ack++; ix = hard_ack & RXRPC_RXTX_BUFF_MASK; skb = call->rxtx_buffer[ix]; - rxrpc_see_skb(skb, rxrpc_skb_rx_rotated); + rxrpc_see_skb(skb, rxrpc_skb_rotated); sp = rxrpc_skb(skb); - flags = sp->hdr.flags; - serial = sp->hdr.serial; - if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1; + + subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET; + serial = sp->hdr.serial + subpacket; + + if (subpacket == sp->nr_subpackets - 1 && + sp->rx_flags & RXRPC_SKB_INCL_LAST) + last = true; call->rxtx_buffer[ix] = NULL; call->rxtx_annotations[ix] = 0; /* Barrier against rxrpc_input_data(). */ smp_store_release(&call->rx_hard_ack, hard_ack); - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); - _debug("%u,%u,%02x", hard_ack, top, flags); trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack); - if (flags & RXRPC_LAST_PACKET) { + if (last) { rxrpc_end_rx_phase(call, serial); } else { /* Check to see if there's an ACK that needs sending. */ @@ -233,18 +236,19 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, struct rxrpc_skb_priv *sp = rxrpc_skb(skb); rxrpc_seq_t seq = sp->hdr.seq; u16 cksum = sp->hdr.cksum; + u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET; _enter(""); /* For all but the head jumbo subpacket, the security checksum is in a * jumbo header immediately prior to the data. 
*/ - if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) { + if (subpacket > 0) { __be16 tmp; if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0) BUG(); cksum = ntohs(tmp); - seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1; + seq += subpacket; } return call->conn->security->verify_packet(call, skb, offset, len, @@ -265,19 +269,18 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb, u8 *_annotation, unsigned int *_offset, unsigned int *_len) { + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); unsigned int offset = sizeof(struct rxrpc_wire_header); unsigned int len; int ret; u8 annotation = *_annotation; + u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET; /* Locate the subpacket */ + offset += subpacket * RXRPC_JUMBO_SUBPKTLEN; len = skb->len - offset; - if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) { - offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) * - RXRPC_JUMBO_SUBPKTLEN); - len = (annotation & RXRPC_RX_ANNO_JLAST) ? - skb->len - offset : RXRPC_JUMBO_SUBPKTLEN; - } + if (subpacket < sp->nr_subpackets - 1) + len = RXRPC_JUMBO_DATALEN; if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) { ret = rxrpc_verify_packet(call, skb, annotation, offset, len); @@ -303,6 +306,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call, { struct rxrpc_skb_priv *sp; struct sk_buff *skb; + rxrpc_serial_t serial; rxrpc_seq_t hard_ack, top, seq; size_t remain; bool last; @@ -336,12 +340,15 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call, break; } smp_rmb(); - rxrpc_see_skb(skb, rxrpc_skb_rx_seen); + rxrpc_see_skb(skb, rxrpc_skb_seen); sp = rxrpc_skb(skb); - if (!(flags & MSG_PEEK)) + if (!(flags & MSG_PEEK)) { + serial = sp->hdr.serial; + serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET; trace_rxrpc_receive(call, rxrpc_receive_front, - sp->hdr.serial, seq); + serial, seq); + } if (msg) sock_recv_timestamp(msg, sock->sk, skb); diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index dbb109da1835..8d8aa3c230b5 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -223,10 +223,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, struct rxrpc_skb_priv *sp; struct rxrpc_crypt iv; struct scatterlist sg[16]; - struct sk_buff *trailer; unsigned int len; u16 check; - int nsg; int err; sp = rxrpc_skb(skb); @@ -250,15 +248,14 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, crypto_skcipher_encrypt(req); /* we want to encrypt the skbuff in-place */ - nsg = skb_cow_data(skb, 0, &trailer); - err = -ENOMEM; - if (nsg < 0 || nsg > 16) + err = -EMSGSIZE; + if (skb_shinfo(skb)->nr_frags > 16) goto out; len = data_size + call->conn->size_align - 1; len &= ~(call->conn->size_align - 1); - sg_init_table(sg, nsg); + sg_init_table(sg, ARRAY_SIZE(sg)); err = skb_to_sgvec(skb, sg, 0, len); if (unlikely(err < 0)) goto out; @@ -359,11 +356,10 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, struct rxkad_level1_hdr sechdr; struct rxrpc_crypt iv; struct scatterlist sg[16]; - struct sk_buff *trailer; bool aborted; u32 data_size, buf; u16 check; - int nsg, ret; + int ret; _enter(""); @@ -376,11 +372,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, /* Decrypt the skbuff in-place. TODO: We really want to decrypt * directly into the target buffer. 
*/ - nsg = skb_cow_data(skb, 0, &trailer); - if (nsg < 0 || nsg > 16) - goto nomem; - - sg_init_table(sg, nsg); + sg_init_table(sg, ARRAY_SIZE(sg)); ret = skb_to_sgvec(skb, sg, offset, 8); if (unlikely(ret < 0)) return ret; @@ -428,10 +420,6 @@ protocol_error: if (aborted) rxrpc_send_abort_packet(call); return -EPROTO; - -nomem: - _leave(" = -ENOMEM"); - return -ENOMEM; } /* @@ -446,7 +434,6 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, struct rxkad_level2_hdr sechdr; struct rxrpc_crypt iv; struct scatterlist _sg[4], *sg; - struct sk_buff *trailer; bool aborted; u32 data_size, buf; u16 check; @@ -463,12 +450,11 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, /* Decrypt the skbuff in-place. TODO: We really want to decrypt * directly into the target buffer. */ - nsg = skb_cow_data(skb, 0, &trailer); - if (nsg < 0) - goto nomem; - sg = _sg; - if (unlikely(nsg > 4)) { + nsg = skb_shinfo(skb)->nr_frags; + if (nsg <= 4) { + nsg = 4; + } else { sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO); if (!sg) goto nomem; diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index bae14438f869..6a1547b270fe 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -176,7 +176,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, skb->tstamp = ktime_get_real(); ix = seq & RXRPC_RXTX_BUFF_MASK; - rxrpc_get_skb(skb, rxrpc_skb_tx_got); + rxrpc_get_skb(skb, rxrpc_skb_got); call->rxtx_annotations[ix] = annotation; smp_wmb(); call->rxtx_buffer[ix] = skb; @@ -248,7 +248,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, } out: - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); _leave(" = %d", ret); return ret; } @@ -289,7 +289,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, skb = call->tx_pending; call->tx_pending = NULL; - rxrpc_see_skb(skb, rxrpc_skb_tx_seen); + rxrpc_see_skb(skb, rxrpc_skb_seen); copied = 0; do { @@ -336,7 +336,9 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, if (!skb) goto maybe_error; - rxrpc_new_skb(skb, rxrpc_skb_tx_new); + sp = rxrpc_skb(skb); + sp->rx_flags |= RXRPC_SKB_TX_BUFFER; + rxrpc_new_skb(skb, rxrpc_skb_new); _debug("ALLOC SEND %p", skb); @@ -346,7 +348,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, skb_reserve(skb, call->conn->security_size); skb->len += call->conn->security_size; - sp = rxrpc_skb(skb); sp->remain = chunk; if (sp->remain > skb_tailroom(skb)) sp->remain = skb_tailroom(skb); @@ -439,7 +440,7 @@ out: return ret; call_terminated: - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); _leave(" = %d", call->error); return call->error; diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c index 9ad5045b7c2f..0348d2bf6f7d 100644 --- a/net/rxrpc/skbuff.c +++ b/net/rxrpc/skbuff.c @@ -14,7 +14,8 @@ #include <net/af_rxrpc.h> #include "ar-internal.h" -#define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs) +#define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER) +#define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs) /* * Note the allocation or reception of a socket buffer. 
@@ -22,8 +23,9 @@ void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) { const void *here = __builtin_return_address(0); - int n = atomic_inc_return(select_skb_count(op)); - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); + int n = atomic_inc_return(select_skb_count(skb)); + trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, + rxrpc_skb(skb)->rx_flags, here); } /* @@ -33,8 +35,9 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) { const void *here = __builtin_return_address(0); if (skb) { - int n = atomic_read(select_skb_count(op)); - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); + int n = atomic_read(select_skb_count(skb)); + trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, + rxrpc_skb(skb)->rx_flags, here); } } @@ -44,12 +47,23 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) { const void *here = __builtin_return_address(0); - int n = atomic_inc_return(select_skb_count(op)); - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); + int n = atomic_inc_return(select_skb_count(skb)); + trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, + rxrpc_skb(skb)->rx_flags, here); skb_get(skb); } /* + * Note the dropping of a ref on a socket buffer by the core. + */ +void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) +{ + const void *here = __builtin_return_address(0); + int n = atomic_inc_return(&rxrpc_n_rx_skbs); + trace_rxrpc_skb(skb, op, 0, n, 0, here); +} + +/* * Note the destruction of a socket buffer. */ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) @@ -58,8 +72,9 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) if (skb) { int n; CHECK_SLAB_OKAY(&skb->users); - n = atomic_dec_return(select_skb_count(op)); - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); + n = atomic_dec_return(select_skb_count(skb)); + trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, + rxrpc_skb(skb)->rx_flags, here); kfree_skb(skb); } } @@ -72,9 +87,10 @@ void rxrpc_purge_queue(struct sk_buff_head *list) const void *here = __builtin_return_address(0); struct sk_buff *skb; while ((skb = skb_dequeue((list))) != NULL) { - int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged)); - trace_rxrpc_skb(skb, rxrpc_skb_rx_purged, - refcount_read(&skb->users), n, here); + int n = atomic_dec_return(select_skb_count(skb)); + trace_rxrpc_skb(skb, rxrpc_skb_purged, + refcount_read(&skb->users), n, + rxrpc_skb(skb)->rx_flags, here); kfree_skb(skb); } } diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index fd1f7e799e23..04b7bd4ec751 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -422,7 +422,7 @@ static __net_init int bpf_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, bpf_net_id); - return tc_action_net_init(tn, &act_bpf_ops); + return tc_action_net_init(net, tn, &act_bpf_ops); } static void __net_exit bpf_exit_net(struct list_head *net_list) diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 32ac04d77a45..2b43cacf82af 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -231,7 +231,7 @@ static __net_init int connmark_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, connmark_net_id); - return tc_action_net_init(tn, &act_connmark_ops); + return tc_action_net_init(net, tn, &act_connmark_ops); } static void __net_exit connmark_exit_net(struct list_head *net_list) diff --git 
a/net/sched/act_csum.c b/net/sched/act_csum.c index 9b9288267a54..d3cfad88dc3a 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -714,7 +714,7 @@ static __net_init int csum_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, csum_net_id); - return tc_action_net_init(tn, &act_csum_ops); + return tc_action_net_init(net, tn, &act_csum_ops); } static void __net_exit csum_exit_net(struct list_head *net_list) diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 33a1a7406e87..cdd6f3818097 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -939,7 +939,7 @@ static __net_init int ct_init_net(struct net *net) tn->labels = true; } - return tc_action_net_init(&tn->tn, &act_ct_ops); + return tc_action_net_init(net, &tn->tn, &act_ct_ops); } static void __net_exit ct_exit_net(struct list_head *net_list) diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c index 06ef74b74911..0dbcfd1dca7b 100644 --- a/net/sched/act_ctinfo.c +++ b/net/sched/act_ctinfo.c @@ -376,7 +376,7 @@ static __net_init int ctinfo_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, ctinfo_net_id); - return tc_action_net_init(tn, &act_ctinfo_ops); + return tc_action_net_init(net, tn, &act_ctinfo_ops); } static void __net_exit ctinfo_exit_net(struct list_head *net_list) diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 8f0140c6ca58..324f1d1f6d47 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -278,7 +278,7 @@ static __net_init int gact_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, gact_net_id); - return tc_action_net_init(tn, &act_gact_ops); + return tc_action_net_init(net, tn, &act_gact_ops); } static void __net_exit gact_exit_net(struct list_head *net_list) diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 92ee853d43e6..3a31e241c647 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -890,7 +890,7 @@ static __net_init int ife_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, ife_net_id); - return tc_action_net_init(tn, &act_ife_ops); + return tc_action_net_init(net, tn, &act_ife_ops); } static void __net_exit ife_exit_net(struct list_head *net_list) diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index ce2c30a591d2..214a03d405cf 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -61,12 +61,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t, return 0; } -static void ipt_destroy_target(struct xt_entry_target *t) +static void ipt_destroy_target(struct xt_entry_target *t, struct net *net) { struct xt_tgdtor_param par = { .target = t->u.kernel.target, .targinfo = t->data, .family = NFPROTO_IPV4, + .net = net, }; if (par.target->destroy != NULL) par.target->destroy(&par); @@ -78,7 +79,7 @@ static void tcf_ipt_release(struct tc_action *a) struct tcf_ipt *ipt = to_ipt(a); if (ipt->tcfi_t) { - ipt_destroy_target(ipt->tcfi_t); + ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net); kfree(ipt->tcfi_t); } kfree(ipt->tcfi_tname); @@ -180,7 +181,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, spin_lock_bh(&ipt->tcf_lock); if (ret != ACT_P_CREATED) { - ipt_destroy_target(ipt->tcfi_t); + ipt_destroy_target(ipt->tcfi_t, net); kfree(ipt->tcfi_tname); kfree(ipt->tcfi_t); } @@ -350,7 +351,7 @@ static __net_init int ipt_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, ipt_net_id); - return tc_action_net_init(tn, &act_ipt_ops); + return tc_action_net_init(net, tn, &act_ipt_ops); } static 
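Most of the act_*.c hunks are the same mechanical change, passing struct net into tc_action_net_init(); the act_ipt.c hunks show the motivation, since the xt target destructor now needs the owning namespace in its xt_tgdtor_param, and the release path fetches it from the action's idrinfo. The destroy side, simplified from the patch (the structure fields are as shown in the hunks, the surrounding code is trimmed):

/* Tear down an xt target, handing the destructor its owning netns. */
static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
{
	struct xt_tgdtor_param par = {
		.target   = t->u.kernel.target,
		.targinfo = t->data,
		.family   = NFPROTO_IPV4,
		.net      = net,	/* previously left unset */
	};

	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}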
void __net_exit ipt_exit_net(struct list_head *net_list) @@ -399,7 +400,7 @@ static __net_init int xt_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, xt_net_id); - return tc_action_net_init(tn, &act_xt_ops); + return tc_action_net_init(net, tn, &act_xt_ops); } static void __net_exit xt_exit_net(struct list_head *net_list) diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index be3f88dfc37e..9d1bf508075a 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -453,7 +453,7 @@ static __net_init int mirred_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, mirred_net_id); - return tc_action_net_init(tn, &act_mirred_ops); + return tc_action_net_init(net, tn, &act_mirred_ops); } static void __net_exit mirred_exit_net(struct list_head *net_list) diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index 0f299e3b618c..e168df0e008a 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -375,7 +375,7 @@ static __net_init int mpls_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, mpls_net_id); - return tc_action_net_init(tn, &act_mpls_ops); + return tc_action_net_init(net, tn, &act_mpls_ops); } static void __net_exit mpls_exit_net(struct list_head *net_list) diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 7b858c11b1b5..ea4c5359e7df 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -327,7 +327,7 @@ static __net_init int nat_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, nat_net_id); - return tc_action_net_init(tn, &act_nat_ops); + return tc_action_net_init(net, tn, &act_nat_ops); } static void __net_exit nat_exit_net(struct list_head *net_list) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 17360c6faeaa..cdfaa79382a2 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -498,7 +498,7 @@ static __net_init int pedit_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, pedit_net_id); - return tc_action_net_init(tn, &act_pedit_ops); + return tc_action_net_init(net, tn, &act_pedit_ops); } static void __net_exit pedit_exit_net(struct list_head *net_list) diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 49cec3e64a4d..6315e0f8d26e 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -371,7 +371,7 @@ static __net_init int police_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, police_net_id); - return tc_action_net_init(tn, &act_police_ops); + return tc_action_net_init(net, tn, &act_police_ops); } static void __net_exit police_exit_net(struct list_head *net_list) diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 595308d60133..10229124a992 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -102,13 +102,17 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); s->rate = rate; s->psample_group_num = psample_group_num; - RCU_INIT_POINTER(s->psample_group, psample_group); + rcu_swap_protected(s->psample_group, psample_group, + lockdep_is_held(&s->tcf_lock)); if (tb[TCA_SAMPLE_TRUNC_SIZE]) { s->truncate = true; s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]); } spin_unlock_bh(&s->tcf_lock); + + if (psample_group) + psample_group_put(psample_group); if (goto_ch) tcf_chain_put_by_act(goto_ch); @@ -265,7 +269,7 @@ static __net_init int sample_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, sample_net_id); - return 
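The act_sample change replaces RCU_INIT_POINTER() with a swap: the new psample group is exchanged with the published pointer while tcf_lock is held, and the displaced group is released only after the lock is dropped, so readers never see a freed group and the put never happens under a spinlock. A sketch of that ordering, with the names taken from the hunk and everything around them elided:

	spin_lock_bh(&s->tcf_lock);
	/* psample_group holds the new group going in; after the swap it
	 * holds whatever the action pointed at before */
	rcu_swap_protected(s->psample_group, psample_group,
			   lockdep_is_held(&s->tcf_lock));
	spin_unlock_bh(&s->tcf_lock);

	/* drop the reference on the old group outside the lock */
	if (psample_group)
		psample_group_put(psample_group);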
tc_action_net_init(tn, &act_sample_ops); + return tc_action_net_init(net, tn, &act_sample_ops); } static void __net_exit sample_exit_net(struct list_head *net_list) diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 33aefa25b545..6120e56117ca 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -232,7 +232,7 @@ static __net_init int simp_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, simp_net_id); - return tc_action_net_init(tn, &act_simp_ops); + return tc_action_net_init(net, tn, &act_simp_ops); } static void __net_exit simp_exit_net(struct list_head *net_list) diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 37dced00b63d..6a8d3337c577 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -336,7 +336,7 @@ static __net_init int skbedit_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, skbedit_net_id); - return tc_action_net_init(tn, &act_skbedit_ops); + return tc_action_net_init(net, tn, &act_skbedit_ops); } static void __net_exit skbedit_exit_net(struct list_head *net_list) diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 7da3518e18ef..888437f97ba6 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -287,7 +287,7 @@ static __net_init int skbmod_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, skbmod_net_id); - return tc_action_net_init(tn, &act_skbmod_ops); + return tc_action_net_init(net, tn, &act_skbmod_ops); } static void __net_exit skbmod_exit_net(struct list_head *net_list) diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 6d0debdc9b97..2f83a79f76aa 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -600,7 +600,7 @@ static __net_init int tunnel_key_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); - return tc_action_net_init(tn, &act_tunnel_key_ops); + return tc_action_net_init(net, tn, &act_tunnel_key_ops); } static void __net_exit tunnel_key_exit_net(struct list_head *net_list) diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 216b75709875..08aaf719a70f 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -348,7 +348,7 @@ static __net_init int vlan_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, vlan_net_id); - return tc_action_net_init(tn, &act_vlan_ops); + return tc_action_net_init(net, tn, &act_vlan_ops); } static void __net_exit vlan_exit_net(struct list_head *net_list) diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index 732e109c3055..810645b5c086 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -181,11 +181,6 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch) s64 credits; int len; - if (atomic64_read(&q->port_rate) == -1) { - WARN_ONCE(1, "cbs: dequeue() called with unknown port rate."); - return NULL; - } - if (q->credits < 0) { credits = timediff_to_credits(now - q->last, q->idleslope); @@ -303,11 +298,19 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q, static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) { struct ethtool_link_ksettings ecmd; + int speed = SPEED_10; int port_rate = -1; + int err; + + err = __ethtool_get_link_ksettings(dev, &ecmd); + if (err < 0) + goto skip; + + if (ecmd.base.speed != SPEED_UNKNOWN) + speed = ecmd.base.speed; - if (!__ethtool_get_link_ksettings(dev, &ecmd) && - ecmd.base.speed != SPEED_UNKNOWN) - port_rate = ecmd.base.speed * 1000 * 
BYTES_PER_KBIT; +skip: + port_rate = speed * 1000 * BYTES_PER_KBIT; atomic64_set(&q->port_rate, port_rate); netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n", diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 11c03cf4aa74..137db1cbde85 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -624,8 +624,12 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, err = skb_array_produce(q, skb); - if (unlikely(err)) - return qdisc_drop_cpu(skb, qdisc, to_free); + if (unlikely(err)) { + if (qdisc_is_percpu_stats(qdisc)) + return qdisc_drop_cpu(skb, qdisc, to_free); + else + return qdisc_drop(skb, qdisc, to_free); + } qdisc_update_stats_at_enqueue(qdisc, pkt_len); return NET_XMIT_SUCCESS; @@ -688,11 +692,14 @@ static void pfifo_fast_reset(struct Qdisc *qdisc) kfree_skb(skb); } - for_each_possible_cpu(i) { - struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i); + if (qdisc_is_percpu_stats(qdisc)) { + for_each_possible_cpu(i) { + struct gnet_stats_queue *q; - q->backlog = 0; - q->qlen = 0; + q = per_cpu_ptr(qdisc->cpu_qstats, i); + q->backlog = 0; + q->qlen = 0; + } } } diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 540bde009ea5..84b863e2bdbd 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -477,11 +477,6 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch) u32 gate_mask; int i; - if (atomic64_read(&q->picos_per_byte) == -1) { - WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte."); - return NULL; - } - rcu_read_lock(); entry = rcu_dereference(q->current_entry); /* if there's no entry, it means that the schedule didn't @@ -954,12 +949,20 @@ static void taprio_set_picos_per_byte(struct net_device *dev, struct taprio_sched *q) { struct ethtool_link_ksettings ecmd; - int picos_per_byte = -1; + int speed = SPEED_10; + int picos_per_byte; + int err; - if (!__ethtool_get_link_ksettings(dev, &ecmd) && - ecmd.base.speed != SPEED_UNKNOWN) - picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, - ecmd.base.speed * 1000 * 1000); + err = __ethtool_get_link_ksettings(dev, &ecmd); + if (err < 0) + goto skip; + + if (ecmd.base.speed != SPEED_UNKNOWN) + speed = ecmd.base.speed; + +skip: + picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, + speed * 1000 * 1000); atomic64_set(&q->picos_per_byte, picos_per_byte); netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", @@ -1245,6 +1248,10 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt, */ q->clockid = -1; + spin_lock(&taprio_list_lock); + list_add(&q->taprio_list, &taprio_list); + spin_unlock(&taprio_list_lock); + if (sch->parent != TC_H_ROOT) return -EOPNOTSUPP; @@ -1262,10 +1269,6 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt, if (!opt) return -EINVAL; - spin_lock(&taprio_list_lock); - list_add(&q->taprio_list, &taprio_list); - spin_unlock(&taprio_list_lock); - for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *dev_queue; struct Qdisc *qdisc; diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 7325f382dbf4..957b9e3e1492 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -595,7 +595,7 @@ struct key *request_key_and_link(struct key_type *type, key = check_cached_key(&ctx); if (key) - return key; + goto error_free; /* search all the process keyrings for a key */ rcu_read_lock(); diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile index 045f5f7d68ab..13f1e8b9ac52 100644 --- 
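Both the cbs and taprio hunks stop treating an unreadable or unknown link speed as a -1 sentinel; they assume SPEED_10 instead, which is why the WARN_ONCE() checks in the two dequeue paths could be deleted. The conversion itself is plain arithmetic, and the small stand-alone program below (an illustration, not kernel code) reproduces taprio's picos-per-byte formula for a few speeds:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

/* speed is in Mbit/s, as ethtool link settings report it */
static int64_t picos_per_byte(int64_t speed_mbps)
{
	/* picoseconds per second, times bits per byte, over bits per second */
	return (NSEC_PER_SEC * 1000LL * 8) / (speed_mbps * 1000 * 1000);
}

int main(void)
{
	printf("%lld\n", (long long)picos_per_byte(10));   /* fallback: 800000 */
	printf("%lld\n", (long long)picos_per_byte(100));  /* 80000 */
	printf("%lld\n", (long long)picos_per_byte(1000)); /* 8000 */
	return 0;
}

At the 10 Mbit/s fallback a byte takes 800 ns on the wire, a deliberately pessimistic figure until the real link speed is known.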
a/tools/power/x86/turbostat/Makefile +++ b/tools/power/x86/turbostat/Makefile @@ -9,9 +9,10 @@ ifeq ("$(origin O)", "command line") endif turbostat : turbostat.c -override CFLAGS += -Wall -I../../../include +override CFLAGS += -O2 -Wall -I../../../include override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"' +override CFLAGS += -D_FORTIFY_SOURCE=2 %: %.c @mkdir -p $(BUILD_OUTPUT) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 75fc4fb9901c..b2a86438f074 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -39,7 +39,6 @@ FILE *outf; int *fd_percpu; struct timeval interval_tv = {5, 0}; struct timespec interval_ts = {5, 0}; -struct timespec one_msec = {0, 1000000}; unsigned int num_iterations; unsigned int debug; unsigned int quiet; @@ -60,6 +59,7 @@ unsigned int do_irtl_hsw; unsigned int units = 1000000; /* MHz etc */ unsigned int genuine_intel; unsigned int authentic_amd; +unsigned int hygon_genuine; unsigned int max_level, max_extended_level; unsigned int has_invariant_tsc; unsigned int do_nhm_platform_info; @@ -100,6 +100,7 @@ unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ unsigned int has_misc_feature_control; unsigned int first_counter_read = 1; +int ignore_stdin; #define RAPL_PKG (1 << 0) /* 0x610 MSR_PKG_POWER_LIMIT */ @@ -166,6 +167,7 @@ size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size; struct thread_data { struct timeval tv_begin; struct timeval tv_end; + struct timeval tv_delta; unsigned long long tsc; unsigned long long aperf; unsigned long long mperf; @@ -506,6 +508,7 @@ unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAU unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC; #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) +#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME) #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) #define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) @@ -849,7 +852,6 @@ int dump_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "pc8: %016llX\n", p->pc8); outp += sprintf(outp, "pc9: %016llX\n", p->pc9); outp += sprintf(outp, "pc10: %016llX\n", p->pc10); - outp += sprintf(outp, "pc10: %016llX\n", p->pc10); outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi); outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi); outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg); @@ -911,7 +913,7 @@ int format_counters(struct thread_data *t, struct core_data *c, if (DO_BIC(BIC_TOD)) outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec); - interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; + interval_float = t->tv_delta.tv_sec + t->tv_delta.tv_usec/1000000.0; tsc = t->tsc * tsc_tweak; @@ -1287,6 +1289,14 @@ delta_core(struct core_data *new, struct core_data *old) } } +int soft_c1_residency_display(int bic) +{ + if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr) + return 0; + + return DO_BIC_READ(bic); +} + /* * old = new - old */ @@ -1309,6 +1319,7 @@ delta_thread(struct thread_data *new, struct thread_data *old, * over-write old w/ new so we can print end of interval values */ + timersub(&new->tv_begin, &old->tv_begin, 
&old->tv_delta); old->tv_begin = new->tv_begin; old->tv_end = new->tv_end; @@ -1322,7 +1333,8 @@ delta_thread(struct thread_data *new, struct thread_data *old, old->c1 = new->c1 - old->c1; - if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { + if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || + soft_c1_residency_display(BIC_Avg_MHz)) { if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { old->aperf = new->aperf - old->aperf; old->mperf = new->mperf - old->mperf; @@ -1404,6 +1416,8 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data t->tv_begin.tv_usec = 0; t->tv_end.tv_sec = 0; t->tv_end.tv_usec = 0; + t->tv_delta.tv_sec = 0; + t->tv_delta.tv_usec = 0; t->tsc = 0; t->aperf = 0; @@ -1573,6 +1587,9 @@ void compute_average(struct thread_data *t, struct core_data *c, for_all_cpus(sum_counters, t, c, p); + /* Use the global time delta for the average. */ + average.threads.tv_delta = tv_delta; + average.threads.tsc /= topo.num_cpus; average.threads.aperf /= topo.num_cpus; average.threads.mperf /= topo.num_cpus; @@ -1714,7 +1731,7 @@ void get_apic_id(struct thread_data *t) if (!DO_BIC(BIC_X2APIC)) return; - if (authentic_amd) { + if (authentic_amd || hygon_genuine) { unsigned int topology_extensions; if (max_extended_level < 0x8000001e) @@ -1762,19 +1779,20 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) struct msr_counter *mp; int i; - gettimeofday(&t->tv_begin, (struct timezone *)NULL); - if (cpu_migrate(cpu)) { fprintf(outf, "Could not migrate to CPU %d\n", cpu); return -1; } + gettimeofday(&t->tv_begin, (struct timezone *)NULL); + if (first_counter_read) get_apic_id(t); retry: t->tsc = rdtsc(); /* we are running on local CPU of interest */ - if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { + if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || + soft_c1_residency_display(BIC_Avg_MHz)) { unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; /* @@ -1851,20 +1869,20 @@ retry: if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) goto done; - if (DO_BIC(BIC_CPU_c3)) { + if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) { if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) return -6; } - if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) { + if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) return -7; - } else if (do_knl_cstates) { + } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) { if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) return -7; } - if (DO_BIC(BIC_CPU_c7)) + if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7)) if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) return -8; @@ -2912,6 +2930,7 @@ int snapshot_cpu_lpi_us(void) if (retval != 1) { fprintf(stderr, "Disabling Low Power Idle CPU output\n"); BIC_NOT_PRESENT(BIC_CPU_LPI); + fclose(fp); return -1; } @@ -2938,6 +2957,7 @@ int snapshot_sys_lpi_us(void) if (retval != 1) { fprintf(stderr, "Disabling Low Power Idle System output\n"); BIC_NOT_PRESENT(BIC_SYS_LPI); + fclose(fp); return -1; } fclose(fp); @@ -2985,8 +3005,6 @@ static void signal_handler (int signal) fprintf(stderr, "SIGUSR1\n"); break; } - /* make sure this manually-invoked interval is at least 1ms long */ - nanosleep(&one_msec, NULL); } void setup_signal_handler(void) @@ -3005,29 +3023,38 @@ void setup_signal_handler(void) void do_sleep(void) { - struct timeval select_timeout; + 
struct timeval tout; + struct timespec rest; fd_set readfds; int retval; FD_ZERO(&readfds); FD_SET(0, &readfds); - if (!isatty(fileno(stdin))) { + if (ignore_stdin) { nanosleep(&interval_ts, NULL); return; } - select_timeout = interval_tv; - retval = select(1, &readfds, NULL, NULL, &select_timeout); + tout = interval_tv; + retval = select(1, &readfds, NULL, NULL, &tout); if (retval == 1) { switch (getc(stdin)) { case 'q': exit_requested = 1; break; + case EOF: + /* + * 'stdin' is a pipe closed on the other end. There + * won't be any further input. + */ + ignore_stdin = 1; + /* Sleep the rest of the time */ + rest.tv_sec = (tout.tv_sec + tout.tv_usec / 1000000); + rest.tv_nsec = (tout.tv_usec % 1000000) * 1000; + nanosleep(&rest, NULL); } - /* make sure this manually-invoked interval is at least 1ms long */ - nanosleep(&one_msec, NULL); } } @@ -3209,6 +3236,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) break; case INTEL_FAM6_HASWELL_CORE: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSX */ + case INTEL_FAM6_HASWELL_ULT: /* HSW */ case INTEL_FAM6_HASWELL_GT3E: /* HSW */ case INTEL_FAM6_BROADWELL_CORE: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ @@ -3405,6 +3433,7 @@ int has_config_tdp(unsigned int family, unsigned int model) case INTEL_FAM6_IVYBRIDGE: /* IVB */ case INTEL_FAM6_HASWELL_CORE: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSX */ + case INTEL_FAM6_HASWELL_ULT: /* HSW */ case INTEL_FAM6_HASWELL_GT3E: /* HSW */ case INTEL_FAM6_BROADWELL_CORE: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ @@ -3803,6 +3832,7 @@ double get_tdp_amd(unsigned int family) { switch (family) { case 0x17: + case 0x18: default: /* This is the max stock TDP of HEDT/Server Fam17h chips */ return 250.0; @@ -3841,6 +3871,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model) case INTEL_FAM6_SANDYBRIDGE: case INTEL_FAM6_IVYBRIDGE: case INTEL_FAM6_HASWELL_CORE: /* HSW */ + case INTEL_FAM6_HASWELL_ULT: /* HSW */ case INTEL_FAM6_HASWELL_GT3E: /* HSW */ case INTEL_FAM6_BROADWELL_CORE: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ @@ -3982,6 +4013,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model) switch (family) { case 0x17: /* Zen, Zen+ */ + case 0x18: /* Hygon Dhyana */ do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY; if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); @@ -4002,7 +4034,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model) rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f)); rapl_power_units = ldexp(1.0, -(msr & 0xf)); - tdp = get_tdp_amd(model); + tdp = get_tdp_amd(family); rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; if (!quiet) @@ -4018,7 +4050,7 @@ void rapl_probe(unsigned int family, unsigned int model) { if (genuine_intel) rapl_probe_intel(family, model); - if (authentic_amd) + if (authentic_amd || hygon_genuine) rapl_probe_amd(family, model); } @@ -4032,6 +4064,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model) switch (model) { case INTEL_FAM6_HASWELL_CORE: /* HSW */ + case INTEL_FAM6_HASWELL_ULT: /* HSW */ case INTEL_FAM6_HASWELL_GT3E: /* HSW */ do_gfx_perf_limit_reasons = 1; case INTEL_FAM6_HASWELL_X: /* HSX */ @@ -4251,6 +4284,7 @@ int has_snb_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ case INTEL_FAM6_HASWELL_CORE: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSW */ + case INTEL_FAM6_HASWELL_ULT: /* HSW */ case INTEL_FAM6_HASWELL_GT3E: /* HSW */ case INTEL_FAM6_BROADWELL_CORE: /* BDW */ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ @@ -4267,7 +4301,7 
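The do_sleep() rework covers the case where turbostat's stdin is a pipe whose writer has gone away: getc() then returns EOF on every interval, so the code records that, sleeps out the remainder of the current interval from the timeout select() left behind, and switches to plain nanosleep() afterwards. A stripped-down, compilable illustration of that select()-with-EOF-fallback shape follows; it is not the turbostat code itself:

#include <stdio.h>
#include <stdlib.h>
#include <sys/select.h>
#include <time.h>

static int ignore_stdin;

static void wait_interval(struct timeval interval)
{
	struct timeval tout = interval;	/* Linux select() leaves the time remaining here */
	struct timespec rest;
	fd_set readfds;

	if (ignore_stdin) {
		rest.tv_sec = interval.tv_sec;
		rest.tv_nsec = interval.tv_usec * 1000;
		nanosleep(&rest, NULL);
		return;
	}

	FD_ZERO(&readfds);
	FD_SET(0, &readfds);

	if (select(1, &readfds, NULL, NULL, &tout) == 1) {
		switch (getc(stdin)) {
		case 'q':
			exit(0);
		case EOF:
			/* stdin is a closed pipe; stop selecting on it and
			 * sleep whatever is left of this interval */
			ignore_stdin = 1;
			rest.tv_sec = tout.tv_sec + tout.tv_usec / 1000000;
			rest.tv_nsec = (tout.tv_usec % 1000000) * 1000;
			nanosleep(&rest, NULL);
			break;
		}
	}
}

int main(void)
{
	struct timeval five_seconds = { 5, 0 };
	int i;

	for (i = 0; i < 3; i++)		/* three sampling intervals */
		wait_interval(five_seconds);
	return 0;
}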
@@ int has_snb_msrs(unsigned int family, unsigned int model) } /* - * HSW adds support for additional MSRs: + * HSW ULT added support for C8/C9/C10 MSRs: * * MSR_PKG_C8_RESIDENCY 0x00000630 * MSR_PKG_C9_RESIDENCY 0x00000631 @@ -4278,13 +4312,13 @@ int has_snb_msrs(unsigned int family, unsigned int model) * MSR_PKGC10_IRTL 0x00000635 * */ -int has_hsw_msrs(unsigned int family, unsigned int model) +int has_c8910_msrs(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; switch (model) { - case INTEL_FAM6_HASWELL_CORE: + case INTEL_FAM6_HASWELL_ULT: /* HSW */ case INTEL_FAM6_BROADWELL_CORE: /* BDW */ case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ @@ -4568,9 +4602,6 @@ unsigned int intel_model_duplicates(unsigned int model) case INTEL_FAM6_XEON_PHI_KNM: return INTEL_FAM6_XEON_PHI_KNL; - case INTEL_FAM6_HASWELL_ULT: - return INTEL_FAM6_HASWELL_CORE; - case INTEL_FAM6_BROADWELL_X: case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ return INTEL_FAM6_BROADWELL_X; @@ -4582,7 +4613,11 @@ unsigned int intel_model_duplicates(unsigned int model) return INTEL_FAM6_SKYLAKE_MOBILE; case INTEL_FAM6_ICELAKE_MOBILE: + case INTEL_FAM6_ICELAKE_NNPI: return INTEL_FAM6_CANNONLAKE_MOBILE; + + case INTEL_FAM6_ATOM_TREMONT_X: + return INTEL_FAM6_ATOM_GOLDMONT_X; } return model; } @@ -4600,6 +4635,8 @@ void process_cpuid() genuine_intel = 1; else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65) authentic_amd = 1; + else if (ebx == 0x6f677948 && ecx == 0x656e6975 && edx == 0x6e65476e) + hygon_genuine = 1; if (!quiet) fprintf(outf, "CPUID(0): %.4s%.4s%.4s ", @@ -4820,12 +4857,12 @@ void process_cpuid() BIC_NOT_PRESENT(BIC_CPU_c7); BIC_NOT_PRESENT(BIC_Pkgpc7); } - if (has_hsw_msrs(family, model)) { + if (has_c8910_msrs(family, model)) { BIC_PRESENT(BIC_Pkgpc8); BIC_PRESENT(BIC_Pkgpc9); BIC_PRESENT(BIC_Pkgpc10); } - do_irtl_hsw = has_hsw_msrs(family, model); + do_irtl_hsw = has_c8910_msrs(family, model); if (has_skl_msrs(family, model)) { BIC_PRESENT(BIC_Totl_c0); BIC_PRESENT(BIC_Any_c0); @@ -5123,7 +5160,7 @@ int initialize_counters(int cpu_id) void allocate_output_buffer() { - output_buffer = calloc(1, (1 + topo.num_cpus) * 1024); + output_buffer = calloc(1, (1 + topo.num_cpus) * 2048); outp = output_buffer; if (outp == NULL) err(-1, "calloc output buffer"); @@ -5269,7 +5306,7 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(outf, "turbostat version 19.03.20" + fprintf(outf, "turbostat version 19.08.31" " - Len Brown <lenb@kernel.org>\n"); } diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile index 1fdeef864e7c..666b325a62a2 100644 --- a/tools/power/x86/x86_energy_perf_policy/Makefile +++ b/tools/power/x86/x86_energy_perf_policy/Makefile @@ -9,8 +9,9 @@ ifeq ("$(origin O)", "command line") endif x86_energy_perf_policy : x86_energy_perf_policy.c -override CFLAGS += -Wall -I../../../include +override CFLAGS += -O2 -Wall -I../../../include override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' +override CFLAGS += -D_FORTIFY_SOURCE=2 %: %.c @mkdir -p $(BUILD_OUTPUT) diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 index 17db1c3af4d0..78c6361898b1 100644 --- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 +++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 @@ -40,7 +40,7 @@ in the same processor package. 
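process_cpuid() now recognises Hygon parts by the same vendor-string trick already used for GenuineIntel and AuthenticAMD: CPUID leaf 0 returns the vendor name split across EBX, EDX and ECX, and the constants 0x6f677948, 0x6e65476e and 0x656e6975 are simply "Hygo", "nGen" and "uine" read as little-endian words. A stand-alone check of those constants (a user-space illustration, not the turbostat code; only EBX is compared here, while the tool compares all three registers):

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;

	/* leaf 0: the vendor string lives in EBX, EDX, ECX, in that order */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	if (ebx == 0x756e6547)		/* "Genu" -> GenuineIntel */
		printf("genuine_intel (%s)\n", vendor);
	else if (ebx == 0x68747541)	/* "Auth" -> AuthenticAMD */
		printf("authentic_amd (%s)\n", vendor);
	else if (ebx == 0x6f677948)	/* "Hygo" -> HygonGenuine */
		printf("hygon_genuine (%s)\n", vendor);
	else
		printf("unrecognised vendor (%s)\n", vendor);
	return 0;
}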
Hardware P-States (HWP) are effectively an expansion of hardware P-state control from the opportunistic turbo-mode P-state range to include the entire range of available P-states. -On Broadwell Xeon, the initial HWP implementation, EBP influenced HWP. +On Broadwell Xeon, the initial HWP implementation, EPB influenced HWP. That influence was removed in subsequent generations, where it was moved to the Energy_Performance_Preference (EPP) field in diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c index 34a796b303fe..3fe1eed900d4 100644 --- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c +++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c @@ -545,7 +545,7 @@ void cmdline(int argc, char **argv) progname = argv[0]; - while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw", + while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:", long_options, &option_index)) != -1) { switch (opt) { case 'a': @@ -1259,6 +1259,15 @@ void probe_dev_msr(void) if (system("/sbin/modprobe msr > /dev/null 2>&1")) err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); } + +static void get_cpuid_or_exit(unsigned int leaf, + unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + if (!__get_cpuid(leaf, eax, ebx, ecx, edx)) + errx(1, "Processor not supported\n"); +} + /* * early_cpuid() * initialize turbo_is_enabled, has_hwp, has_epb @@ -1266,15 +1275,10 @@ void probe_dev_msr(void) */ void early_cpuid(void) { - unsigned int eax, ebx, ecx, edx, max_level; + unsigned int eax, ebx, ecx, edx; unsigned int fms, family, model; - __get_cpuid(0, &max_level, &ebx, &ecx, &edx); - - if (max_level < 6) - errx(1, "Processor not supported\n"); - - __get_cpuid(1, &fms, &ebx, &ecx, &edx); + get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx); family = (fms >> 8) & 0xf; model = (fms >> 4) & 0xf; if (family == 6 || family == 0xf) @@ -1288,7 +1292,7 @@ void early_cpuid(void) bdx_highest_ratio = msr & 0xFF; } - __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); + get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx); turbo_is_enabled = (eax >> 1) & 1; has_hwp = (eax >> 7) & 1; has_epb = (ecx >> 3) & 1; @@ -1306,7 +1310,7 @@ void parse_cpuid(void) eax = ebx = ecx = edx = 0; - __get_cpuid(0, &max_level, &ebx, &ecx, &edx); + get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx); if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) genuine_intel = 1; @@ -1315,7 +1319,7 @@ void parse_cpuid(void) fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ", (char *)&ebx, (char *)&edx, (char *)&ecx); - __get_cpuid(1, &fms, &ebx, &ecx, &edx); + get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx); family = (fms >> 8) & 0xf; model = (fms >> 4) & 0xf; stepping = fms & 0xf; @@ -1340,7 +1344,7 @@ void parse_cpuid(void) errx(1, "CPUID: no MSR"); - __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); + get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx); /* turbo_is_enabled already set */ /* has_hwp already set */ has_hwp_notify = eax & (1 << 8); diff --git a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py index affa7f2d9670..9539cffa9e5e 100644 --- a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py +++ b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py @@ -64,7 +64,7 @@ class SubPlugin(TdcPlugin): cmdlist.insert(0, self.args.NAMES['NS']) cmdlist.insert(0, 'exec') cmdlist.insert(0, 'netns') - cmdlist.insert(0, 'ip') + cmdlist.insert(0, 
self.args.NAMES['IP']) else: pass @@ -78,16 +78,16 @@ class SubPlugin(TdcPlugin): return command def _ports_create(self): - cmd = 'ip link add $DEV0 type veth peer name $DEV1' + cmd = '$IP link add $DEV0 type veth peer name $DEV1' self._exec_cmd('pre', cmd) - cmd = 'ip link set $DEV0 up' + cmd = '$IP link set $DEV0 up' self._exec_cmd('pre', cmd) if not self.args.namespace: - cmd = 'ip link set $DEV1 up' + cmd = '$IP link set $DEV1 up' self._exec_cmd('pre', cmd) def _ports_destroy(self): - cmd = 'ip link del $DEV0' + cmd = '$IP link del $DEV0' self._exec_cmd('post', cmd) def _ns_create(self): @@ -97,16 +97,16 @@ class SubPlugin(TdcPlugin): ''' self._ports_create() if self.args.namespace: - cmd = 'ip netns add {}'.format(self.args.NAMES['NS']) + cmd = '$IP netns add {}'.format(self.args.NAMES['NS']) self._exec_cmd('pre', cmd) - cmd = 'ip link set $DEV1 netns {}'.format(self.args.NAMES['NS']) + cmd = '$IP link set $DEV1 netns {}'.format(self.args.NAMES['NS']) self._exec_cmd('pre', cmd) - cmd = 'ip -n {} link set $DEV1 up'.format(self.args.NAMES['NS']) + cmd = '$IP -n {} link set $DEV1 up'.format(self.args.NAMES['NS']) self._exec_cmd('pre', cmd) if self.args.device: - cmd = 'ip link set $DEV2 netns {}'.format(self.args.NAMES['NS']) + cmd = '$IP link set $DEV2 netns {}'.format(self.args.NAMES['NS']) self._exec_cmd('pre', cmd) - cmd = 'ip -n {} link set $DEV2 up'.format(self.args.NAMES['NS']) + cmd = '$IP -n {} link set $DEV2 up'.format(self.args.NAMES['NS']) self._exec_cmd('pre', cmd) def _ns_destroy(self): @@ -115,7 +115,7 @@ class SubPlugin(TdcPlugin): devices as well) ''' if self.args.namespace: - cmd = 'ip netns delete {}'.format(self.args.NAMES['NS']) + cmd = '$IP netns delete {}'.format(self.args.NAMES['NS']) self._exec_cmd('post', cmd) def _exec_cmd(self, stage, command): diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 44efc2ff863f..0d090482720d 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -211,6 +211,12 @@ static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq, vgic_irq_set_phys_active(irq, true); } +static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq) +{ + return (vgic_irq_is_sgi(irq->intid) && + vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2); +} + void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val) @@ -223,6 +229,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, for_each_set_bit(i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + /* GICD_ISPENDR0 SGI bits are WI */ + if (is_vgic_v2_sgi(vcpu, irq)) { + vgic_put_irq(vcpu->kvm, irq); + continue; + } + raw_spin_lock_irqsave(&irq->irq_lock, flags); if (irq->hw) vgic_hw_irq_spending(vcpu, irq, is_uaccess); @@ -270,6 +282,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, for_each_set_bit(i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + /* GICD_ICPENDR0 SGI bits are WI */ + if (is_vgic_v2_sgi(vcpu, irq)) { + vgic_put_irq(vcpu->kvm, irq); + continue; + } + raw_spin_lock_irqsave(&irq->irq_lock, flags); if (irq->hw) diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 96aab77d0471..b00aa304c260 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -184,7 +184,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) if (vgic_irq_is_sgi(irq->intid)) { u32 src = ffs(irq->source); - BUG_ON(!src); + if 
(WARN_RATELIMIT(!src, "No SGI source for INTID %d\n", + irq->intid)) + return; + val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; irq->source &= ~(1 << (src - 1)); if (irq->source) { diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 0c653a1e5215..a4ad431c92a9 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -167,7 +167,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) model == KVM_DEV_TYPE_ARM_VGIC_V2) { u32 src = ffs(irq->source); - BUG_ON(!src); + if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n", + irq->intid)) + return; + val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; irq->source &= ~(1 << (src - 1)); if (irq->source) { diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 13d4b38a94ec..e7bde65ba67c 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -254,6 +254,13 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b) bool penda, pendb; int ret; + /* + * list_sort may call this function with the same element when + * the list is fairly long. + */ + if (unlikely(irqa == irqb)) + return 0; + raw_spin_lock(&irqa->irq_lock); raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); |
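The final vgic.c hunk hardens vgic_irq_cmp() against list_sort() handing it the same element twice, which the patch notes can happen once the ap_list gets fairly long; without the early return the comparator would take irqa->irq_lock and then try to take the very same lock again as irqb's, deadlocking. The guard is worth copying into any list_sort() comparator that locks its elements; a sketch with an illustrative element type (not the vgic structures):

struct elem {
	struct list_head list;
	raw_spinlock_t lock;
	int key;
};

/* Comparator for list_sort() over elements that carry their own lock. */
static int elem_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct elem *ea = container_of(a, struct elem, list);
	struct elem *eb = container_of(b, struct elem, list);
	int ret;

	/* list_sort() may compare an element against itself on long lists;
	 * locking ea->lock twice would deadlock, so answer "equal" early */
	if (unlikely(ea == eb))
		return 0;

	raw_spin_lock(&ea->lock);
	raw_spin_lock_nested(&eb->lock, SINGLE_DEPTH_NESTING);
	ret = ea->key - eb->key;
	raw_spin_unlock(&eb->lock);
	raw_spin_unlock(&ea->lock);
	return ret;
}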