481 files changed, 33118 insertions, 8481 deletions
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml new file mode 100644 index 000000000000..03a76729d26c --- /dev/null +++ b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml @@ -0,0 +1,117 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/brcm,bcm2711-hdmi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Broadcom BCM2711 HDMI Controller Device Tree Bindings + +maintainers: + - Eric Anholt <eric@anholt.net> + +properties: + compatible: + enum: + - brcm,bcm2711-hdmi0 + - brcm,bcm2711-hdmi1 + + reg: + items: + - description: HDMI controller register range + - description: DVP register range + - description: HDMI PHY register range + - description: Rate Manager register range + - description: Packet RAM register range + - description: Metadata RAM register range + - description: CSC register range + - description: CEC register range + - description: HD register range + + reg-names: + items: + - const: hdmi + - const: dvp + - const: phy + - const: rm + - const: packet + - const: metadata + - const: csc + - const: cec + - const: hd + + clocks: + items: + - description: The HDMI state machine clock + - description: The Pixel BVB clock + - description: The HDMI Audio parent clock + - description: The HDMI CEC parent clock + + clock-names: + items: + - const: hdmi + - const: bvb + - const: audio + - const: cec + + ddc: + allOf: + - $ref: /schemas/types.yaml#/definitions/phandle + description: > + Phandle of the I2C controller used for DDC EDID probing + + hpd-gpios: + description: > + The GPIO pin for the HDMI hotplug detect (if it doesn't appear + as an interrupt/status bit in the HDMI controller itself) + + dmas: + maxItems: 1 + description: > + Should contain one entry pointing to the DMA channel used to + transfer audio data. + + dma-names: + const: audio-rx + + resets: + maxItems: 1 + +required: + - compatible + - reg + - reg-names + - clocks + - resets + - ddc + +additionalProperties: false + +examples: + - | + hdmi0: hdmi@7ef00700 { + compatible = "brcm,bcm2711-hdmi0"; + reg = <0x7ef00700 0x300>, + <0x7ef00300 0x200>, + <0x7ef00f00 0x80>, + <0x7ef00f80 0x80>, + <0x7ef01b00 0x200>, + <0x7ef01f00 0x400>, + <0x7ef00200 0x80>, + <0x7ef04300 0x100>, + <0x7ef20000 0x100>; + reg-names = "hdmi", + "dvp", + "phy", + "rm", + "packet", + "metadata", + "csc", + "cec", + "hd"; + clocks = <&firmware_clocks 13>, <&firmware_clocks 14>, <&dvp 1>, <&clk_27MHz>; + clock-names = "hdmi", "bvb", "audio", "cec"; + resets = <&dvp 0>; + ddc = <&ddc0>; + }; + +... 
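The new BCM2711 HDMI binding above delegates DDC to a separate I2C controller through the "ddc" phandle. As a hedged illustration of how a driver typically resolves that phandle into an I2C adapter for EDID reads (the real vc4_hdmi probe code differs in detail, and hdmi_get_ddc() is a hypothetical helper):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/of.h>

/* Hypothetical helper: resolve the binding's "ddc" phandle into the
 * I2C adapter used for EDID reads. */
static struct i2c_adapter *hdmi_get_ddc(struct device *dev)
{
	struct device_node *ddc_node;
	struct i2c_adapter *ddc;

	ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
	if (!ddc_node)
		return ERR_PTR(-ENODEV);

	ddc = of_find_i2c_adapter_by_node(ddc_node);
	of_node_put(ddc_node);
	if (!ddc)
		/* The I2C controller has not probed yet; try again later. */
		return ERR_PTR(-EPROBE_DEFER);

	return ddc;
}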
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2835-hvs.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2835-hvs.yaml index 02410f8d6d49..e826ab0adb75 100644 --- a/Documentation/devicetree/bindings/display/brcm,bcm2835-hvs.yaml +++ b/Documentation/devicetree/bindings/display/brcm,bcm2835-hvs.yaml @@ -11,7 +11,9 @@ maintainers: properties: compatible: - const: brcm,bcm2835-hvs + enum: + - brcm,bcm2711-hvs + - brcm,bcm2835-hvs reg: maxItems: 1 @@ -19,6 +21,10 @@ properties: interrupts: maxItems: 1 + clocks: + maxItems: 1 + description: Core Clock + required: - compatible - reg @@ -26,6 +32,16 @@ required: additionalProperties: false +if: + properties: + compatible: + contains: + const: brcm,bcm2711-hvs + +then: + required: + - clocks + examples: - | hvs@7e400000 { diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2835-pixelvalve0.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2835-pixelvalve0.yaml index e60791db1fa1..4e1ba03f6477 100644 --- a/Documentation/devicetree/bindings/display/brcm,bcm2835-pixelvalve0.yaml +++ b/Documentation/devicetree/bindings/display/brcm,bcm2835-pixelvalve0.yaml @@ -15,6 +15,11 @@ properties: - brcm,bcm2835-pixelvalve0 - brcm,bcm2835-pixelvalve1 - brcm,bcm2835-pixelvalve2 + - brcm,bcm2711-pixelvalve0 + - brcm,bcm2711-pixelvalve1 + - brcm,bcm2711-pixelvalve2 + - brcm,bcm2711-pixelvalve3 + - brcm,bcm2711-pixelvalve4 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2835-vc4.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2835-vc4.yaml index 0dcf0c397375..49a5e041aa49 100644 --- a/Documentation/devicetree/bindings/display/brcm,bcm2835-vc4.yaml +++ b/Documentation/devicetree/bindings/display/brcm,bcm2835-vc4.yaml @@ -17,6 +17,7 @@ description: > properties: compatible: enum: + - brcm,bcm2711-vc5 - brcm,bcm2835-vc4 - brcm,cygnus-vc4 diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml new file mode 100644 index 000000000000..74d675fc6e7b --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml @@ -0,0 +1,169 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/display/bridge/cdns,mhdp8546.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Cadence MHDP8546 bridge + +maintainers: + - Swapnil Jakhade <sjakhade@cadence.com> + - Yuti Amonkar <yamonkar@cadence.com> + +properties: + compatible: + enum: + - cdns,mhdp8546 + - ti,j721e-mhdp8546 + + reg: + minItems: 1 + maxItems: 2 + items: + - description: + Register block of mhdptx apb registers up to PHY mapped area (AUX_CONFIG_P). + The AUX and PMA registers are not part of this range, they are instead + included in the associated PHY. + - description: + Register block for DSS_EDP0_INTG_CFG_VP registers in case of TI J7 SoCs. + + reg-names: + minItems: 1 + maxItems: 2 + items: + - const: mhdptx + - const: j721e-intg + + clocks: + maxItems: 1 + description: + DP bridge clock, used by the IP to know how to translate a number of + clock cycles into a time (which is used to comply with DP standard timings + and delays). + + phys: + maxItems: 1 + description: + phandle to the DisplayPort PHY. + + phy-names: + items: + - const: dpphy + + power-domains: + maxItems: 1 + + interrupts: + maxItems: 1 + + ports: + type: object + description: + Ports as described in Documentation/devicetree/bindings/graph.txt.
+ + properties: + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + + port@0: + type: object + description: + First input port representing the DP bridge input. + + port@1: + type: object + description: + Second input port representing the DP bridge input. + + port@2: + type: object + description: + Third input port representing the DP bridge input. + + port@3: + type: object + description: + Fourth input port representing the DP bridge input. + + port@4: + type: object + description: + Output port representing the DP bridge output. + + required: + - port@0 + - port@4 + - '#address-cells' + - '#size-cells' + +allOf: + - if: + properties: + compatible: + contains: + const: ti,j721e-mhdp8546 + then: + properties: + reg: + minItems: 2 + reg-names: + minItems: 2 + else: + properties: + reg: + maxItems: 1 + reg-names: + maxItems: 1 + +required: + - compatible + - clocks + - reg + - reg-names + - phys + - phy-names + - interrupts + - ports + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + bus { + #address-cells = <2>; + #size-cells = <2>; + + mhdp: dp-bridge@f0fb000000 { + compatible = "cdns,mhdp8546"; + reg = <0xf0 0xfb000000 0x0 0x1000000>; + reg-names = "mhdptx"; + clocks = <&mhdp_clock>; + phys = <&dp_phy>; + phy-names = "dpphy"; + interrupts = <GIC_SPI 614 IRQ_TYPE_LEVEL_HIGH>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dp_bridge_input: endpoint { + remote-endpoint = <&xxx_dpi_output>; + }; + }; + + port@4 { + reg = <4>; + dp_bridge_output: endpoint { + remote-endpoint = <&xxx_dp_connector_input>; + }; + }; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml index 68951d56ebba..ff3ae25f8b23 100644 --- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml +++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml @@ -79,6 +79,9 @@ properties: The GPIO used to control the power down line of this device. 
maxItems: 1 + power-supply: + maxItems: 1 + required: - compatible - ports diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt index 819f3e31013c..3f6072651182 100644 --- a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt +++ b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt @@ -14,8 +14,10 @@ Required properties: - compatible : Shall contain one or more of - "renesas,r8a774a1-hdmi" for R8A774A1 (RZ/G2M) compatible HDMI TX - "renesas,r8a774b1-hdmi" for R8A774B1 (RZ/G2N) compatible HDMI TX + - "renesas,r8a774e1-hdmi" for R8A774E1 (RZ/G2H) compatible HDMI TX - "renesas,r8a7795-hdmi" for R8A7795 (R-Car H3) compatible HDMI TX - "renesas,r8a7796-hdmi" for R8A7796 (R-Car M3-W) compatible HDMI TX + - "renesas,r8a77961-hdmi" for R8A77961 (R-Car M3-W+) compatible HDMI TX - "renesas,r8a77965-hdmi" for R8A77965 (R-Car M3-N) compatible HDMI TX - "renesas,rcar-gen3-hdmi" for the generic R-Car Gen3 and RZ/G2 compatible HDMI TX @@ -42,7 +44,7 @@ Optional properties: Example: hdmi0: hdmi@fead0000 { - compatible = "renesas,r8a7795-dw-hdmi"; + compatible = "renesas,r8a7795-hdmi", "renesas,rcar-gen3-hdmi"; reg = <0 0xfead0000 0 0x10000>; interrupts = <0 389 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cpg CPG_CORE R8A7795_CLK_S0D4>, <&cpg CPG_MOD 729>; diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml index baaf2a2a6fed..e5b163951b91 100644 --- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml +++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml @@ -16,11 +16,13 @@ description: | properties: compatible: enum: + - renesas,r8a7742-lvds # for RZ/G1H compatible LVDS encoders - renesas,r8a7743-lvds # for RZ/G1M compatible LVDS encoders - renesas,r8a7744-lvds # for RZ/G1N compatible LVDS encoders - renesas,r8a774a1-lvds # for RZ/G2M compatible LVDS encoders - renesas,r8a774b1-lvds # for RZ/G2N compatible LVDS encoders - renesas,r8a774c0-lvds # for RZ/G2E compatible LVDS encoders + - renesas,r8a774e1-lvds # for RZ/G2H compatible LVDS encoders - renesas,r8a7790-lvds # for R-Car H2 compatible LVDS encoders - renesas,r8a7791-lvds # for R-Car M2-W compatible LVDS encoders - renesas,r8a7793-lvds # for R-Car M2-N compatible LVDS encoders diff --git a/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml b/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml new file mode 100644 index 000000000000..f1f25aa794d9 --- /dev/null +++ b/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml @@ -0,0 +1,108 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright 2019 NXP +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/display/imx/nxp,imx8mq-dcss.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: iMX8MQ Display Controller Subsystem (DCSS) + +maintainers: + - Laurentiu Palcu <laurentiu.palcu@nxp.com> + +description: + + The DCSS (display controller sub system) is used to source up to three + display buffers, compose them, and drive a display using HDMI 2.0a(with HDCP + 2.2) or MIPI-DSI. The DCSS is intended to support up to 4kp60 displays. HDR10 + image processing capabilities are included to provide a solution capable of + driving next generation high dynamic range displays. 
+ +properties: + compatible: + const: nxp,imx8mq-dcss + + reg: + items: + - description: DCSS base address and size, up to IRQ steer start + - description: DCSS BLKCTL base address and size + + interrupts: + items: + - description: Context loader completion and error interrupt + - description: DTG interrupt used to signal context loader trigger time + - description: DTG interrupt for Vblank + + interrupt-names: + items: + - const: ctxld + - const: ctxld_kick + - const: vblank + + clocks: + items: + - description: Display APB clock for all peripheral PIO access interfaces + - description: Display AXI clock needed by DPR, Scaler, RTRAM_CTRL + - description: RTRAM clock + - description: Pixel clock, can be driven either by HDMI phy clock or MIPI + - description: DTRC clock, needed by video decompressor + + clock-names: + items: + - const: apb + - const: axi + - const: rtrm + - const: pix + - const: dtrc + + assigned-clocks: + items: + - description: Phandle and clock specifier of IMX8MQ_CLK_DISP_AXI_ROOT + - description: Phandle and clock specifier of IMX8MQ_CLK_DISP_RTRM + - description: Phandle and clock specifier of either IMX8MQ_VIDEO2_PLL1_REF_SEL or + IMX8MQ_VIDEO_PLL1_REF_SEL + + assigned-clock-parents: + items: + - description: Phandle and clock specifier of IMX8MQ_SYS1_PLL_800M + - description: Phandle and clock specifier of IMX8MQ_SYS1_PLL_800M + - description: Phandle and clock specifier of IMX8MQ_CLK_27M + + assigned-clock-rates: + items: + - description: Must be 800 MHz + - description: Must be 400 MHz + + port: + type: object + description: + A port node pointing to the input port of a HDMI/DP or MIPI display bridge. + +additionalProperties: false + +examples: + - | + #include <dt-bindings/clock/imx8mq-clock.h> + dcss: display-controller@32e00000 { + compatible = "nxp,imx8mq-dcss"; + reg = <0x32e00000 0x2d000>, <0x32e2f000 0x1000>; + interrupts = <6>, <8>, <9>; + interrupt-names = "ctxld", "ctxld_kick", "vblank"; + interrupt-parent = <&irqsteer>; + clocks = <&clk IMX8MQ_CLK_DISP_APB_ROOT>, <&clk IMX8MQ_CLK_DISP_AXI_ROOT>, + <&clk IMX8MQ_CLK_DISP_RTRM_ROOT>, <&clk IMX8MQ_VIDEO2_PLL_OUT>, + <&clk IMX8MQ_CLK_DISP_DTRC>; + clock-names = "apb", "axi", "rtrm", "pix", "dtrc"; + assigned-clocks = <&clk IMX8MQ_CLK_DISP_AXI>, <&clk IMX8MQ_CLK_DISP_RTRM>, + <&clk IMX8MQ_VIDEO2_PLL1_REF_SEL>; + assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_800M>, <&clk IMX8MQ_SYS1_PLL_800M>, + <&clk IMX8MQ_CLK_27M>; + assigned-clock-rates = <800000000>, + <400000000>; + port { + dcss_out: endpoint { + remote-endpoint = <&hdmi_in>; + }; + }; + }; + diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt index b91e709db7a4..121220745d46 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt @@ -43,7 +43,7 @@ Required properties (all function blocks): "mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt "mediatek,<chip>-disp-mutex" - display mutex "mediatek,<chip>-disp-od" - overdrive - the supported chips are mt2701, mt2712 and mt8173. + the supported chips are mt2701, mt7623, mt2712 and mt8173. - reg: Physical base address and length of the function block register space - interrupts: The interrupt signal from the function block (required, except for merge and split function blocks). 
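Both the cdns,mhdp8546 bridge and the i.MX8MQ DCSS bindings above tie the display pipeline together with OF-graph port nodes. As a hedged sketch of the usual consumer side, here is how an encoder driver commonly resolves the downstream bridge behind such a port with the stock drm_of helper (encoder_attach_next_bridge() is hypothetical, and the port/endpoint indices are illustrative, not taken from either binding):

#include <linux/device.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>

static int encoder_attach_next_bridge(struct device *dev,
				      struct drm_encoder *encoder)
{
	struct drm_bridge *bridge;
	int ret;

	/* Follow port 0, endpoint 0 of our own OF node through
	 * remote-endpoint to whatever bridge sits there. */
	ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
					  NULL, &bridge);
	if (ret)
		return ret;	/* commonly -EPROBE_DEFER */

	return drm_bridge_attach(encoder, bridge, NULL, 0);
}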
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt index 77def4456706..dc1ebd13cc88 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt @@ -7,7 +7,7 @@ output bus. Required properties: - compatible: "mediatek,<chip>-dpi" - the supported chips are mt2701 , mt8173 and mt8183. + the supported chips are mt2701, mt7623, mt8173 and mt8183. - reg: Physical base address and length of the controller's registers - interrupts: The interrupt signal from the function block. - clocks: device clocks diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt index 8e4729de8c85..f06f24d405a5 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt @@ -7,7 +7,7 @@ channel output. Required properties: - compatible: "mediatek,<chip>-dsi" - the supported chips are mt2701, mt8173 and mt8183. +- the supported chips are mt2701, mt7623, mt8173 and mt8183. - reg: Physical base address and length of the controller's registers - interrupts: The interrupt signal from the function block. - clocks: device clocks @@ -26,7 +26,7 @@ The MIPI TX configuration module controls the MIPI D-PHY. Required properties: - compatible: "mediatek,<chip>-mipi-tx" - the supported chips are mt2701, mt8173 and mt8183. +- the supported chips are mt2701, mt7623, mt8173 and mt8183. - reg: Physical base address and length of the controller's registers - clocks: PLL reference clock - clock-output-names: name of the output clock line to the DSI encoder diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt index 7b124242b0c5..6b1c586403e4 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt @@ -6,6 +6,7 @@ its parallel input. Required properties: - compatible: Should be "mediatek,<chip>-hdmi". +- the supported chips are mt2701, mt7623 and mt8173 - reg: Physical base address and length of the controller's registers - interrupts: The interrupt signal from the function block. - clocks: device clocks @@ -32,6 +33,7 @@ The HDMI CEC controller handles hotplug detection and CEC communication. Required properties: - compatible: Should be "mediatek,<chip>-cec" +- the supported chips are mt7623 and mt8173 - reg: Physical base address and length of the controller's registers - interrupts: The interrupt signal from the function block. - clocks: device clock @@ -44,6 +46,7 @@ The Mediatek's I2C controller is used to interface with I2C devices. Required properties: - compatible: Should be "mediatek,<chip>-hdmi-ddc" +- the supported chips are mt7623 and mt8173 - reg: Physical base address and length of the controller's registers - clocks: device clock - clock-names: Should be "ddc-i2c". @@ -56,6 +59,7 @@ output and drives the HDMI pads.
Required properties: - compatible: "mediatek,<chip>-hdmi-phy" +- the supported chips are mt2701, mt7623 and mt8173 - reg: Physical base address and length of the module's registers - clocks: PLL reference clock - clock-names: must contain "pll_ref" diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt index 7884fd7a85c1..b9a64d3ff184 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi.txt +++ b/Documentation/devicetree/bindings/display/msm/dsi.txt @@ -90,6 +90,8 @@ Required properties: * "qcom,dsi-phy-14nm-660" * "qcom,dsi-phy-10nm" * "qcom,dsi-phy-10nm-8998" + * "qcom,dsi-phy-7nm" + * "qcom,dsi-phy-7nm-8150" - reg: Physical base address and length of the registers of PLL, PHY. Some revisions require the PHY regulator base address, whereas others require the PHY lane base address. See below for each PHY revision. @@ -98,7 +100,7 @@ Required properties: * "dsi_pll" * "dsi_phy" * "dsi_phy_regulator" - For DSI 14nm and 10nm PHYs: + For DSI 14nm, 10nm and 7nm PHYs: * "dsi_pll" * "dsi_phy" * "dsi_phy_lane" @@ -116,7 +118,7 @@ Required properties: - vcca-supply: phandle to vcca regulator device node For 14nm PHY: - vcca-supply: phandle to vcca regulator device node - For 10nm PHY: + For 10nm and 7nm PHY: - vdds-supply: phandle to vdds regulator device node Optional properties: diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt index 51cd4d162770..7d65c24fcda8 100644 --- a/Documentation/devicetree/bindings/display/renesas,du.txt +++ b/Documentation/devicetree/bindings/display/renesas,du.txt @@ -3,6 +3,7 @@ Required Properties: - compatible: must be one of the following. + - "renesas,du-r8a7742" for R8A7742 (RZ/G1H) compatible DU - "renesas,du-r8a7743" for R8A7743 (RZ/G1M) compatible DU - "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU - "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU @@ -10,6 +11,7 @@ Required Properties: - "renesas,du-r8a774a1" for R8A774A1 (RZ/G2M) compatible DU - "renesas,du-r8a774b1" for R8A774B1 (RZ/G2N) compatible DU - "renesas,du-r8a774c0" for R8A774C0 (RZ/G2E) compatible DU + - "renesas,du-r8a774e1" for R8A774E1 (RZ/G2H) compatible DU - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU @@ -18,6 +20,7 @@ Required Properties: - "renesas,du-r8a7794" for R8A7794 (R-Car E2) compatible DU - "renesas,du-r8a7795" for R8A7795 (R-Car H3) compatible DU - "renesas,du-r8a7796" for R8A7796 (R-Car M3-W) compatible DU + - "renesas,du-r8a77961" for R8A77961 (R-Car M3-W+) compatible DU - "renesas,du-r8a77965" for R8A77965 (R-Car M3-N) compatible DU - "renesas,du-r8a77970" for R8A77970 (R-Car V3M) compatible DU - "renesas,du-r8a77980" for R8A77980 (R-Car V3H) compatible DU @@ -68,6 +71,7 @@ corresponding to each DU output. Port0 Port1 Port2 Port3 ----------------------------------------------------------------------------- + R8A7742 (RZ/G1H) DPAD 0 LVDS 0 LVDS 1 - R8A7743 (RZ/G1M) DPAD 0 LVDS 0 - - R8A7744 (RZ/G1N) DPAD 0 LVDS 0 - - R8A7745 (RZ/G1E) DPAD 0 DPAD 1 - - @@ -75,6 +79,7 @@ corresponding to each DU output. 
R8A774A1 (RZ/G2M) DPAD 0 HDMI 0 LVDS 0 - R8A774B1 (RZ/G2N) DPAD 0 HDMI 0 LVDS 0 - R8A774C0 (RZ/G2E) DPAD 0 LVDS 0 LVDS 1 - + R8A774E1 (RZ/G2H) DPAD 0 HDMI 0 LVDS 0 - R8A7779 (R-Car H1) DPAD 0 DPAD 1 - - R8A7790 (R-Car H2) DPAD 0 LVDS 0 LVDS 1 - R8A7791 (R-Car M2-W) DPAD 0 LVDS 0 - - @@ -83,6 +88,7 @@ corresponding to each DU output. R8A7794 (R-Car E2) DPAD 0 DPAD 1 - - R8A7795 (R-Car H3) DPAD 0 HDMI 0 HDMI 1 LVDS 0 R8A7796 (R-Car M3-W) DPAD 0 HDMI 0 LVDS 0 - + R8A77961 (R-Car M3-W+) DPAD 0 HDMI 0 LVDS 0 - R8A77965 (R-Car M3-N) DPAD 0 HDMI 0 LVDS 0 - R8A77970 (R-Car V3M) DPAD 0 LVDS 0 - - R8A77980 (R-Car V3H) DPAD 0 LVDS 0 - - diff --git a/Documentation/devicetree/bindings/display/ssd1307fb.txt b/Documentation/devicetree/bindings/display/ssd1307fb.txt index 27333b9551b3..2dcb6d12d137 100644 --- a/Documentation/devicetree/bindings/display/ssd1307fb.txt +++ b/Documentation/devicetree/bindings/display/ssd1307fb.txt @@ -19,6 +19,7 @@ Optional properties: - vbat-supply: The supply for VBAT - solomon,segment-no-remap: Display needs normal (non-inverted) data column to segment mapping + - solomon,col-offset: Offset of columns (COL/SEG) that the screen is mapped to. - solomon,com-seq: Display uses sequential COM pin configuration - solomon,com-lrremap: Display uses left-right COM pin remap - solomon,com-invdir: Display uses inverted COM pin scan direction diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst index eaaaafc21134..aa4d2420f79e 100644 --- a/Documentation/driver-api/driver-model/devres.rst +++ b/Documentation/driver-api/driver-model/devres.rst @@ -263,7 +263,7 @@ DMA dmam_pool_destroy() DRM - devm_drm_dev_init() + devm_drm_dev_alloc() GPIO devm_gpiod_get() diff --git a/Documentation/fb/fbcon.rst b/Documentation/fb/fbcon.rst index e57a3d1d085a..a7b487cba307 100644 --- a/Documentation/fb/fbcon.rst +++ b/Documentation/fb/fbcon.rst @@ -20,8 +20,8 @@ A. Configuration ================ The framebuffer console can be enabled by using your favorite kernel -configuration tool. It is under Device Drivers->Graphics Support->Frame -buffer Devices->Console display driver support->Framebuffer Console Support. +configuration tool. It is under Device Drivers->Graphics Support-> +Console display driver support->Framebuffer Console Support. Select 'y' to compile support statically or 'm' for module support. The module will be fbcon. 
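The devres.rst hunk above renames the managed DRM allocator to devm_drm_dev_alloc(), and several drivers later in this series (amdgpu, armada, aspeed) are converted to it. A minimal sketch of the pattern, with a hypothetical foo driver standing in for any of them:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <drm/drm_drv.h>

struct foo_device {
	struct drm_device drm;		/* must be embedded by value */
	void __iomem *regs;
};

static struct drm_driver foo_drm_driver = {
	.name = "foo",			/* stub; a real driver fills this in */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_device *foo;

	/* Allocates foo, initializes foo->drm, and ties the lifetime to
	 * pdev->dev, so the old drm_dev_put()/kfree() error paths go away. */
	foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
				 struct foo_device, drm);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	return drm_dev_register(&foo->drm, 0);
}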
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index 2a198838fca9..a20102f7db69 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -356,8 +356,6 @@ Code Seq# Include File Comments 0xEC 00-01 drivers/platform/chrome/cros_ec_dev.h ChromeOS EC driver 0xF3 00-3F drivers/usb/misc/sisusbvga/sisusb.h sisfb (in development) <mailto:thomas@winischhofer.net> -0xF4 00-1F video/mbxfb.h mbxfb - <mailto:raph@8d.com> 0xF6 all LTTng Linux Trace Toolkit Next Generation <mailto:mathieu.desnoyers@efficios.com> 0xFD all linux/dm-ioctl.h diff --git a/MAINTAINERS b/MAINTAINERS index 2611c18dec98..a976cac89777 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5411,7 +5411,7 @@ F: drivers/gpu/drm/panel/panel-arm-versatile.c DRM DRIVER FOR ASPEED BMC GFX M: Joel Stanley <joel@jms.id.au> -L: linux-aspeed@lists.ozlabs.org +L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers) S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/gpu/aspeed-gfx.txt @@ -5419,7 +5419,10 @@ F: drivers/gpu/drm/aspeed/ DRM DRIVER FOR AST SERVER GRAPHICS CHIPS M: Dave Airlie <airlied@redhat.com> -S: Odd Fixes +R: Thomas Zimmermann <tzimmermann@suse.de> +L: dri-devel@lists.freedesktop.org +S: Supported +T: git git://anongit.freedesktop.org/drm/drm-misc F: drivers/gpu/drm/ast/ DRM DRIVER FOR BOCHS VIRTUAL GPU @@ -5507,7 +5510,10 @@ F: include/uapi/drm/mga_drm.h DRM DRIVER FOR MGA G200 GRAPHICS CHIPS M: Dave Airlie <airlied@redhat.com> -S: Odd Fixes +R: Thomas Zimmermann <tzimmermann@suse.de> +L: dri-devel@lists.freedesktop.org +S: Supported +T: git git://anongit.freedesktop.org/drm/drm-misc F: drivers/gpu/drm/mgag200/ DRM DRIVER FOR MI0283QT @@ -5652,13 +5658,15 @@ F: drivers/gpu/drm/panel/panel-tpo-tpg110.c DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS M: Dave Airlie <airlied@redhat.com> R: Sean Paul <sean@poorly.run> +R: Thomas Zimmermann <tzimmermann@suse.de> L: dri-devel@lists.freedesktop.org -S: Odd Fixes +S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc F: drivers/gpu/drm/udl/ DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS) M: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com> +M: Melissa Wen <melissa.srw@gmail.com> R: Haneen Mohammed <hamohammed.sa@gmail.com> R: Daniel Vetter <daniel@ffwll.ch> L: dri-devel@lists.freedesktop.org @@ -5819,6 +5827,7 @@ L: dri-devel@lists.freedesktop.org S: Supported F: Documentation/devicetree/bindings/display/mediatek/ F: drivers/gpu/drm/mediatek/ +F: drivers/phy/mediatek/phy-mtk-hdmi* DRM DRIVERS FOR NVIDIA TEGRA M: Thierry Reding <thierry.reding@gmail.com> @@ -12462,6 +12471,14 @@ F: drivers/iio/gyro/fxas21002c_core.c F: drivers/iio/gyro/fxas21002c_i2c.c F: drivers/iio/gyro/fxas21002c_spi.c +NXP i.MX 8MQ DCSS DRIVER +M: Laurentiu Palcu <laurentiu.palcu@oss.nxp.com> +R: Lucas Stach <l.stach@pengutronix.de> +L: dri-devel@lists.freedesktop.org +S: Maintained +F: Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml +F: drivers/gpu/drm/imx/dcss/ + NXP SGTL5000 DRIVER M: Fabio Estevam <festevam@gmail.com> L: alsa-devel@alsa-project.org (moderated for non-subscribers) diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 5e2bfbcf526f..a8d7d83ac761 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -67,7 +67,15 @@ ACPI_MODULE_NAME("acpi_lpss"); #define LPSS_CLK_DIVIDER BIT(2) #define LPSS_LTR BIT(3) #define LPSS_SAVE_CTX BIT(4) -#define 
LPSS_NO_D3_DELAY BIT(5) +/* + * For some devices the DSDT AML code for another device turns off the device + * before our suspend handler runs, causing us to read/save all 1-s (0xffffffff) + * as ctx register values. + * Luckily these devices always use the same ctx register values, so we can + * work around this by saving the ctx registers once on activation. + */ +#define LPSS_SAVE_CTX_ONCE BIT(5) +#define LPSS_NO_D3_DELAY BIT(6) struct lpss_private_data; @@ -254,9 +262,10 @@ static const struct lpss_device_desc byt_pwm_dev_desc = { }; static const struct lpss_device_desc bsw_pwm_dev_desc = { - .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, + .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY, .prv_offset = 0x800, .setup = bsw_pwm_setup, + .resume_from_noirq = true, }; static const struct lpss_device_desc byt_uart_dev_desc = { @@ -884,9 +893,14 @@ static int acpi_lpss_activate(struct device *dev) * we have to deassert reset line to be sure that ->probe() will * recognize the device. */ - if (pdata->dev_desc->flags & LPSS_SAVE_CTX) + if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE)) lpss_deassert_reset(pdata); +#ifdef CONFIG_PM + if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE) + acpi_lpss_save_ctx(dev, pdata); +#endif + return 0; } @@ -1030,7 +1044,7 @@ static int acpi_lpss_resume(struct device *dev) acpi_lpss_d3_to_d0_delay(pdata); - if (pdata->dev_desc->flags & LPSS_SAVE_CTX) + if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE)) acpi_lpss_restore_ctx(dev, pdata); return 0; diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index 6914e4f0ce98..2b2095542816 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c @@ -425,7 +425,7 @@ static int agp_amdk7_probe(struct pci_dev *pdev, return -ENOMEM; bridge->driver = &amd_irongate_driver; - bridge->dev_private_data = &amd_irongate_private, + bridge->dev_private_data = &amd_irongate_private; bridge->dev = pdev; bridge->capndx = cap_ptr; diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index 623205bcd04a..f78e756157db 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c @@ -382,7 +382,7 @@ static int agp_nvidia_probe(struct pci_dev *pdev, return -ENOMEM; bridge->driver = &nvidia_driver; - bridge->dev_private_data = &nvidia_private, + bridge->dev_private_data = &nvidia_private; bridge->dev = pdev; bridge->capndx = cap_ptr; diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c index 7729414100ff..f875970bda65 100644 --- a/drivers/char/agp/sworks-agp.c +++ b/drivers/char/agp/sworks-agp.c @@ -513,7 +513,7 @@ static int agp_serverworks_probe(struct pci_dev *pdev, return -ENOMEM; bridge->driver = &sworks_driver; - bridge->dev_private_data = &serverworks_private, + bridge->dev_private_data = &serverworks_private; bridge->dev = pci_dev_get(pdev); pci_set_drvdata(pdev, bridge); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 43624b4ee13d..7475e09b0680 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -283,6 +283,7 @@ EXPORT_SYMBOL(dma_fence_begin_signalling); /** * dma_fence_end_signalling - end a critical DMA fence signalling section + * @cookie: opaque cookie from dma_fence_begin_signalling() * * Closes a critical section annotation opened by dma_fence_begin_signalling(). 
*/ diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 434a3314fb0e..1c8f2581cb09 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -98,12 +98,14 @@ static int __init dma_resv_lockdep(void) struct mm_struct *mm = mm_alloc(); struct ww_acquire_ctx ctx; struct dma_resv obj; + struct address_space mapping; int ret; if (!mm) return -ENOMEM; dma_resv_init(&obj); + address_space_init_once(&mapping); mmap_read_lock(mm); ww_acquire_init(&ctx, &reservation_ww_class); @@ -111,6 +113,9 @@ static int __init dma_resv_lockdep(void) if (ret == -EDEADLK) dma_resv_lock_slow(&obj, &ctx); fs_reclaim_acquire(GFP_KERNEL); + /* for unmap_mapping_range on trylocked buffer objects in shrinkers */ + i_mmap_lock_write(&mapping); + i_mmap_unlock_write(&mapping); #ifdef CONFIG_MMU_NOTIFIER lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); __dma_fence_might_wait(); diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c index 9f964ca3f59c..d0696cf937af 100644 --- a/drivers/dma-buf/heaps/heap-helpers.c +++ b/drivers/dma-buf/heaps/heap-helpers.c @@ -140,13 +140,12 @@ struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direction) { struct dma_heaps_attachment *a = attachment->priv; - struct sg_table *table; - - table = &a->table; + struct sg_table *table = &a->table; + int ret; - if (!dma_map_sg(attachment->dev, table->sgl, table->nents, - direction)) - table = ERR_PTR(-ENOMEM); + ret = dma_map_sgtable(attachment->dev, table, direction, 0); + if (ret) + table = ERR_PTR(ret); return table; } @@ -154,7 +153,7 @@ static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table, enum dma_data_direction direction) { - dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction); + dma_unmap_sgtable(attachment->dev, table, direction, 0); } static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf) diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index acb26c627d27..db732f71e59a 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -63,10 +63,9 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf, GFP_KERNEL); if (ret < 0) goto err; - if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) { - ret = -EINVAL; + ret = dma_map_sgtable(dev, sg, direction, 0); + if (ret < 0) goto err; - } return sg; err: @@ -78,7 +77,7 @@ err: static void put_sg_table(struct device *dev, struct sg_table *sg, enum dma_data_direction direction) { - dma_unmap_sg(dev, sg->sgl, sg->nents, direction); + dma_unmap_sgtable(dev, sg, direction, 0); sg_free_table(sg); kfree(sg); } @@ -308,6 +307,9 @@ static long udmabuf_ioctl(struct file *filp, unsigned int ioctl, static const struct file_operations udmabuf_fops = { .owner = THIS_MODULE, .unlocked_ioctl = udmabuf_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = udmabuf_ioctl, +#endif }; static struct miscdevice udmabuf_misc = { diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 2f31579f91d4..81569009f884 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -100,7 +100,7 @@ obj-$(CONFIG_DRM_MSM) += msm/ obj-$(CONFIG_DRM_TEGRA) += tegra/ obj-$(CONFIG_DRM_STM) += stm/ obj-$(CONFIG_DRM_STI) += sti/ -obj-$(CONFIG_DRM_IMX) += imx/ +obj-y += imx/ obj-$(CONFIG_DRM_INGENIC) += ingenic/ obj-$(CONFIG_DRM_MEDIATEK) += mediatek/ obj-$(CONFIG_DRM_MESON) += meson/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 427a7c5ba93d..957934926b24 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -303,7 +303,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, switch (bo->tbo.mem.mem_type) { case TTM_PL_TT: - sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, + sgt = drm_prime_pages_to_sg(obj->dev, + bo->tbo.ttm->pages, bo->tbo.num_pages); if (IS_ERR(sgt)) return sgt; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f3e2fbcfadfb..c241317edee7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1164,25 +1164,20 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, if (ret) return ret; - adev = kzalloc(sizeof(*adev), GFP_KERNEL); - if (!adev) - return -ENOMEM; + adev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*adev), ddev); + if (IS_ERR(adev)) + return PTR_ERR(adev); adev->dev = &pdev->dev; adev->pdev = pdev; ddev = adev_to_drm(adev); - ret = drm_dev_init(ddev, &kms_driver, &pdev->dev); - if (ret) - goto err_free; - - drmm_add_final_kfree(ddev, adev); if (!supports_atomic) ddev->driver_features &= ~DRIVER_ATOMIC; ret = pci_enable_device(pdev); if (ret) - goto err_free; + return ret; ddev->pdev = pdev; pci_set_drvdata(pdev, ddev); @@ -1210,8 +1205,6 @@ retry_init: err_pci: pci_disable_device(pdev); -err_free: - drm_dev_put(ddev); return ret; } @@ -1228,7 +1221,6 @@ amdgpu_pci_remove(struct pci_dev *pdev) amdgpu_driver_unload_kms(dev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); - drm_dev_put(dev); } static void diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 2b1b7c136343..f203e4a6a3f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -95,8 +95,6 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) man->use_tt = true; man->func = &amdgpu_gtt_mgr_func; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 887a4da5eefc..ac043baac05d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -136,8 +136,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; + places[c].mem_type = TTM_PL_VRAM; + places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) places[c].lpfn = visible_pfn; @@ -152,7 +152,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (domain & AMDGPU_GEM_DOMAIN_GTT) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_FLAG_TT; + places[c].mem_type = TTM_PL_TT; + places[c].flags = 0; if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) places[c].flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; @@ -164,7 +165,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (domain & AMDGPU_GEM_DOMAIN_CPU) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_FLAG_SYSTEM; + places[c].mem_type = TTM_PL_SYSTEM; + places[c].flags = 0; if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) places[c].flags |= TTM_PL_FLAG_WC | 
TTM_PL_FLAG_UNCACHED; @@ -176,28 +178,32 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (domain & AMDGPU_GEM_DOMAIN_GDS) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS; + places[c].mem_type = AMDGPU_PL_GDS; + places[c].flags = TTM_PL_FLAG_UNCACHED; c++; } if (domain & AMDGPU_GEM_DOMAIN_GWS) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS; + places[c].mem_type = AMDGPU_PL_GWS; + places[c].flags = TTM_PL_FLAG_UNCACHED; c++; } if (domain & AMDGPU_GEM_DOMAIN_OA) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA; + places[c].mem_type = AMDGPU_PL_OA; + places[c].flags = TTM_PL_FLAG_UNCACHED; c++; } if (!c) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + places[c].mem_type = TTM_PL_SYSTEM; + places[c].flags = TTM_PL_MASK_CACHING; c++; } @@ -594,7 +600,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && - bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { + bo->tbo.mem.mem_type == TTM_PL_VRAM) { struct dma_fence *fence; r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 4a85f8cedd77..8039d2399584 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -63,12 +63,15 @@ #define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128 +static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, + struct ttm_resource *bo_mem); + static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev, unsigned int type, uint64_t size) { return ttm_range_man_init(&adev->mman.bdev, type, - TTM_PL_FLAG_UNCACHED, TTM_PL_FLAG_UNCACHED, false, size >> PAGE_SHIFT); } @@ -88,7 +91,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM + .mem_type = TTM_PL_SYSTEM, + .flags = TTM_PL_MASK_CACHING }; /* Don't handle scatter gather BOs */ @@ -175,24 +179,6 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) } /** - * amdgpu_move_null - Register memory for a buffer object - * - * @bo: The bo to assign the memory to - * @new_mem: The memory to be assigned. - * - * Assign the memory from new_mem to the memory of the buffer object bo. - */ -static void amdgpu_move_null(struct ttm_buffer_object *bo, - struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - - BUG_ON(old_mem->mm_node != NULL); - *old_mem = *new_mem; - new_mem->mm_node = NULL; -} - -/** * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer. * * @bo: The bo to assign the memory to. 
@@ -514,9 +500,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, /* Always block for VM page tables before committing the new location */ if (bo->type == ttm_bo_type_kernel) - r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem); + r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem); else - r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); + r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem); dma_fence_put(fence); return r; @@ -551,7 +537,8 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, placement.busy_placement = &placements; placements.fpfn = 0; placements.lpfn = 0; - placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + placements.mem_type = TTM_PL_TT; + placements.flags = TTM_PL_MASK_CACHING; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); if (unlikely(r)) { pr_err("Failed to find GTT space for blit from VRAM\n"); @@ -564,8 +551,12 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, goto out_cleanup; } + r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); + if (unlikely(r)) + goto out_cleanup; + /* Bind the memory to the GTT space */ - r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx); + r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } @@ -607,7 +598,8 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, placement.busy_placement = &placements; placements.fpfn = 0; placements.lpfn = 0; - placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + placements.mem_type = TTM_PL_TT; + placements.flags = TTM_PL_MASK_CACHING; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); if (unlikely(r)) { pr_err("Failed to find GTT space for blit to VRAM\n"); @@ -676,7 +668,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, adev = amdgpu_ttm_adev(bo->bdev); if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { - amdgpu_move_null(bo, new_mem); + ttm_bo_move_null(bo, new_mem); return 0; } if ((old_mem->mem_type == TTM_PL_TT && @@ -684,7 +676,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_TT)) { /* bind is enough */ - amdgpu_move_null(bo, new_mem); + ttm_bo_move_null(bo, new_mem); return 0; } if (old_mem->mem_type == AMDGPU_PL_GDS || @@ -694,7 +686,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, new_mem->mem_type == AMDGPU_PL_GWS || new_mem->mem_type == AMDGPU_PL_OA) { /* Nothing to save here */ - amdgpu_move_null(bo, new_mem); + ttm_bo_move_null(bo, new_mem); return 0; } @@ -773,7 +765,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr + mem->bus.offset; - mem->bus.base = adev->gmc.aper_base; + mem->bus.offset += adev->gmc.aper_base; mem->bus.is_iomem = true; break; default: @@ -785,12 +777,13 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, unsigned long page_offset) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); uint64_t offset = (page_offset << PAGE_SHIFT); struct drm_mm_node *mm; mm = amdgpu_find_mm_node(&bo->mem, &offset); - return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + - (offset >> PAGE_SHIFT); + offset += adev->gmc.aper_base; + return mm->start + (offset >> PAGE_SHIFT); } /** @@ -824,6 +817,7 @@ struct amdgpu_ttm_tt { uint64_t userptr; struct task_struct *usertask; uint32_t userflags; + 
bool bound; #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) struct hmm_range *range; #endif @@ -991,9 +985,10 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) * * Called by amdgpu_ttm_backend_bind() **/ -static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) +static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; int r; @@ -1028,9 +1023,10 @@ release_sg: /** * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages */ -static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) +static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); @@ -1111,16 +1107,23 @@ gart_bind_fail: * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem(). * This handles binding GTT memory to the device address space. */ -static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, +static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct ttm_resource *bo_mem) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void*)ttm; uint64_t flags; int r = 0; + if (!bo_mem) + return -EINVAL; + + if (gtt->bound) + return 0; + if (gtt->userptr) { - r = amdgpu_ttm_tt_pin_userptr(ttm); + r = amdgpu_ttm_tt_pin_userptr(bdev, ttm); if (r) { DRM_ERROR("failed to pin userptr\n"); return r; @@ -1152,6 +1155,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, if (r) DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", ttm->num_pages, gtt->offset); + gtt->bound = true; return r; } @@ -1191,8 +1195,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) placement.busy_placement = &placements; placements.fpfn = 0; placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; - placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) | - TTM_PL_FLAG_TT; + placements.mem_type = TTM_PL_TT; + placements.flags = bo->mem.placement; r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); if (unlikely(r)) @@ -1243,15 +1247,19 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and * ttm_tt_destroy(). 
*/ -static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) +static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; int r; + if (!gtt->bound) + return; + /* if the pages have userptr pinning then clear that first */ if (gtt->userptr) - amdgpu_ttm_tt_unpin_userptr(ttm); + amdgpu_ttm_tt_unpin_userptr(bdev, ttm); if (gtt->offset == AMDGPU_BO_INVALID_OFFSET) return; @@ -1261,12 +1269,16 @@ static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) if (r) DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n", gtt->ttm.ttm.num_pages, gtt->offset); + gtt->bound = false; } -static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm) +static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; + amdgpu_ttm_backend_unbind(bdev, ttm); + ttm_tt_destroy_common(bdev, ttm); if (gtt->usertask) put_task_struct(gtt->usertask); @@ -1274,12 +1286,6 @@ static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm) kfree(gtt); } -static struct ttm_backend_func amdgpu_backend_func = { - .bind = &amdgpu_ttm_backend_bind, - .unbind = &amdgpu_ttm_backend_unbind, - .destroy = &amdgpu_ttm_backend_destroy, -}; - /** * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO * @@ -1296,7 +1302,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, if (gtt == NULL) { return NULL; } - gtt->ttm.ttm.func = &amdgpu_backend_func; gtt->gobj = &bo->base; /* allocate space for the uninitialized page entries */ @@ -1313,10 +1318,11 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, * Map the pages of a ttm_tt object to an address space visible * to the underlying device. */ -static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, - struct ttm_operation_ctx *ctx) +static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */ @@ -1326,7 +1332,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, return -ENOMEM; ttm->page_flags |= TTM_PAGE_FLAG_SG; - ttm->state = tt_unbound; + ttm_tt_set_populated(ttm); return 0; } @@ -1346,7 +1352,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); - ttm->state = tt_unbound; + ttm_tt_set_populated(ttm); return 0; } @@ -1367,7 +1373,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, * Unmaps pages of a ttm_tt object from the device address space and * unpopulates the page array backing it. 
*/ -static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) +static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_device *adev; @@ -1391,7 +1397,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) if (ttm->page_flags & TTM_PAGE_FLAG_SG) return; - adev = amdgpu_ttm_adev(ttm->bdev); + adev = amdgpu_ttm_adev(bdev); #ifdef CONFIG_SWIOTLB if (adev->need_swiotlb && swiotlb_nr_tbl()) { @@ -1697,6 +1703,9 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .ttm_tt_create = &amdgpu_ttm_tt_create, .ttm_tt_populate = &amdgpu_ttm_tt_populate, .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, + .ttm_tt_bind = &amdgpu_ttm_backend_bind, + .ttm_tt_unbind = &amdgpu_ttm_backend_unbind, + .ttm_tt_destroy = &amdgpu_ttm_backend_destroy, .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, .evict_flags = &amdgpu_evict_flags, .move = &amdgpu_bo_move, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 68c5e3662b87..a87951b2f06d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -32,10 +32,6 @@ #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) #define AMDGPU_PL_OA (TTM_PL_PRIV + 2) -#define AMDGPU_PL_FLAG_GDS (TTM_PL_FLAG_PRIV << 0) -#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1) -#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2) - #define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index b2adc2abc581..01c1171afbe0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -179,9 +179,6 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) struct ttm_resource_manager *man = &mgr->manager; int ret; - man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; - ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT); man->func = &amdgpu_vram_mgr_func; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 57738164625b..bb1bc7f5d149 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7494,6 +7494,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) bool mode_set_reset_required = false; drm_atomic_helper_update_legacy_modeset_state(dev, state); + drm_atomic_helper_calc_timestamping_constants(state); dm_state = dm_atomic_get_new_state(state); if (dm_state && dm_state->context) { diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 38dfaa46d306..a887b6a5f8bd 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -757,7 +757,7 @@ static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) static void armada_drm_crtc_destroy(struct drm_crtc *crtc) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); - struct armada_private *priv = crtc->dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(crtc->dev); if (dcrtc->cursor_obj) drm_gem_object_put(&dcrtc->cursor_obj->obj); @@ -901,7 +901,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, struct resource *res, int irq, const struct armada_variant *variant, struct device_node *port) { - struct armada_private *priv = drm->dev_private; + 
struct armada_private *priv = drm_to_armada_dev(drm); struct armada_crtc *dcrtc; struct drm_plane *primary; void __iomem *base; diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c index c6fc2f1d58e9..29f4b52e3c8d 100644 --- a/drivers/gpu/drm/armada/armada_debugfs.c +++ b/drivers/gpu/drm/armada/armada_debugfs.c @@ -19,7 +19,7 @@ static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct armada_private *priv = dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(dev); struct drm_printer p = drm_seq_file_printer(m); mutex_lock(&priv->linear_lock); diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h index a11bdaccbb33..6a5a87932576 100644 --- a/drivers/gpu/drm/armada/armada_drm.h +++ b/drivers/gpu/drm/armada/armada_drm.h @@ -73,6 +73,8 @@ struct armada_private { #endif }; +#define drm_to_armada_dev(dev) container_of(dev, struct armada_private, drm) + int armada_fbdev_init(struct drm_device *); void armada_fbdev_fini(struct drm_device *); diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 5fc25c3f445c..980d3f1f8f16 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -87,24 +87,13 @@ static int armada_drm_bind(struct device *dev) "armada-drm")) return -EBUSY; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - /* - * The drm_device structure must be at the start of - * armada_private for drm_dev_put() to work correctly. - */ - BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0); - - ret = drm_dev_init(&priv->drm, &armada_drm_driver, dev); - if (ret) { - dev_err(dev, "[" DRM_NAME ":%s] drm_dev_init failed: %d\n", - __func__, ret); - kfree(priv); - return ret; + priv = devm_drm_dev_alloc(dev, &armada_drm_driver, + struct armada_private, drm); + if (IS_ERR(priv)) { + dev_err(dev, "[" DRM_NAME ":%s] devm_drm_dev_alloc failed: %li\n", + __func__, PTR_ERR(priv)); + return PTR_ERR(priv); } - drmm_add_final_kfree(&priv->drm, priv); /* Remove early framebuffers */ ret = drm_fb_helper_remove_conflicting_framebuffers(NULL, @@ -117,8 +106,6 @@ static int armada_drm_bind(struct device *dev) return ret; } - priv->drm.dev_private = priv; - dev_set_drvdata(dev, &priv->drm); /* Mode setting support */ @@ -174,14 +161,13 @@ static int armada_drm_bind(struct device *dev) err_kms: drm_mode_config_cleanup(&priv->drm); drm_mm_takedown(&priv->linear); - drm_dev_put(&priv->drm); return ret; } static void armada_drm_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct armada_private *priv = drm->dev_private; + struct armada_private *priv = drm_to_armada_dev(drm); drm_kms_helper_poll_fini(&priv->drm); armada_fbdev_fini(&priv->drm); @@ -194,8 +180,6 @@ static void armada_drm_unbind(struct device *dev) drm_mode_config_cleanup(&priv->drm); drm_mm_takedown(&priv->linear); - - drm_dev_put(&priv->drm); } static int compare_of(struct device *dev, void *data) diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index 0c4601275507..38f5170c0fea 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -117,7 +117,7 @@ static const struct drm_fb_helper_funcs armada_fb_helper_funcs = { int armada_fbdev_init(struct drm_device *dev) { - struct armada_private *priv = dev->dev_private; + struct armada_private *priv 
= drm_to_armada_dev(dev); struct drm_fb_helper *fbh; int ret; @@ -151,7 +151,7 @@ int armada_fbdev_init(struct drm_device *dev) void armada_fbdev_fini(struct drm_device *dev) { - struct armada_private *priv = dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(dev); struct drm_fb_helper *fbh = priv->fbdev; if (fbh) { diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 8005614d2e6b..6654bccd9466 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -39,7 +39,7 @@ static size_t roundup_gem_size(size_t size) void armada_gem_free_object(struct drm_gem_object *obj) { struct armada_gem_object *dobj = drm_to_armada_gem(obj); - struct armada_private *priv = obj->dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(obj->dev); DRM_DEBUG_DRIVER("release obj %p\n", dobj); @@ -77,7 +77,7 @@ void armada_gem_free_object(struct drm_gem_object *obj) int armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj) { - struct armada_private *priv = dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(dev); size_t size = obj->obj.size; if (obj->page || obj->linear) @@ -379,7 +379,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, struct armada_gem_object *dobj = drm_to_armada_gem(obj); struct scatterlist *sg; struct sg_table *sgt; - int i, num; + int i; sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) @@ -395,22 +395,18 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, mapping = dobj->obj.filp->f_mapping; - for_each_sg(sgt->sgl, sg, count, i) { + for_each_sgtable_sg(sgt, sg, i) { struct page *page; page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) { - num = i; + if (IS_ERR(page)) goto release; - } sg_set_page(sg, page, PAGE_SIZE, 0); } - if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) { - num = sgt->nents; + if (dma_map_sgtable(attach->dev, sgt, dir, 0)) goto release; - } } else if (dobj->page) { /* Single contiguous page */ if (sg_alloc_table(sgt, 1, GFP_KERNEL)) @@ -418,7 +414,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0); - if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + if (dma_map_sgtable(attach->dev, sgt, dir, 0)) goto free_table; } else if (dobj->linear) { /* Single contiguous physical region - no struct page */ @@ -432,8 +428,9 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, return sgt; release: - for_each_sg(sgt->sgl, sg, num, i) - put_page(sg_page(sg)); + for_each_sgtable_sg(sgt, sg, i) + if (sg_page(sg)) + put_page(sg_page(sg)); free_table: sg_free_table(sgt); free_sgt: @@ -449,11 +446,12 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, int i; if (!dobj->linear) - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); + dma_unmap_sgtable(attach->dev, sgt, dir, 0); if (dobj->obj.filp) { struct scatterlist *sg; - for_each_sg(sgt->sgl, sg, sgt->nents, i) + + for_each_sgtable_sg(sgt, sg, i) put_page(sg_page(sg)); } diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 07f0da4d9ba1..30e01101f59e 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -344,7 +344,7 @@ static int armada_overlay_set_property(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, uint64_t val) { - struct armada_private *priv = plane->dev->dev_private; + 
struct armada_private *priv = drm_to_armada_dev(plane->dev); #define K2R(val) (((val) >> 0) & 0xff) #define K2G(val) (((val) >> 8) & 0xff) @@ -412,7 +412,7 @@ static int armada_overlay_get_property(struct drm_plane *plane, const struct drm_plane_state *state, struct drm_property *property, uint64_t *val) { - struct armada_private *priv = plane->dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(plane->dev); #define C2K(c,s) (((c) >> (s)) & 0xff) #define R2BGR(r,g,b,s) (C2K(r,s) << 0 | C2K(g,s) << 8 | C2K(b,s) << 16) @@ -505,7 +505,7 @@ static const struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = { static int armada_overlay_create_properties(struct drm_device *dev) { - struct armada_private *priv = dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(dev); if (priv->colorkey_prop) return 0; @@ -539,7 +539,7 @@ static int armada_overlay_create_properties(struct drm_device *dev) int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs) { - struct armada_private *priv = dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(dev); struct drm_mode_object *mobj; struct drm_plane *overlay; int ret; diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 903f4f304647..2b424b2b85cc 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -63,15 +63,21 @@ static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static void aspeed_gfx_setup_mode_config(struct drm_device *drm) +static int aspeed_gfx_setup_mode_config(struct drm_device *drm) { - drm_mode_config_init(drm); + int ret; + + ret = drmm_mode_config_init(drm); + if (ret) + return ret; drm->mode_config.min_width = 0; drm->mode_config.min_height = 0; drm->mode_config.max_width = 800; drm->mode_config.max_height = 600; drm->mode_config.funcs = &aspeed_gfx_mode_config_funcs; + + return ret; } static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data) @@ -144,7 +150,9 @@ static int aspeed_gfx_load(struct drm_device *drm) writel(0, priv->base + CRT_CTRL1); writel(0, priv->base + CRT_CTRL2); - aspeed_gfx_setup_mode_config(drm); + ret = aspeed_gfx_setup_mode_config(drm); + if (ret < 0) + return ret; ret = drm_vblank_init(drm, 1); if (ret < 0) { @@ -179,7 +187,6 @@ static int aspeed_gfx_load(struct drm_device *drm) static void aspeed_gfx_unload(struct drm_device *drm) { drm_kms_helper_poll_fini(drm); - drm_mode_config_cleanup(drm); } DEFINE_DRM_GEM_CMA_FOPS(fops); diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index c1af6b725933..467049ca8430 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -177,6 +177,8 @@ struct ast_private *ast_device_create(struct drm_driver *drv, #define AST_IO_MM_OFFSET (0x380) +#define AST_IO_VGAIR1_VREFRESH BIT(3) + #define __ast_read(x) \ static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \ u##x val = 0;\ diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 62fe682a7de6..834a156e3a75 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -514,6 +514,16 @@ static void ast_set_start_address_crt1(struct ast_private *ast, } +static void ast_wait_for_vretrace(struct ast_private *ast) +{ + unsigned long timeout = jiffies + HZ; + u8 vgair1; + + do { + vgair1 = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ); + } while (!(vgair1 & AST_IO_VGAIR1_VREFRESH) && 
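/* poll until the vertical-refresh bit asserts or one second (HZ jiffies) elapses */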
time_before(jiffies, timeout)); +} + /* * Primary plane */ @@ -562,13 +572,24 @@ ast_primary_plane_helper_atomic_update(struct drm_plane *plane, struct drm_plane_state *state = plane->state; struct drm_gem_vram_object *gbo; s64 gpu_addr; + struct drm_framebuffer *fb = state->fb; + struct drm_framebuffer *old_fb = old_state->fb; - gbo = drm_gem_vram_of_gem(state->fb->obj[0]); + if (!old_fb || (fb->format != old_fb->format)) { + struct drm_crtc_state *crtc_state = state->crtc->state; + struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); + struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info; + + ast_set_color_reg(ast, fb->format); + ast_set_vbios_color_reg(ast, fb->format, vbios_mode_info); + } + + gbo = drm_gem_vram_of_gem(fb->obj[0]); gpu_addr = drm_gem_vram_offset(gbo); if (drm_WARN_ON_ONCE(dev, gpu_addr < 0)) return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */ - ast_set_offset_reg(ast, state->fb); + ast_set_offset_reg(ast, fb); ast_set_start_address_crt1(ast, (u32)gpu_addr); ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00); @@ -733,6 +754,7 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { + struct drm_device *dev = crtc->dev; struct ast_crtc_state *ast_state; const struct drm_format_info *format; bool succ; @@ -743,8 +765,8 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc, ast_state = to_ast_crtc_state(state); format = ast_state->format; - if (!format) - return 0; + if (drm_WARN_ON_ONCE(dev, !format)) + return -EINVAL; /* BUG: We didn't set format in primary check(). */ succ = ast_get_vbios_mode_info(format, &state->mode, &state->adjusted_mode, @@ -755,39 +777,17 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc, return 0; } -static void ast_crtc_helper_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) -{ - struct ast_private *ast = to_ast_private(crtc->dev); - - ast_open_key(ast); -} - -static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) +static void +ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) { struct drm_device *dev = crtc->dev; struct ast_private *ast = to_ast_private(dev); - struct ast_crtc_state *ast_state; - const struct drm_format_info *format; - struct ast_vbios_mode_info *vbios_mode_info; - struct drm_display_mode *adjusted_mode; - - ast_state = to_ast_crtc_state(crtc->state); - - format = ast_state->format; - if (!format) - return; - - vbios_mode_info = &ast_state->vbios_mode_info; - - ast_set_color_reg(ast, format); - ast_set_vbios_color_reg(ast, format, vbios_mode_info); - - if (!crtc->state->mode_changed) - return; - - adjusted_mode = &crtc->state->adjusted_mode; + struct drm_crtc_state *crtc_state = crtc->state; + struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); + struct ast_vbios_mode_info *vbios_mode_info = + &ast_crtc_state->vbios_mode_info; + struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info); ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); @@ -796,12 +796,7 @@ static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info); ast_set_crtthd_reg(ast); ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info); -} -static void -ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, - 
struct drm_crtc_state *old_crtc_state) -{ ast_crtc_dpms(crtc, DRM_MODE_DPMS_ON); } @@ -809,13 +804,32 @@ static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { + struct drm_device *dev = crtc->dev; + struct ast_private *ast = to_ast_private(dev); + ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + + /* + * HW cursors require the underlying primary plane and CRTC to + * display a valid mode and image. This is not the case during + * full modeset operations. So we temporarily disable any active + * plane, including the HW cursor. Each plane's atomic_update() + * helper will re-enable it if necessary. + * + * We only do this during *full* modesets. It does not affect + * simple pageflips on the planes. + */ + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); + + /* + * Ensure that no scanout takes place before reprogramming mode + * and format registers. + */ + ast_wait_for_vretrace(ast); } static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = { .atomic_check = ast_crtc_helper_atomic_check, - .atomic_begin = ast_crtc_helper_atomic_begin, - .atomic_flush = ast_crtc_helper_atomic_flush, .atomic_enable = ast_crtc_helper_atomic_enable, .atomic_disable = ast_crtc_helper_atomic_disable, }; @@ -1054,6 +1068,11 @@ static int ast_connector_init(struct drm_device *dev) * Mode config */ +static const struct drm_mode_config_helper_funcs +ast_mode_config_helper_funcs = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; + static const struct drm_mode_config_funcs ast_mode_config_funcs = { .fb_create = drm_gem_fb_create, .mode_valid = drm_vram_helper_mode_valid, @@ -1093,6 +1112,8 @@ int ast_mode_config_init(struct ast_private *ast) dev->mode_config.max_height = 1200; } + dev->mode_config.helper_private = &ast_mode_config_helper_funcs; + memset(&ast->primary_plane, 0, sizeof(ast->primary_plane)); ret = drm_universal_plane_init(dev, &ast->primary_plane, 0x01, &ast_primary_plane_funcs, diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 3e11af4e9f63..ef91646441b1 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -241,6 +241,8 @@ source "drivers/gpu/drm/bridge/analogix/Kconfig" source "drivers/gpu/drm/bridge/adv7511/Kconfig" +source "drivers/gpu/drm/bridge/cadence/Kconfig" + source "drivers/gpu/drm/bridge/synopsys/Kconfig" endmenu diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index c589a6a7cbe1..2b3aff104e46 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -25,4 +25,5 @@ obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o obj-y += analogix/ +obj-y += cadence/ obj-y += synopsys/ diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig new file mode 100644 index 000000000000..511d67b16d14 --- /dev/null +++ b/drivers/gpu/drm/bridge/cadence/Kconfig @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0-only +config DRM_CDNS_MHDP8546 + tristate "Cadence DPI/DP bridge" + select DRM_KMS_HELPER + select DRM_PANEL_BRIDGE + depends on OF + help + Support Cadence DPI to DP bridge. This is an internal + bridge and is meant to be directly embedded in a SoC. + It takes a DPI stream as input and outputs it encoded + in DP format. + +if DRM_CDNS_MHDP8546 + +config DRM_CDNS_MHDP8546_J721E + depends on ARCH_K3_J721E_SOC || COMPILE_TEST + bool "J721E Cadence DPI/DP wrapper support" + default y + help + Support J721E Cadence DPI/DP wrapper. 
This is a wrapper + which adds support for J721E related platform ops. It + initializes the J721E Display Port and sets up the + clock and data muxes. +endif diff --git a/drivers/gpu/drm/bridge/cadence/Makefile b/drivers/gpu/drm/bridge/cadence/Makefile new file mode 100644 index 000000000000..8f647991b374 --- /dev/null +++ b/drivers/gpu/drm/bridge/cadence/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_DRM_CDNS_MHDP8546) += cdns-mhdp8546.o +cdns-mhdp8546-y := cdns-mhdp8546-core.o +cdns-mhdp8546-$(CONFIG_DRM_CDNS_MHDP8546_J721E) += cdns-mhdp8546-j721e.o diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c new file mode 100644 index 000000000000..d0c65610ebb5 --- /dev/null +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -0,0 +1,2532 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence MHDP8546 DP bridge driver. + * + * Copyright (C) 2020 Cadence Design Systems, Inc. + * + * Authors: Quentin Schulz <quentin.schulz@free-electrons.com> + * Swapnil Jakhade <sjakhade@cadence.com> + * Yuti Amonkar <yamonkar@cadence.com> + * Tomi Valkeinen <tomi.valkeinen@ti.com> + * Jyri Sarha <jsarha@ti.com> + * + * TODO: + * - Implement optimized mailbox communication using mailbox interrupts + * - Add support for power management + * - Add support for features like audio, MST and fast link training + * - Implement request_fw_cancel to handle HW_STATE + * - Fix asynchronous loading of firmware implementation + * - Add DRM helper function for cdns_mhdp_lower_link_rate + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/firmware.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/phy/phy-dp.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/wait.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> + +#include <asm/unaligned.h> + +#include "cdns-mhdp8546-core.h" + +#include "cdns-mhdp8546-j721e.h" + +static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp) +{ + int ret, empty; + + WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex)); + + ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY, + empty, !empty, MAILBOX_RETRY_US, + MAILBOX_TIMEOUT_US); + if (ret < 0) + return ret; + + return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff; +} + +static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val) +{ + int ret, full; + + WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex)); + + ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL, + full, !full, MAILBOX_RETRY_US, + MAILBOX_TIMEOUT_US); + if (ret < 0) + return ret; + + writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA); + + return 0; +} + +static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp, + u8 module_id, u8 opcode, + u16 req_size) +{ + u32 mbox_size, i; + u8 header[4]; + int ret; + + /* read the header of the message */ + for (i = 0; i < sizeof(header); i++) { + ret = cdns_mhdp_mailbox_read(mhdp); + if (ret < 0) + return ret; + + header[i] = ret; + } + + mbox_size = get_unaligned_be16(header + 2); 
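+ /* header layout, as built by cdns_mhdp_mailbox_send(): [0] opcode, [1] module id, [2..3] big-endian payload size */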
+ + if (opcode != header[0] || module_id != header[1] || + req_size != mbox_size) { + /* + * If the message in mailbox is not what we want, we need to + * clear the mailbox by reading its contents. + */ + for (i = 0; i < mbox_size; i++) + if (cdns_mhdp_mailbox_read(mhdp) < 0) + break; + + return -EINVAL; + } + + return 0; +} + +static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp, + u8 *buff, u16 buff_size) +{ + u32 i; + int ret; + + for (i = 0; i < buff_size; i++) { + ret = cdns_mhdp_mailbox_read(mhdp); + if (ret < 0) + return ret; + + buff[i] = ret; + } + + return 0; +} + +static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id, + u8 opcode, u16 size, u8 *message) +{ + u8 header[4]; + int ret, i; + + header[0] = opcode; + header[1] = module_id; + put_unaligned_be16(size, header + 2); + + for (i = 0; i < sizeof(header); i++) { + ret = cdns_mhdp_mailbox_write(mhdp, header[i]); + if (ret) + return ret; + } + + for (i = 0; i < size; i++) { + ret = cdns_mhdp_mailbox_write(mhdp, message[i]); + if (ret) + return ret; + } + + return 0; +} + +static +int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value) +{ + u8 msg[4], resp[8]; + int ret; + + put_unaligned_be32(addr, msg); + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL, + GENERAL_REGISTER_READ, + sizeof(msg), msg); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL, + GENERAL_REGISTER_READ, + sizeof(resp)); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp)); + if (ret) + goto out; + + /* Returned address value should be the same as requested */ + if (memcmp(msg, resp, sizeof(msg))) { + ret = -EINVAL; + goto out; + } + + *value = get_unaligned_be32(resp + 4); + +out: + mutex_unlock(&mhdp->mbox_mutex); + if (ret) { + dev_err(mhdp->dev, "Failed to read register\n"); + *value = 0; + } + + return ret; +} + +static +int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val) +{ + u8 msg[6]; + int ret; + + put_unaligned_be16(addr, msg); + put_unaligned_be32(val, msg + 2); + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_WRITE_REGISTER, sizeof(msg), msg); + + mutex_unlock(&mhdp->mbox_mutex); + + return ret; +} + +static +int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr, + u8 start_bit, u8 bits_no, u32 val) +{ + u8 field[8]; + int ret; + + put_unaligned_be16(addr, field); + field[2] = start_bit; + field[3] = bits_no; + put_unaligned_be32(val, field + 4); + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_WRITE_FIELD, sizeof(field), field); + + mutex_unlock(&mhdp->mbox_mutex); + + return ret; +} + +static +int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp, + u32 addr, u8 *data, u16 len) +{ + u8 msg[5], reg[5]; + int ret; + + put_unaligned_be16(len, msg); + put_unaligned_be24(addr, msg + 2); + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_READ_DPCD, sizeof(msg), msg); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, + DPTX_READ_DPCD, + sizeof(reg) + len); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg)); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len); + +out: + mutex_unlock(&mhdp->mbox_mutex); + + return ret; +} + +static +int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 
addr, u8 value) +{ + u8 msg[6], reg[5]; + int ret; + + put_unaligned_be16(1, msg); + put_unaligned_be24(addr, msg + 2); + msg[5] = value; + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_WRITE_DPCD, sizeof(msg), msg); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, + DPTX_WRITE_DPCD, sizeof(reg)); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg)); + if (ret) + goto out; + + if (addr != get_unaligned_be24(reg + 2)) + ret = -EINVAL; + +out: + mutex_unlock(&mhdp->mbox_mutex); + + if (ret) + dev_err(mhdp->dev, "dpcd write failed: %d\n", ret); + return ret; +} + +static +int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable) +{ + u8 msg[5]; + int ret, i; + + msg[0] = GENERAL_MAIN_CONTROL; + msg[1] = MB_MODULE_ID_GENERAL; + msg[2] = 0; + msg[3] = 1; + msg[4] = enable ? FW_ACTIVE : FW_STANDBY; + + mutex_lock(&mhdp->mbox_mutex); + + for (i = 0; i < sizeof(msg); i++) { + ret = cdns_mhdp_mailbox_write(mhdp, msg[i]); + if (ret) + goto out; + } + + /* read the firmware state */ + ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg)); + if (ret) + goto out; + + ret = 0; + +out: + mutex_unlock(&mhdp->mbox_mutex); + + if (ret < 0) + dev_err(mhdp->dev, "set firmware active failed\n"); + return ret; +} + +static +int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp) +{ + u8 status; + int ret; + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_HPD_STATE, 0, NULL); + if (ret) + goto err_get_hpd; + + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, + DPTX_HPD_STATE, + sizeof(status)); + if (ret) + goto err_get_hpd; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status)); + if (ret) + goto err_get_hpd; + + mutex_unlock(&mhdp->mbox_mutex); + + dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__, + status ? 
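/* plugged */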
"" : "un"); + + return status; + +err_get_hpd: + mutex_unlock(&mhdp->mbox_mutex); + + return ret; +} + +static +int cdns_mhdp_get_edid_block(void *data, u8 *edid, + unsigned int block, size_t length) +{ + struct cdns_mhdp_device *mhdp = data; + u8 msg[2], reg[2], i; + int ret; + + mutex_lock(&mhdp->mbox_mutex); + + for (i = 0; i < 4; i++) { + msg[0] = block / 2; + msg[1] = block % 2; + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_GET_EDID, sizeof(msg), msg); + if (ret) + continue; + + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, + DPTX_GET_EDID, + sizeof(reg) + length); + if (ret) + continue; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg)); + if (ret) + continue; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length); + if (ret) + continue; + + if (reg[0] == length && reg[1] == block / 2) + break; + } + + mutex_unlock(&mhdp->mbox_mutex); + + if (ret) + dev_err(mhdp->dev, "get block[%d] edid failed: %d\n", + block, ret); + + return ret; +} + +static +int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp) +{ + u8 event = 0; + int ret; + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_READ_EVENT, 0, NULL); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, + DPTX_READ_EVENT, sizeof(event)); + if (ret < 0) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event)); +out: + mutex_unlock(&mhdp->mbox_mutex); + + if (ret < 0) + return ret; + + dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__, + (event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "", + (event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "", + (event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "", + (event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : ""); + + return event; +} + +static +int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes, + unsigned int udelay, const u8 *lanes_data, + u8 link_status[DP_LINK_STATUS_SIZE]) +{ + u8 payload[7]; + u8 hdr[5]; /* For DPCD read response header */ + u32 addr; + int ret; + + if (nlanes != 4 && nlanes != 2 && nlanes != 1) { + dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes); + ret = -EINVAL; + goto out; + } + + payload[0] = nlanes; + put_unaligned_be16(udelay, payload + 1); + memcpy(payload + 3, lanes_data, nlanes); + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_ADJUST_LT, + sizeof(payload), payload); + if (ret) + goto out; + + /* Yes, read the DPCD read command response */ + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, + DPTX_READ_DPCD, + sizeof(hdr) + DP_LINK_STATUS_SIZE); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr)); + if (ret) + goto out; + + addr = get_unaligned_be24(hdr + 2); + if (addr != DP_LANE0_1_STATUS) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status, + DP_LINK_STATUS_SIZE); + +out: + mutex_unlock(&mhdp->mbox_mutex); + + if (ret) + dev_err(mhdp->dev, "Failed to adjust Link Training.\n"); + + return ret; +} + +/** + * cdns_mhdp_link_power_up() - power up a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. 
+ */ +static +int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + /* + * According to the DP 1.1 specification, a "Sink Device must exit the + * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink + * Control Field" (register 0x600). + */ + usleep_range(1000, 2000); + + return 0; +} + +/** + * cdns_mhdp_link_power_down() - power down a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +static +int cdns_mhdp_link_power_down(struct drm_dp_aux *aux, + struct cdns_mhdp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + return 0; +} + +/** + * cdns_mhdp_link_configure() - configure a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +static +int cdns_mhdp_link_configure(struct drm_dp_aux *aux, + struct cdns_mhdp_link *link) +{ + u8 values[2]; + int err; + + values[0] = drm_dp_link_rate_to_bw_code(link->rate); + values[1] = link->num_lanes; + + if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (err < 0) + return err; + + return 0; +} + +static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp) +{ + return min(mhdp->host.link_rate, mhdp->sink.link_rate); +} + +static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp) +{ + return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt); +} + +static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp) +{ + return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp); +} + +static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp) +{ + /* Check if SSC is supported by both sides */ + return mhdp->host.ssc && mhdp->sink.ssc; +} + +static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp) +{ + dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged); + + if (mhdp->plugged) + return connector_status_connected; + else + return connector_status_disconnected; +} + +static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp) +{ + u32 major_num, minor_num, revision; + u32 fw_ver, lib_ver; + + fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8) + | readl(mhdp->regs + CDNS_VER_L); + + lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8) + | readl(mhdp->regs + CDNS_LIB_L_ADDR); + + if (lib_ver < 33984) { + /* + * Older FW versions with major number 1, used to store FW + * version information by storing repository revision number + * in registers. This is for identifying these FW versions. 
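+ * Such builds are identified below and reported as v1.2.x.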
+ */ + major_num = 1; + minor_num = 2; + if (fw_ver == 26098) { + revision = 15; + } else if (lib_ver == 0 && fw_ver == 0) { + revision = 17; + } else { + dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n", + fw_ver, lib_ver); + return -ENODEV; + } + } else { + /* To identify newer FW versions with major number 2 onwards. */ + major_num = fw_ver / 10000; + minor_num = (fw_ver / 100) % 100; + revision = (fw_ver % 10000) % 100; + } + + dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num, + revision); + return 0; +} + +static int cdns_mhdp_fw_activate(const struct firmware *fw, + struct cdns_mhdp_device *mhdp) +{ + unsigned int reg; + int ret; + + /* Release uCPU reset and stall it. */ + writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL); + + memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size); + + /* Leave debug mode, release stall */ + writel(0, mhdp->regs + CDNS_APB_CTRL); + + /* + * Wait for the KEEP_ALIVE "message" on the first 8 bits. + * Updated each sched "tick" (~2ms) + */ + ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg, + reg & CDNS_KEEP_ALIVE_MASK, 500, + CDNS_KEEP_ALIVE_TIMEOUT); + if (ret) { + dev_err(mhdp->dev, + "device didn't give any life sign: reg %d\n", reg); + return ret; + } + + ret = cdns_mhdp_check_fw_version(mhdp); + if (ret) + return ret; + + /* Init events to 0 as it's not cleared by FW at boot but on read */ + readl(mhdp->regs + CDNS_SW_EVENT0); + readl(mhdp->regs + CDNS_SW_EVENT1); + readl(mhdp->regs + CDNS_SW_EVENT2); + readl(mhdp->regs + CDNS_SW_EVENT3); + + /* Activate uCPU */ + ret = cdns_mhdp_set_firmware_active(mhdp, true); + if (ret) + return ret; + + spin_lock(&mhdp->start_lock); + + mhdp->hw_state = MHDP_HW_READY; + + /* + * Here we must keep the lock while enabling the interrupts + * since it would otherwise be possible that interrupt enable + * code is executed after the bridge is detached. The similar + * situation is not possible in attach()/detach() callbacks + * since the hw_state changes from MHDP_HW_READY to + * MHDP_HW_STOPPED happens only due to driver removal when + * bridge should already be detached. + */ + if (mhdp->bridge_attached) + writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT, + mhdp->regs + CDNS_APB_INT_MASK); + + spin_unlock(&mhdp->start_lock); + + wake_up(&mhdp->fw_load_wq); + dev_dbg(mhdp->dev, "DP FW activated\n"); + + return 0; +} + +static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context) +{ + struct cdns_mhdp_device *mhdp = context; + bool bridge_attached; + int ret; + + dev_dbg(mhdp->dev, "firmware callback\n"); + + if (!fw || !fw->data) { + dev_err(mhdp->dev, "%s: No firmware.\n", __func__); + return; + } + + ret = cdns_mhdp_fw_activate(fw, mhdp); + + release_firmware(fw); + + if (ret) + return; + + /* + * XXX how to make sure the bridge is still attached when + * calling drm_kms_helper_hotplug_event() after releasing + * the lock? We should not hold the spin lock when + * calling drm_kms_helper_hotplug_event() since it may + * cause a dead lock. FB-dev console calls detect from the + * same thread just down the call stack started here. 
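+ * For now bridge_attached is sampled under the lock and the hotplug
+ * notification is sent only after the lock has been dropped.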
+ */ + spin_lock(&mhdp->start_lock); + bridge_attached = mhdp->bridge_attached; + spin_unlock(&mhdp->start_lock); + if (bridge_attached) { + if (mhdp->connector.dev) + drm_kms_helper_hotplug_event(mhdp->bridge.dev); + else + drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp)); + } +} + +static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp) +{ + int ret; + + ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev, + GFP_KERNEL, mhdp, cdns_mhdp_fw_cb); + if (ret) { + dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n", + FW_NAME, ret); + return ret; + } + + return 0; +} + +static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev); + int ret; + + if (msg->request != DP_AUX_NATIVE_WRITE && + msg->request != DP_AUX_NATIVE_READ) + return -EOPNOTSUPP; + + if (msg->request == DP_AUX_NATIVE_WRITE) { + const u8 *buf = msg->buffer; + unsigned int i; + + for (i = 0; i < msg->size; ++i) { + ret = cdns_mhdp_dpcd_write(mhdp, + msg->address + i, buf[i]); + if (!ret) + continue; + + dev_err(mhdp->dev, + "Failed to write DPCD addr %u\n", + msg->address + i); + + return ret; + } + } else { + ret = cdns_mhdp_dpcd_read(mhdp, msg->address, + msg->buffer, msg->size); + if (ret) { + dev_err(mhdp->dev, + "Failed to read DPCD addr %u\n", + msg->address); + + return ret; + } + } + + return msg->size; +} + +static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp) +{ + union phy_configure_opts phy_cfg; + u32 reg32; + int ret; + + drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + + /* Reset PHY configuration */ + reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1); + if (!mhdp->host.scrambler) + reg32 |= CDNS_PHY_SCRAMBLER_BYPASS; + + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD, + mhdp->sink.enhanced & mhdp->host.enhanced); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN, + CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes)); + + cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link); + phy_cfg.dp.link_rate = mhdp->link.rate / 100; + phy_cfg.dp.lanes = mhdp->link.num_lanes; + + memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage)); + memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre)); + + phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp); + phy_cfg.dp.set_lanes = true; + phy_cfg.dp.set_rate = true; + phy_cfg.dp.set_voltages = true; + ret = phy_configure(mhdp->phy, &phy_cfg); + if (ret) { + dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n", + __func__, ret); + return ret; + } + + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, + CDNS_PHY_COMMON_CONFIG | + CDNS_PHY_TRAINING_EN | + CDNS_PHY_TRAINING_TYPE(1) | + CDNS_PHY_SCRAMBLER_BYPASS); + + drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE); + + return 0; +} + +static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp, + u8 link_status[DP_LINK_STATUS_SIZE], + u8 lanes_data[CDNS_DP_MAX_NUM_LANES], + union phy_configure_opts *phy_cfg) +{ + u8 adjust, max_pre_emph, max_volt_swing; + u8 set_volt, set_pre; + unsigned int i; + + max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis) + << DP_TRAIN_PRE_EMPHASIS_SHIFT; + max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing); + + for (i = 0; i < mhdp->link.num_lanes; i++) { + /* Check if Voltage swing and pre-emphasis are within limits */ + adjust = drm_dp_get_adjust_request_voltage(link_status, i); + set_volt = 
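/* clamp the sink's request to the host maximum */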
min(adjust, max_volt_swing); + + adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i); + set_pre = min(adjust, max_pre_emph) + >> DP_TRAIN_PRE_EMPHASIS_SHIFT; + + /* + * Voltage swing level and pre-emphasis level combination is + * not allowed: leaving pre-emphasis as-is, and adjusting + * voltage swing. + */ + if (set_volt + set_pre > 3) + set_volt = 3 - set_pre; + + phy_cfg->dp.voltage[i] = set_volt; + lanes_data[i] = set_volt; + + if (set_volt == max_volt_swing) + lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED; + + phy_cfg->dp.pre[i] = set_pre; + lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT); + + if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT)) + lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + } +} + +static +void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], + unsigned int lane, u8 volt) +{ + unsigned int s = ((lane & 1) ? + DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : + DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); + unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1); + + link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s); + link_status[idx] |= volt << s; +} + +static +void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], + unsigned int lane, u8 pre_emphasis) +{ + unsigned int s = ((lane & 1) ? + DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1); + + link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s); + link_status[idx] |= pre_emphasis << s; +} + +static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp, + u8 link_status[DP_LINK_STATUS_SIZE]) +{ + u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis); + u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing); + unsigned int i; + u8 volt, pre; + + for (i = 0; i < mhdp->link.num_lanes; i++) { + volt = drm_dp_get_adjust_request_voltage(link_status, i); + pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i); + if (volt + pre > 3) + cdns_mhdp_set_adjust_request_voltage(link_status, i, + 3 - pre); + if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING) + cdns_mhdp_set_adjust_request_voltage(link_status, i, + max_volt); + if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS) + cdns_mhdp_set_adjust_request_pre_emphasis(link_status, + i, max_pre); + } +} + +static void cdns_mhdp_print_lt_status(const char *prefix, + struct cdns_mhdp_device *mhdp, + union phy_configure_opts *phy_cfg) +{ + char vs[8] = "0/0/0/0"; + char pe[8] = "0/0/0/0"; + unsigned int i; + + for (i = 0; i < mhdp->link.num_lanes; i++) { + vs[i * 2] = '0' + phy_cfg->dp.voltage[i]; + pe[i * 2] = '0' + phy_cfg->dp.pre[i]; + } + + vs[i * 2 - 1] = '\0'; + pe[i * 2 - 1] = '\0'; + + dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n", + prefix, + mhdp->link.num_lanes, mhdp->link.rate / 100, + vs, pe); +} + +static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp, + u8 eq_tps, + unsigned int training_interval) +{ + u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0; + u8 link_status[DP_LINK_STATUS_SIZE]; + union phy_configure_opts phy_cfg; + u32 reg32; + int ret; + bool r; + + dev_dbg(mhdp->dev, "Starting EQ phase\n"); + + /* Enable link training TPS[eq_tps] in PHY */ + reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN | + CDNS_PHY_TRAINING_TYPE(eq_tps); + if (eq_tps != 4) + reg32 |= CDNS_PHY_SCRAMBLER_BYPASS; + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32); + + 
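/* mirror the pattern choice to the sink; scrambling stays disabled for TPS1-3 */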
drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, + (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE : + CDNS_DP_TRAINING_PATTERN_4); + + drm_dp_dpcd_read_link_status(&mhdp->aux, link_status); + + do { + cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data, + &phy_cfg); + phy_cfg.dp.lanes = mhdp->link.num_lanes; + phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp); + phy_cfg.dp.set_lanes = false; + phy_cfg.dp.set_rate = false; + phy_cfg.dp.set_voltages = true; + ret = phy_configure(mhdp->phy, &phy_cfg); + if (ret) { + dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n", + __func__, ret); + goto err; + } + + cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, + training_interval, lanes_data, link_status); + + r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes); + if (!r) + goto err; + + if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) { + cdns_mhdp_print_lt_status("EQ phase ok", mhdp, + &phy_cfg); + return true; + } + + fail_counter_short++; + + cdns_mhdp_adjust_requested_eq(mhdp, link_status); + } while (fail_counter_short < 5); + +err: + cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg); + + return false; +} + +static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp, + u8 link_status[DP_LINK_STATUS_SIZE], + u8 *req_volt, u8 *req_pre) +{ + const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing); + const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis); + unsigned int i; + + for (i = 0; i < mhdp->link.num_lanes; i++) { + u8 val; + + val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ? + max_volt : req_volt[i]; + cdns_mhdp_set_adjust_request_voltage(link_status, i, val); + + val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ? + max_pre : req_pre[i]; + cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val); + } +} + +static +void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done, + bool *same_before_adjust, bool *max_swing_reached, + u8 before_cr[CDNS_DP_MAX_NUM_LANES], + u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt, + u8 *req_pre) +{ + const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing); + const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis); + bool same_pre, same_volt; + unsigned int i; + u8 adjust; + + *same_before_adjust = false; + *max_swing_reached = false; + *cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes); + + for (i = 0; i < mhdp->link.num_lanes; i++) { + adjust = drm_dp_get_adjust_request_voltage(after_cr, i); + req_volt[i] = min(adjust, max_volt); + + adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + req_pre[i] = min(adjust, max_pre); + + same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) == + req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT; + same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) == + req_volt[i]; + if (same_pre && same_volt) + *same_before_adjust = true; + + /* 3.1.5.2 in DP Standard v1.4. 
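A combined level of 3 or more means max swing is reached; cf.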
Table 3-1 */ + if (!*cr_done && req_volt[i] + req_pre[i] >= 3) { + *max_swing_reached = true; + return; + } + } +} + +static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp) +{ + u8 lanes_data[CDNS_DP_MAX_NUM_LANES], + fail_counter_short = 0, fail_counter_cr_long = 0; + u8 link_status[DP_LINK_STATUS_SIZE]; + bool cr_done; + union phy_configure_opts phy_cfg; + int ret; + + dev_dbg(mhdp->dev, "Starting CR phase\n"); + + ret = cdns_mhdp_link_training_init(mhdp); + if (ret) + goto err; + + drm_dp_dpcd_read_link_status(&mhdp->aux, link_status); + + do { + u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {}; + u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {}; + bool same_before_adjust, max_swing_reached; + + cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data, + &phy_cfg); + phy_cfg.dp.lanes = mhdp->link.num_lanes; + phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp); + phy_cfg.dp.set_lanes = false; + phy_cfg.dp.set_rate = false; + phy_cfg.dp.set_voltages = true; + ret = phy_configure(mhdp->phy, &phy_cfg); + if (ret) { + dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n", + __func__, ret); + goto err; + } + + cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100, + lanes_data, link_status); + + cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust, + &max_swing_reached, lanes_data, + link_status, + requested_adjust_volt_swing, + requested_adjust_pre_emphasis); + + if (max_swing_reached) { + dev_err(mhdp->dev, "CR: max swing reached\n"); + goto err; + } + + if (cr_done) { + cdns_mhdp_print_lt_status("CR phase ok", mhdp, + &phy_cfg); + return true; + } + + /* Not all CR_DONE bits set */ + fail_counter_cr_long++; + + if (same_before_adjust) { + fail_counter_short++; + continue; + } + + fail_counter_short = 0; + /* + * Voltage swing/pre-emphasis adjust requested + * during CR phase + */ + cdns_mhdp_adjust_requested_cr(mhdp, link_status, + requested_adjust_volt_swing, + requested_adjust_pre_emphasis); + } while (fail_counter_short < 5 && fail_counter_cr_long < 10); + +err: + cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg); + + return false; +} + +static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link) +{ + switch (drm_dp_link_rate_to_bw_code(link->rate)) { + case DP_LINK_BW_2_7: + link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62); + break; + case DP_LINK_BW_5_4: + link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7); + break; + case DP_LINK_BW_8_1: + link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4); + break; + } +} + +static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp, + unsigned int training_interval) +{ + u32 reg32; + const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp); + int ret; + + while (1) { + if (!cdns_mhdp_link_training_cr(mhdp)) { + if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) != + DP_LINK_BW_1_62) { + dev_dbg(mhdp->dev, + "Reducing link rate during CR phase\n"); + cdns_mhdp_lower_link_rate(&mhdp->link); + + continue; + } else if (mhdp->link.num_lanes > 1) { + dev_dbg(mhdp->dev, + "Reducing lanes number during CR phase\n"); + mhdp->link.num_lanes >>= 1; + mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp); + + continue; + } + + dev_err(mhdp->dev, + "Link training failed during CR phase\n"); + goto err; + } + + if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps, + training_interval)) + break; + + if (mhdp->link.num_lanes > 1) { + dev_dbg(mhdp->dev, + "Reducing lanes number during EQ phase\n"); + mhdp->link.num_lanes >>= 1; + + continue; + } else if 
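/* already down to one lane, so try a lower link rate */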
(drm_dp_link_rate_to_bw_code(mhdp->link.rate) != + DP_LINK_BW_1_62) { + dev_dbg(mhdp->dev, + "Reducing link rate during EQ phase\n"); + cdns_mhdp_lower_link_rate(&mhdp->link); + mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp); + + continue; + } + + dev_err(mhdp->dev, "Link training failed during EQ phase\n"); + goto err; + } + + dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n", + mhdp->link.num_lanes, mhdp->link.rate / 100); + + drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, + mhdp->host.scrambler ? 0 : + DP_LINK_SCRAMBLING_DISABLE); + + ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, ®32); + if (ret < 0) { + dev_err(mhdp->dev, + "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n", + ret); + return ret; + } + reg32 &= ~GENMASK(1, 0); + reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes); + reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC; + reg32 |= CDNS_DP_FRAMER_EN; + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32); + + /* Reset PHY config */ + reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1); + if (!mhdp->host.scrambler) + reg32 |= CDNS_PHY_SCRAMBLER_BYPASS; + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32); + + return 0; +err: + /* Reset PHY config */ + reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1); + if (!mhdp->host.scrambler) + reg32 |= CDNS_PHY_SCRAMBLER_BYPASS; + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32); + + drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + + return -EIO; +} + +static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp, + u32 interval) +{ + if (interval == 0) + return 400; + if (interval < 5) + return 4000 << (interval - 1); + dev_err(mhdp->dev, + "wrong training interval returned by DPCD: %d\n", interval); + return 0; +} + +static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp) +{ + unsigned int link_rate; + + /* Get source capabilities based on PHY attributes */ + + mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width; + if (!mhdp->host.lanes_cnt) + mhdp->host.lanes_cnt = 4; + + link_rate = mhdp->phy->attrs.max_link_rate; + if (!link_rate) + link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1); + else + /* PHY uses Mb/s, DRM uses tens of kb/s. 
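E.g. HBR3: 8100 Mb/s becomes 810000, matching DP_LINK_BW_8_1.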
*/ + link_rate *= 100; + + mhdp->host.link_rate = link_rate; + mhdp->host.volt_swing = CDNS_VOLT_SWING(3); + mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3); + mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) | + CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) | + CDNS_SUPPORT_TPS(4); + mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL; + mhdp->host.fast_link = false; + mhdp->host.enhanced = true; + mhdp->host.scrambler = true; + mhdp->host.ssc = false; +} + +static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp, + u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + mhdp->sink.link_rate = mhdp->link.rate; + mhdp->sink.lanes_cnt = mhdp->link.num_lanes; + mhdp->sink.enhanced = !!(mhdp->link.capabilities & + DP_LINK_CAP_ENHANCED_FRAMING); + + /* Set SSC support */ + mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] & + DP_MAX_DOWNSPREAD_0_5); + + /* Set TPS support */ + mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2); + if (drm_dp_tps3_supported(dpcd)) + mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3); + if (drm_dp_tps4_supported(dpcd)) + mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4); + + /* Set fast link support */ + mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] & + DP_NO_AUX_HANDSHAKE_LINK_TRAINING); +} + +static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp) +{ + u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2]; + u32 resp, interval, interval_us; + u8 ext_cap_chk = 0; + unsigned int addr; + int err; + + WARN_ON(!mutex_is_locked(&mhdp->link_mutex)); + + drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL, + &ext_cap_chk); + + if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT) + addr = DP_DP13_DPCD_REV; + else + addr = DP_DPCD_REV; + + err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE); + if (err < 0) { + dev_err(mhdp->dev, "Failed to read receiver capabilities\n"); + return err; + } + + mhdp->link.revision = dpcd[0]; + mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]); + mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK; + + if (dpcd[2] & DP_ENHANCED_FRAME_CAP) + mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; + + dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n"); + cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link); + + cdns_mhdp_fill_sink_caps(mhdp, dpcd); + + mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp); + mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp); + + /* Disable framer for link training */ + err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp); + if (err < 0) { + dev_err(mhdp->dev, + "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n", + err); + return err; + } + + resp &= ~CDNS_DP_FRAMER_EN; + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp); + + /* Spread AMP if required, enable 8b/10b coding */ + amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0; + amp[1] = DP_SET_ANSI_8B10B; + drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2); + + if (mhdp->host.fast_link & mhdp->sink.fast_link) { + dev_err(mhdp->dev, "fastlink not supported\n"); + return -EOPNOTSUPP; + } + + interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK; + interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval); + if (!interval_us || + cdns_mhdp_link_training(mhdp, interval_us)) { + dev_err(mhdp->dev, "Link training failed. 
Exiting.\n"); + return -EIO; + } + + mhdp->link_up = true; + + return 0; +} + +static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp) +{ + WARN_ON(!mutex_is_locked(&mhdp->link_mutex)); + + if (mhdp->plugged) + cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link); + + mhdp->link_up = false; +} + +static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp, + struct drm_connector *connector) +{ + if (!mhdp->plugged) + return NULL; + + return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp); +} + +static int cdns_mhdp_get_modes(struct drm_connector *connector) +{ + struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector); + struct edid *edid; + int num_modes; + + if (!mhdp->plugged) + return 0; + + edid = cdns_mhdp_get_edid(mhdp, connector); + if (!edid) { + dev_err(mhdp->dev, "Failed to read EDID\n"); + return 0; + } + + drm_connector_update_edid_property(connector, edid); + num_modes = drm_add_edid_modes(connector, edid); + kfree(edid); + + /* + * HACK: Warn about unsupported display formats until we deal + * with them correctly. + */ + if (connector->display_info.color_formats && + !(connector->display_info.color_formats & + mhdp->display_fmt.color_format)) + dev_warn(mhdp->dev, + "%s: No supported color_format found (0x%08x)\n", + __func__, connector->display_info.color_formats); + + if (connector->display_info.bpc && + connector->display_info.bpc < mhdp->display_fmt.bpc) + dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n", + __func__, connector->display_info.bpc, + mhdp->display_fmt.bpc); + + return num_modes; +} + +static int cdns_mhdp_connector_detect(struct drm_connector *conn, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn); + + return cdns_mhdp_detect(mhdp); +} + +static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt) +{ + u32 bpp; + + if (fmt->y_only) + return fmt->bpc; + + switch (fmt->color_format) { + case DRM_COLOR_FORMAT_RGB444: + case DRM_COLOR_FORMAT_YCRCB444: + bpp = fmt->bpc * 3; + break; + case DRM_COLOR_FORMAT_YCRCB422: + bpp = fmt->bpc * 2; + break; + case DRM_COLOR_FORMAT_YCRCB420: + bpp = fmt->bpc * 3 / 2; + break; + default: + bpp = fmt->bpc * 3; + WARN_ON(1); + } + return bpp; +} + +static +bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp, + const struct drm_display_mode *mode, + unsigned int lanes, unsigned int rate) +{ + u32 max_bw, req_bw, bpp; + + /* + * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8 + * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the + * value thus equals the bandwidth in 10kb/s units, which matches the + * units of the rate parameter. 
+ */ + + bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt); + req_bw = mode->clock * bpp / 8; + max_bw = lanes * rate; + if (req_bw > max_bw) { + dev_dbg(mhdp->dev, + "Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n", + mode->name, req_bw, max_bw); + + return false; + } + + return true; +} + +static +enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn, + struct drm_display_mode *mode) +{ + struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn); + + mutex_lock(&mhdp->link_mutex); + + if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes, + mhdp->link.rate)) { + mutex_unlock(&mhdp->link_mutex); + return MODE_CLOCK_HIGH; + } + + mutex_unlock(&mhdp->link_mutex); + return MODE_OK; +} + +static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = { + .detect_ctx = cdns_mhdp_connector_detect, + .get_modes = cdns_mhdp_get_modes, + .mode_valid = cdns_mhdp_mode_valid, +}; + +static const struct drm_connector_funcs cdns_mhdp_conn_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .reset = drm_atomic_helper_connector_reset, + .destroy = drm_connector_cleanup, +}; + +static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp) +{ + u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36; + struct drm_connector *conn = &mhdp->connector; + struct drm_bridge *bridge = &mhdp->bridge; + int ret; + + if (!bridge->encoder) { + dev_err(mhdp->dev, "Parent encoder object not found"); + return -ENODEV; + } + + conn->polled = DRM_CONNECTOR_POLL_HPD; + + ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) { + dev_err(mhdp->dev, "Failed to initialize connector with drm\n"); + return ret; + } + + drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs); + + ret = drm_display_info_set_bus_formats(&conn->display_info, + &bus_format, 1); + if (ret) + return ret; + + ret = drm_connector_attach_encoder(conn, bridge->encoder); + if (ret) { + dev_err(mhdp->dev, "Failed to attach connector to encoder\n"); + return ret; + } + + return 0; +} + +static int cdns_mhdp_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + bool hw_ready; + int ret; + + dev_dbg(mhdp->dev, "%s\n", __func__); + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { + ret = cdns_mhdp_connector_init(mhdp); + if (ret) + return ret; + } + + spin_lock(&mhdp->start_lock); + + mhdp->bridge_attached = true; + hw_ready = mhdp->hw_state == MHDP_HW_READY; + + spin_unlock(&mhdp->start_lock); + + /* Enable SW event interrupts */ + if (hw_ready) + writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT, + mhdp->regs + CDNS_APB_INT_MASK); + + return 0; +} + +static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp, + const struct drm_display_mode *mode) +{ + unsigned int dp_framer_sp = 0, msa_horizontal_1, + msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl, + misc0 = 0, misc1 = 0, pxl_repr, + front_porch, back_porch, msa_h0, msa_v0, hsync, vsync, + dp_vertical_1; + u8 stream_id = mhdp->stream_id; + u32 bpp, bpc, pxlfmt, framer; + int ret; + + pxlfmt = mhdp->display_fmt.color_format; + bpc = mhdp->display_fmt.bpc; + + /* + * If YCBCR supported and stream not SD, use ITU709 + * Need to handle ITU version with YCBCR420 when supported + */ + if ((pxlfmt == DRM_COLOR_FORMAT_YCRCB444 || + pxlfmt == DRM_COLOR_FORMAT_YCRCB422) && 
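/* 720 or more active lines means not SD */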
mode->crtc_vdisplay >= 720) + misc0 = DP_YCBCR_COEFFICIENTS_ITU709; + + bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt); + + switch (pxlfmt) { + case DRM_COLOR_FORMAT_RGB444: + pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT; + misc0 |= DP_COLOR_FORMAT_RGB; + break; + case DRM_COLOR_FORMAT_YCRCB444: + pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT; + misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA; + break; + case DRM_COLOR_FORMAT_YCRCB422: + pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT; + misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA; + break; + case DRM_COLOR_FORMAT_YCRCB420: + pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT; + break; + default: + pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT; + } + + switch (bpc) { + case 6: + misc0 |= DP_TEST_BIT_DEPTH_6; + pxl_repr |= CDNS_DP_FRAMER_6_BPC; + break; + case 8: + misc0 |= DP_TEST_BIT_DEPTH_8; + pxl_repr |= CDNS_DP_FRAMER_8_BPC; + break; + case 10: + misc0 |= DP_TEST_BIT_DEPTH_10; + pxl_repr |= CDNS_DP_FRAMER_10_BPC; + break; + case 12: + misc0 |= DP_TEST_BIT_DEPTH_12; + pxl_repr |= CDNS_DP_FRAMER_12_BPC; + break; + case 16: + misc0 |= DP_TEST_BIT_DEPTH_16; + pxl_repr |= CDNS_DP_FRAMER_16_BPC; + break; + } + + bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT; + + cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id), + bnd_hsync2vsync); + + hsync2vsync_pol_ctrl = 0; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW; + cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id), + hsync2vsync_pol_ctrl); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW; + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp); + + front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay; + back_porch = mode->crtc_htotal - mode->crtc_hsync_end; + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id), + CDNS_DP_FRONT_PORCH(front_porch) | + CDNS_DP_BACK_PORCH(back_porch)); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id), + mode->crtc_hdisplay * bpp / 8); + + msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start; + cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id), + CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) | + CDNS_DP_MSAH0_HSYNC_START(msa_h0)); + + hsync = mode->crtc_hsync_end - mode->crtc_hsync_start; + msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) | + CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay); + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW; + cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id), + msa_horizontal_1); + + msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start; + cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id), + CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) | + CDNS_DP_MSAV0_VSYNC_START(msa_v0)); + + vsync = mode->crtc_vsync_end - mode->crtc_vsync_start; + msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) | + CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay); + if (mode->flags & 
DRM_MODE_FLAG_NVSYNC) + msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW; + cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id), + msa_vertical_1); + + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) && + mode->crtc_vtotal % 2 == 0) + misc1 = DP_TEST_INTERLACED; + if (mhdp->display_fmt.y_only) + misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY; + /* Use VSC SDP for Y420 */ + if (pxlfmt == DRM_COLOR_FORMAT_YCRCB420) + misc1 = CDNS_DP_TEST_VSC_SDP; + + cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id), + misc0 | (misc1 << 8)); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id), + CDNS_DP_H_HSYNC_WIDTH(hsync) | + CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay)); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id), + CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) | + CDNS_DP_V0_VSTART(msa_v0)); + + dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal); + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) && + mode->crtc_vtotal % 2 == 0) + dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN; + + cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1); + + cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1, + (mode->flags & DRM_MODE_FLAG_INTERLACE) ? + CDNS_DP_VB_ID_INTERLACED : 0); + + ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer); + if (ret < 0) { + dev_err(mhdp->dev, + "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n", + ret); + return; + } + framer |= CDNS_DP_FRAMER_EN; + framer &= ~CDNS_DP_NO_VIDEO_MODE; + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer); +} + +static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp, + const struct drm_display_mode *mode) +{ + u32 rate, vs, required_bandwidth, available_bandwidth; + s32 line_thresh1, line_thresh2, line_thresh = 0; + int pxlclock = mode->crtc_clock; + u32 tu_size = 64; + u32 bpp; + + /* Get rate in MSymbols per second per lane */ + rate = mhdp->link.rate / 1000; + + bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt); + + required_bandwidth = pxlclock * bpp / 8; + available_bandwidth = mhdp->link.num_lanes * rate; + + vs = tu_size * required_bandwidth / available_bandwidth; + vs /= 1000; + + if (vs == tu_size) + vs = tu_size - 1; + + line_thresh1 = ((vs + 1) << 5) * 8 / bpp; + line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5); + line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes; + line_thresh = (line_thresh >> 5) + 2; + + mhdp->stream_id = 0; + + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU, + CDNS_DP_FRAMER_TU_VS(vs) | + CDNS_DP_FRAMER_TU_SIZE(tu_size) | + CDNS_DP_FRAMER_TU_CNT_RST_EN); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0), + line_thresh & GENMASK(5, 0)); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0), + CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ? 
+ 0 : tu_size - vs)); + + cdns_mhdp_configure_video(mhdp, mode); +} + +static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + struct drm_atomic_state *state = bridge_state->base.state; + struct cdns_mhdp_bridge_state *mhdp_state; + struct drm_crtc_state *crtc_state; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct drm_bridge_state *new_state; + const struct drm_display_mode *mode; + u32 resp; + int ret; + + dev_dbg(mhdp->dev, "bridge enable\n"); + + mutex_lock(&mhdp->link_mutex); + + if (mhdp->plugged && !mhdp->link_up) { + ret = cdns_mhdp_link_up(mhdp); + if (ret < 0) + goto out; + } + + if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable) + mhdp->info->ops->enable(mhdp); + + /* Enable VIF clock for stream 0 */ + ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp); + if (ret < 0) { + dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret); + goto out; + } + + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR, + resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN); + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + if (WARN_ON(!connector)) + goto out; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (WARN_ON(!conn_state)) + goto out; + + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + if (WARN_ON(!crtc_state)) + goto out; + + mode = &crtc_state->adjusted_mode; + + new_state = drm_atomic_get_new_bridge_state(state, bridge); + if (WARN_ON(!new_state)) + goto out; + + if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes, + mhdp->link.rate)) { + ret = -EINVAL; + goto out; + } + + cdns_mhdp_sst_enable(mhdp, mode); + + mhdp_state = to_cdns_mhdp_bridge_state(new_state); + + mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode); + drm_mode_set_name(mhdp_state->current_mode); + + dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name); + + mhdp->bridge_enabled = true; + +out: + mutex_unlock(&mhdp->link_mutex); + if (ret < 0) + schedule_work(&mhdp->modeset_retry_work); +} + +static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + u32 resp; + + dev_dbg(mhdp->dev, "%s\n", __func__); + + mutex_lock(&mhdp->link_mutex); + + mhdp->bridge_enabled = false; + cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp); + resp &= ~CDNS_DP_FRAMER_EN; + resp |= CDNS_DP_NO_VIDEO_MODE; + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp); + + cdns_mhdp_link_down(mhdp); + + /* Disable VIF clock for stream 0 */ + cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp); + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR, + resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN)); + + if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable) + mhdp->info->ops->disable(mhdp); + + mutex_unlock(&mhdp->link_mutex); +} + +static void cdns_mhdp_detach(struct drm_bridge *bridge) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + + dev_dbg(mhdp->dev, "%s\n", __func__); + + spin_lock(&mhdp->start_lock); + + mhdp->bridge_attached = false; + + spin_unlock(&mhdp->start_lock); + + writel(~0, mhdp->regs + CDNS_APB_INT_MASK); +} + +static struct drm_bridge_state * +cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge) +{ + struct cdns_mhdp_bridge_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + 
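For reference, the transfer-unit and bandwidth arithmetic used by cdns_mhdp_bandwidth_ok() and cdns_mhdp_sst_enable() above can be checked in isolation. A minimal user-space sketch with illustrative numbers only (1080p60, 8 bpc RGB, four HBR2 lanes — none of these values come from the driver itself):

```c
#include <stdio.h>

int main(void)
{
	/* Illustrative inputs: 1080p60 (148.5 MHz pixel clock), 8 bpc RGB,
	 * four lanes at HBR2. link_rate is in kSym/s per lane, the same
	 * unit as mhdp->link.rate in the driver. */
	unsigned int clock_khz = 148500, bpp = 24, lanes = 4;
	unsigned int link_rate = 540000, tu_size = 64;

	/* kHz * bits / 8 yields kB/s, which after 8b/10b encoding equals
	 * the wire bandwidth in 10 kb/s units -- the unit of lanes * link_rate. */
	unsigned int req_bw = clock_khz * bpp / 8;	/* 445500 */
	unsigned int max_bw = lanes * link_rate;	/* 2160000 */

	printf("req %u vs max %u: %s\n", req_bw, max_bw,
	       req_bw <= max_bw ? "ok" : "MODE_CLOCK_HIGH");

	/* Valid symbols per 64-symbol TU, as in cdns_mhdp_sst_enable():
	 * scale tu_size by the bandwidth ratio; the trailing /1000
	 * reconciles kB/s (req_bw) against MB/s (lanes * MSym/s). */
	unsigned int rate = link_rate / 1000;		/* 540 MSym/s */
	unsigned int vs = tu_size * req_bw / (lanes * rate) / 1000;

	if (vs == tu_size)
		vs = tu_size - 1;
	printf("vs = %u of %u symbols\n", vs, tu_size);	/* 13 of 64 */

	return 0;
}
```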
__drm_atomic_helper_bridge_duplicate_state(bridge, &state->base); + + return &state->base; +} + +static void +cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge, + struct drm_bridge_state *state) +{ + struct cdns_mhdp_bridge_state *cdns_mhdp_state; + + cdns_mhdp_state = to_cdns_mhdp_bridge_state(state); + + if (cdns_mhdp_state->current_mode) { + drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode); + cdns_mhdp_state->current_mode = NULL; + } + + kfree(cdns_mhdp_state); +} + +static struct drm_bridge_state * +cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge) +{ + struct cdns_mhdp_bridge_state *cdns_mhdp_state; + + cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL); + if (!cdns_mhdp_state) + return NULL; + + __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base); + + return &cdns_mhdp_state->base; +} + +static int cdns_mhdp_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + const struct drm_display_mode *mode = &crtc_state->adjusted_mode; + + mutex_lock(&mhdp->link_mutex); + + if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes, + mhdp->link.rate)) { + dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n", + __func__, mode->name, mhdp->link.num_lanes, + mhdp->link.rate / 100); + mutex_unlock(&mhdp->link_mutex); + return -EINVAL; + } + + mutex_unlock(&mhdp->link_mutex); + return 0; +} + +static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + + return cdns_mhdp_detect(mhdp); +} + +static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + + return cdns_mhdp_get_edid(mhdp, connector); +} + +static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + + /* Enable SW event interrupts */ + if (mhdp->bridge_attached) + writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT, + mhdp->regs + CDNS_APB_INT_MASK); +} + +static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + + writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK); +} + +static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = { + .atomic_enable = cdns_mhdp_atomic_enable, + .atomic_disable = cdns_mhdp_atomic_disable, + .atomic_check = cdns_mhdp_atomic_check, + .attach = cdns_mhdp_attach, + .detach = cdns_mhdp_detach, + .atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state, + .atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state, + .atomic_reset = cdns_mhdp_bridge_atomic_reset, + .detect = cdns_mhdp_bridge_detect, + .get_edid = cdns_mhdp_bridge_get_edid, + .hpd_enable = cdns_mhdp_bridge_hpd_enable, + .hpd_disable = cdns_mhdp_bridge_hpd_disable, +}; + +static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse) +{ + int hpd_event, hpd_status; + + *hpd_pulse = false; + + hpd_event = cdns_mhdp_read_hpd_event(mhdp); + + /* Getting event bits failed, bail out */ + if (hpd_event < 0) { + dev_warn(mhdp->dev, "%s: read event failed: %d\n", + __func__, hpd_event); + return false; + } + + hpd_status = cdns_mhdp_get_hpd_status(mhdp); + if (hpd_status < 0) { + dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n", + 
__func__, hpd_status); + return false; + } + + if (hpd_event & DPTX_READ_EVENT_HPD_PULSE) + *hpd_pulse = true; + + return !!hpd_status; +} + +static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp) +{ + struct cdns_mhdp_bridge_state *cdns_bridge_state; + struct drm_display_mode *current_mode; + bool old_plugged = mhdp->plugged; + struct drm_bridge_state *state; + u8 status[DP_LINK_STATUS_SIZE]; + bool hpd_pulse; + int ret = 0; + + mutex_lock(&mhdp->link_mutex); + + mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse); + + if (!mhdp->plugged) { + cdns_mhdp_link_down(mhdp); + mhdp->link.rate = mhdp->host.link_rate; + mhdp->link.num_lanes = mhdp->host.lanes_cnt; + goto out; + } + + /* + * If we get an HPD pulse event and we were and still are connected, + * check the link status. If link status is ok, there's nothing to do + * as we don't handle DP interrupts. If link status is bad, continue + * with full link setup. + */ + if (hpd_pulse && old_plugged == mhdp->plugged) { + ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status); + + /* + * If everything looks fine, just return, as we don't handle + * DP IRQs. + */ + if (ret > 0 && + drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) && + drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes)) + goto out; + + /* If link is bad, mark link as down so that we do a new LT */ + mhdp->link_up = false; + } + + if (!mhdp->link_up) { + ret = cdns_mhdp_link_up(mhdp); + if (ret < 0) + goto out; + } + + if (mhdp->bridge_enabled) { + state = drm_priv_to_bridge_state(mhdp->bridge.base.state); + if (!state) { + ret = -EINVAL; + goto out; + } + + cdns_bridge_state = to_cdns_mhdp_bridge_state(state); + if (!cdns_bridge_state) { + ret = -EINVAL; + goto out; + } + + current_mode = cdns_bridge_state->current_mode; + if (!current_mode) { + ret = -EINVAL; + goto out; + } + + if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes, + mhdp->link.rate)) { + ret = -EINVAL; + goto out; + } + + dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, + current_mode->name); + + cdns_mhdp_sst_enable(mhdp, current_mode); + } +out: + mutex_unlock(&mhdp->link_mutex); + return ret; +} + +static void cdns_mhdp_modeset_retry_fn(struct work_struct *work) +{ + struct cdns_mhdp_device *mhdp; + struct drm_connector *conn; + + mhdp = container_of(work, typeof(*mhdp), modeset_retry_work); + + conn = &mhdp->connector; + + /* Grab the locks before changing connector property */ + mutex_lock(&conn->dev->mode_config.mutex); + + /* + * Set connector link status to BAD and send a Uevent to notify + * userspace to do a modeset. + */ + drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD); + mutex_unlock(&conn->dev->mode_config.mutex); + + /* Send Hotplug uevent so userspace can reprobe */ + drm_kms_helper_hotplug_event(mhdp->bridge.dev); +} + +static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data) +{ + struct cdns_mhdp_device *mhdp = data; + u32 apb_stat, sw_ev0; + bool bridge_attached; + int ret; + + apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS); + if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT)) + return IRQ_NONE; + + sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0); + + /* + * Calling drm_kms_helper_hotplug_event() when not attached + * to a drm device causes an oops because the drm_bridge->dev + * is NULL. See cdns_mhdp_fw_cb() comments for details about the + * problems related to the drm_kms_helper_hotplug_event() call.
+ */ + spin_lock(&mhdp->start_lock); + bridge_attached = mhdp->bridge_attached; + spin_unlock(&mhdp->start_lock); + + if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) { + ret = cdns_mhdp_update_link_status(mhdp); + if (mhdp->connector.dev) { + if (ret < 0) + schedule_work(&mhdp->modeset_retry_work); + else + drm_kms_helper_hotplug_event(mhdp->bridge.dev); + } else { + drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp)); + } + } + + return IRQ_HANDLED; +} + +static int cdns_mhdp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cdns_mhdp_device *mhdp; + unsigned long rate; + struct clk *clk; + int ret; + int irq; + + mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL); + if (!mhdp) + return -ENOMEM; + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk)); + return PTR_ERR(clk); + } + + mhdp->clk = clk; + mhdp->dev = dev; + mutex_init(&mhdp->mbox_mutex); + mutex_init(&mhdp->link_mutex); + spin_lock_init(&mhdp->start_lock); + + drm_dp_aux_init(&mhdp->aux); + mhdp->aux.dev = dev; + mhdp->aux.transfer = cdns_mhdp_transfer; + + mhdp->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mhdp->regs)) { + dev_err(dev, "Failed to get memory resource\n"); + return PTR_ERR(mhdp->regs); + } + + mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0); + if (IS_ERR(mhdp->phy)) { + dev_err(dev, "no PHY configured\n"); + return PTR_ERR(mhdp->phy); + } + + platform_set_drvdata(pdev, mhdp); + + mhdp->info = of_device_get_match_data(dev); + + clk_prepare_enable(clk); + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + pm_runtime_disable(dev); + goto clk_disable; + } + + if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) { + ret = mhdp->info->ops->init(mhdp); + if (ret != 0) { + dev_err(dev, "MHDP platform initialization failed: %d\n", + ret); + goto runtime_put; + } + } + + rate = clk_get_rate(clk); + writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L); + writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H); + + dev_dbg(dev, "func clk rate %lu Hz\n", rate); + + writel(~0, mhdp->regs + CDNS_APB_INT_MASK); + + irq = platform_get_irq(pdev, 0); + ret = devm_request_threaded_irq(mhdp->dev, irq, NULL, + cdns_mhdp_irq_handler, IRQF_ONESHOT, + "mhdp8546", mhdp); + if (ret) { + dev_err(dev, "cannot install IRQ %d\n", irq); + ret = -EIO; + goto plat_fini; + } + + cdns_mhdp_fill_host_caps(mhdp); + + /* Initialize link rate and num of lanes to host values */ + mhdp->link.rate = mhdp->host.link_rate; + mhdp->link.num_lanes = mhdp->host.lanes_cnt; + + /* The only currently supported format */ + mhdp->display_fmt.y_only = false; + mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444; + mhdp->display_fmt.bpc = 8; + + mhdp->bridge.of_node = pdev->dev.of_node; + mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs; + mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | + DRM_BRIDGE_OP_HPD; + mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; + if (mhdp->info) + mhdp->bridge.timings = mhdp->info->timings; + + ret = phy_init(mhdp->phy); + if (ret) { + dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret); + goto plat_fini; + } + + /* Initialize the work for modeset in case of link train failure */ + INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn); + + init_waitqueue_head(&mhdp->fw_load_wq); + + ret = cdns_mhdp_load_firmware(mhdp); + if (ret) + goto phy_exit; + + drm_bridge_add(&mhdp->bridge); + + return 0; + 
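As a sanity check on the CDNS_SW_CLK_L/H programming in the probe path above — the firmware is told the functional clock rate as a whole-MHz count plus a sub-MHz remainder — the same split can be reproduced standalone (the 333 MHz rate is only an example value, not a requirement of the hardware):

```c
#include <stdio.h>

int main(void)
{
	unsigned long rate = 333333333;	/* example clk_get_rate() result */

	/* Mirrors the probe code: H takes the whole MHz, L the remainder */
	printf("CDNS_SW_CLK_H = %lu, CDNS_SW_CLK_L = %lu\n",
	       rate / 1000000, rate % 1000000);	/* 333 and 333333 */

	return 0;
}
```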
+phy_exit: + phy_exit(mhdp->phy); +plat_fini: + if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit) + mhdp->info->ops->exit(mhdp); +runtime_put: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); +clk_disable: + clk_disable_unprepare(mhdp->clk); + + return ret; +} + +static int cdns_mhdp_remove(struct platform_device *pdev) +{ + struct cdns_mhdp_device *mhdp = dev_get_drvdata(&pdev->dev); + unsigned long timeout = msecs_to_jiffies(100); + bool stop_fw = false; + int ret; + + drm_bridge_remove(&mhdp->bridge); + + ret = wait_event_timeout(mhdp->fw_load_wq, + mhdp->hw_state == MHDP_HW_READY, + timeout); + if (ret == 0) + dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n", + __func__); + else + stop_fw = true; + + spin_lock(&mhdp->start_lock); + mhdp->hw_state = MHDP_HW_STOPPED; + spin_unlock(&mhdp->start_lock); + + if (stop_fw) + ret = cdns_mhdp_set_firmware_active(mhdp, false); + + phy_exit(mhdp->phy); + + if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit) + mhdp->info->ops->exit(mhdp); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + cancel_work_sync(&mhdp->modeset_retry_work); + flush_scheduled_work(); + + clk_disable_unprepare(mhdp->clk); + + return ret; +} + +static const struct of_device_id mhdp_ids[] = { + { .compatible = "cdns,mhdp8546", }, +#ifdef CONFIG_DRM_CDNS_MHDP8546_J721E + { .compatible = "ti,j721e-mhdp8546", + .data = &(const struct cdns_mhdp_platform_info) { + .timings = &mhdp_ti_j721e_bridge_timings, + .ops = &mhdp_ti_j721e_ops, + }, + }, +#endif + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mhdp_ids); + +static struct platform_driver mhdp_driver = { + .driver = { + .name = "cdns-mhdp8546", + .of_match_table = of_match_ptr(mhdp_ids), + }, + .probe = cdns_mhdp_probe, + .remove = cdns_mhdp_remove, +}; +module_platform_driver(mhdp_driver); + +MODULE_FIRMWARE(FW_NAME); + +MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>"); +MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>"); +MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>"); +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); +MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>"); +MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:cdns-mhdp8546"); diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h new file mode 100644 index 000000000000..5897a85e3159 --- /dev/null +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h @@ -0,0 +1,400 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence MHDP8546 DP bridge driver. + * + * Copyright (C) 2020 Cadence Design Systems, Inc. 
+ * + * Author: Quentin Schulz <quentin.schulz@free-electrons.com> + * Swapnil Jakhade <sjakhade@cadence.com> + */ + +#ifndef CDNS_MHDP8546_CORE_H +#define CDNS_MHDP8546_CORE_H + +#include <linux/bits.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> + +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_dp_helper.h> + +struct clk; +struct device; +struct phy; + +/* Register offsets */ +#define CDNS_APB_CTRL 0x00000 +#define CDNS_CPU_STALL BIT(3) + +#define CDNS_MAILBOX_FULL 0x00008 +#define CDNS_MAILBOX_EMPTY 0x0000c +#define CDNS_MAILBOX_TX_DATA 0x00010 +#define CDNS_MAILBOX_RX_DATA 0x00014 +#define CDNS_KEEP_ALIVE 0x00018 +#define CDNS_KEEP_ALIVE_MASK GENMASK(7, 0) + +#define CDNS_VER_L 0x0001C +#define CDNS_VER_H 0x00020 +#define CDNS_LIB_L_ADDR 0x00024 +#define CDNS_LIB_H_ADDR 0x00028 + +#define CDNS_MB_INT_MASK 0x00034 +#define CDNS_MB_INT_STATUS 0x00038 + +#define CDNS_SW_CLK_L 0x0003c +#define CDNS_SW_CLK_H 0x00040 + +#define CDNS_SW_EVENT0 0x00044 +#define CDNS_DPTX_HPD BIT(0) + +#define CDNS_SW_EVENT1 0x00048 +#define CDNS_SW_EVENT2 0x0004c +#define CDNS_SW_EVENT3 0x00050 + +#define CDNS_APB_INT_MASK 0x0006C +#define CDNS_APB_INT_MASK_MAILBOX_INT BIT(0) +#define CDNS_APB_INT_MASK_SW_EVENT_INT BIT(1) + +#define CDNS_APB_INT_STATUS 0x00070 + +#define CDNS_DPTX_CAR 0x00904 +#define CDNS_VIF_CLK_EN BIT(0) +#define CDNS_VIF_CLK_RSTN BIT(1) + +#define CDNS_SOURCE_VIDEO_IF(s) (0x00b00 + ((s) * 0x20)) +#define CDNS_BND_HSYNC2VSYNC(s) (CDNS_SOURCE_VIDEO_IF(s) + \ + 0x00) +#define CDNS_IP_DTCT_WIN GENMASK(11, 0) +#define CDNS_IP_DET_INTERLACE_FORMAT BIT(12) +#define CDNS_IP_BYPASS_V_INTERFACE BIT(13) + +#define CDNS_HSYNC2VSYNC_POL_CTRL(s) (CDNS_SOURCE_VIDEO_IF(s) + \ + 0x10) +#define CDNS_H2V_HSYNC_POL_ACTIVE_LOW BIT(1) +#define CDNS_H2V_VSYNC_POL_ACTIVE_LOW BIT(2) + +#define CDNS_DPTX_PHY_CONFIG 0x02000 +#define CDNS_PHY_TRAINING_EN BIT(0) +#define CDNS_PHY_TRAINING_TYPE(x) (((x) & GENMASK(3, 0)) << 1) +#define CDNS_PHY_SCRAMBLER_BYPASS BIT(5) +#define CDNS_PHY_ENCODER_BYPASS BIT(6) +#define CDNS_PHY_SKEW_BYPASS BIT(7) +#define CDNS_PHY_TRAINING_AUTO BIT(8) +#define CDNS_PHY_LANE0_SKEW(x) (((x) & GENMASK(2, 0)) << 9) +#define CDNS_PHY_LANE1_SKEW(x) (((x) & GENMASK(2, 0)) << 12) +#define CDNS_PHY_LANE2_SKEW(x) (((x) & GENMASK(2, 0)) << 15) +#define CDNS_PHY_LANE3_SKEW(x) (((x) & GENMASK(2, 0)) << 18) +#define CDNS_PHY_COMMON_CONFIG (CDNS_PHY_LANE1_SKEW(1) | \ + CDNS_PHY_LANE2_SKEW(2) | \ + CDNS_PHY_LANE3_SKEW(3)) +#define CDNS_PHY_10BIT_EN BIT(21) + +#define CDNS_DP_FRAMER_GLOBAL_CONFIG 0x02200 +#define CDNS_DP_NUM_LANES(x) ((x) - 1) +#define CDNS_DP_MST_EN BIT(2) +#define CDNS_DP_FRAMER_EN BIT(3) +#define CDNS_DP_RATE_GOVERNOR_EN BIT(4) +#define CDNS_DP_NO_VIDEO_MODE BIT(5) +#define CDNS_DP_DISABLE_PHY_RST BIT(6) +#define CDNS_DP_WR_FAILING_EDGE_VSYNC BIT(7) + +#define CDNS_DP_FRAMER_TU 0x02208 +#define CDNS_DP_FRAMER_TU_SIZE(x) (((x) & GENMASK(6, 0)) << 8) +#define CDNS_DP_FRAMER_TU_VS(x) ((x) & GENMASK(5, 0)) +#define CDNS_DP_FRAMER_TU_CNT_RST_EN BIT(15) + +#define CDNS_DP_MTPH_CONTROL 0x02264 +#define CDNS_DP_MTPH_ECF_EN BIT(0) +#define CDNS_DP_MTPH_ACT_EN BIT(1) +#define CDNS_DP_MTPH_LVP_EN BIT(2) + +#define CDNS_DP_MTPH_STATUS 0x0226C +#define CDNS_DP_MTPH_ACT_STATUS BIT(0) + +#define CDNS_DP_LANE_EN 0x02300 +#define CDNS_DP_LANE_EN_LANES(x) GENMASK((x) - 1, 0) + +#define CDNS_DP_ENHNCD 0x02304 + +#define CDNS_DPTX_STREAM(s) (0x03000 + (s) * 0x80) +#define CDNS_DP_MSA_HORIZONTAL_0(s) (CDNS_DPTX_STREAM(s) + 0x00) +#define CDNS_DP_MSAH0_H_TOTAL(x) 
(x) +#define CDNS_DP_MSAH0_HSYNC_START(x) ((x) << 16) + +#define CDNS_DP_MSA_HORIZONTAL_1(s) (CDNS_DPTX_STREAM(s) + 0x04) +#define CDNS_DP_MSAH1_HSYNC_WIDTH(x) (x) +#define CDNS_DP_MSAH1_HSYNC_POL_LOW BIT(15) +#define CDNS_DP_MSAH1_HDISP_WIDTH(x) ((x) << 16) + +#define CDNS_DP_MSA_VERTICAL_0(s) (CDNS_DPTX_STREAM(s) + 0x08) +#define CDNS_DP_MSAV0_V_TOTAL(x) (x) +#define CDNS_DP_MSAV0_VSYNC_START(x) ((x) << 16) + +#define CDNS_DP_MSA_VERTICAL_1(s) (CDNS_DPTX_STREAM(s) + 0x0c) +#define CDNS_DP_MSAV1_VSYNC_WIDTH(x) (x) +#define CDNS_DP_MSAV1_VSYNC_POL_LOW BIT(15) +#define CDNS_DP_MSAV1_VDISP_WIDTH(x) ((x) << 16) + +#define CDNS_DP_MSA_MISC(s) (CDNS_DPTX_STREAM(s) + 0x10) +#define CDNS_DP_STREAM_CONFIG(s) (CDNS_DPTX_STREAM(s) + 0x14) +#define CDNS_DP_STREAM_CONFIG_2(s) (CDNS_DPTX_STREAM(s) + 0x2c) +#define CDNS_DP_SC2_TU_VS_DIFF(x) ((x) << 8) + +#define CDNS_DP_HORIZONTAL(s) (CDNS_DPTX_STREAM(s) + 0x30) +#define CDNS_DP_H_HSYNC_WIDTH(x) (x) +#define CDNS_DP_H_H_TOTAL(x) ((x) << 16) + +#define CDNS_DP_VERTICAL_0(s) (CDNS_DPTX_STREAM(s) + 0x34) +#define CDNS_DP_V0_VHEIGHT(x) (x) +#define CDNS_DP_V0_VSTART(x) ((x) << 16) + +#define CDNS_DP_VERTICAL_1(s) (CDNS_DPTX_STREAM(s) + 0x38) +#define CDNS_DP_V1_VTOTAL(x) (x) +#define CDNS_DP_V1_VTOTAL_EVEN BIT(16) + +#define CDNS_DP_MST_SLOT_ALLOCATE(s) (CDNS_DPTX_STREAM(s) + 0x44) +#define CDNS_DP_S_ALLOC_START_SLOT(x) (x) +#define CDNS_DP_S_ALLOC_END_SLOT(x) ((x) << 8) + +#define CDNS_DP_RATE_GOVERNING(s) (CDNS_DPTX_STREAM(s) + 0x48) +#define CDNS_DP_RG_TARG_AV_SLOTS_Y(x) (x) +#define CDNS_DP_RG_TARG_AV_SLOTS_X(x) ((x) << 4) +#define CDNS_DP_RG_ENABLE BIT(10) + +#define CDNS_DP_FRAMER_PXL_REPR(s) (CDNS_DPTX_STREAM(s) + 0x4c) +#define CDNS_DP_FRAMER_6_BPC BIT(0) +#define CDNS_DP_FRAMER_8_BPC BIT(1) +#define CDNS_DP_FRAMER_10_BPC BIT(2) +#define CDNS_DP_FRAMER_12_BPC BIT(3) +#define CDNS_DP_FRAMER_16_BPC BIT(4) +#define CDNS_DP_FRAMER_PXL_FORMAT 0x8 +#define CDNS_DP_FRAMER_RGB BIT(0) +#define CDNS_DP_FRAMER_YCBCR444 BIT(1) +#define CDNS_DP_FRAMER_YCBCR422 BIT(2) +#define CDNS_DP_FRAMER_YCBCR420 BIT(3) +#define CDNS_DP_FRAMER_Y_ONLY BIT(4) + +#define CDNS_DP_FRAMER_SP(s) (CDNS_DPTX_STREAM(s) + 0x50) +#define CDNS_DP_FRAMER_VSYNC_POL_LOW BIT(0) +#define CDNS_DP_FRAMER_HSYNC_POL_LOW BIT(1) +#define CDNS_DP_FRAMER_INTERLACE BIT(2) + +#define CDNS_DP_LINE_THRESH(s) (CDNS_DPTX_STREAM(s) + 0x64) +#define CDNS_DP_ACTIVE_LINE_THRESH(x) (x) + +#define CDNS_DP_VB_ID(s) (CDNS_DPTX_STREAM(s) + 0x68) +#define CDNS_DP_VB_ID_INTERLACED BIT(2) +#define CDNS_DP_VB_ID_COMPRESSED BIT(6) + +#define CDNS_DP_FRONT_BACK_PORCH(s) (CDNS_DPTX_STREAM(s) + 0x78) +#define CDNS_DP_BACK_PORCH(x) (x) +#define CDNS_DP_FRONT_PORCH(x) ((x) << 16) + +#define CDNS_DP_BYTE_COUNT(s) (CDNS_DPTX_STREAM(s) + 0x7c) +#define CDNS_DP_BYTE_COUNT_BYTES_IN_CHUNK_SHIFT 16 + +/* mailbox */ +#define MAILBOX_RETRY_US 1000 +#define MAILBOX_TIMEOUT_US 2000000 + +#define MB_OPCODE_ID 0 +#define MB_MODULE_ID 1 +#define MB_SIZE_MSB_ID 2 +#define MB_SIZE_LSB_ID 3 +#define MB_DATA_ID 4 + +#define MB_MODULE_ID_DP_TX 0x01 +#define MB_MODULE_ID_HDCP_TX 0x07 +#define MB_MODULE_ID_HDCP_RX 0x08 +#define MB_MODULE_ID_HDCP_GENERAL 0x09 +#define MB_MODULE_ID_GENERAL 0x0a + +/* firmware and opcodes */ +#define FW_NAME "cadence/mhdp8546.bin" +#define CDNS_MHDP_IMEM 0x10000 + +#define GENERAL_MAIN_CONTROL 0x01 +#define GENERAL_TEST_ECHO 0x02 +#define GENERAL_BUS_SETTINGS 0x03 +#define GENERAL_TEST_ACCESS 0x04 +#define GENERAL_REGISTER_READ 0x07 + +#define DPTX_SET_POWER_MNG 0x00 +#define DPTX_GET_EDID 0x02 +#define 
DPTX_READ_DPCD 0x03 +#define DPTX_WRITE_DPCD 0x04 +#define DPTX_ENABLE_EVENT 0x05 +#define DPTX_WRITE_REGISTER 0x06 +#define DPTX_READ_REGISTER 0x07 +#define DPTX_WRITE_FIELD 0x08 +#define DPTX_READ_EVENT 0x0a +#define DPTX_GET_LAST_AUX_STAUS 0x0e +#define DPTX_HPD_STATE 0x11 +#define DPTX_ADJUST_LT 0x12 + +#define FW_STANDBY 0 +#define FW_ACTIVE 1 + +/* HPD */ +#define DPTX_READ_EVENT_HPD_TO_HIGH BIT(0) +#define DPTX_READ_EVENT_HPD_TO_LOW BIT(1) +#define DPTX_READ_EVENT_HPD_PULSE BIT(2) +#define DPTX_READ_EVENT_HPD_STATE BIT(3) + +/* general */ +#define CDNS_DP_TRAINING_PATTERN_4 0x7 + +#define CDNS_KEEP_ALIVE_TIMEOUT 2000 + +#define CDNS_VOLT_SWING(x) ((x) & GENMASK(1, 0)) +#define CDNS_FORCE_VOLT_SWING BIT(2) + +#define CDNS_PRE_EMPHASIS(x) ((x) & GENMASK(1, 0)) +#define CDNS_FORCE_PRE_EMPHASIS BIT(2) + +#define CDNS_SUPPORT_TPS(x) BIT((x) - 1) + +#define CDNS_FAST_LINK_TRAINING BIT(0) + +#define CDNS_LANE_MAPPING_TYPE_C_LANE_0(x) ((x) & GENMASK(1, 0)) +#define CDNS_LANE_MAPPING_TYPE_C_LANE_1(x) ((x) & GENMASK(3, 2)) +#define CDNS_LANE_MAPPING_TYPE_C_LANE_2(x) ((x) & GENMASK(5, 4)) +#define CDNS_LANE_MAPPING_TYPE_C_LANE_3(x) ((x) & GENMASK(7, 6)) +#define CDNS_LANE_MAPPING_NORMAL 0xe4 +#define CDNS_LANE_MAPPING_FLIPPED 0x1b + +#define CDNS_DP_MAX_NUM_LANES 4 +#define CDNS_DP_TEST_VSC_SDP BIT(6) /* 1.3+ */ +#define CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY BIT(7) + +#define CDNS_MHDP_MAX_STREAMS 4 + +#define DP_LINK_CAP_ENHANCED_FRAMING BIT(0) + +struct cdns_mhdp_link { + unsigned char revision; + unsigned int rate; + unsigned int num_lanes; + unsigned long capabilities; +}; + +struct cdns_mhdp_host { + unsigned int link_rate; + u8 lanes_cnt; + u8 volt_swing; + u8 pre_emphasis; + u8 pattern_supp; + u8 lane_mapping; + bool fast_link; + bool enhanced; + bool scrambler; + bool ssc; +}; + +struct cdns_mhdp_sink { + unsigned int link_rate; + u8 lanes_cnt; + u8 pattern_supp; + bool fast_link; + bool enhanced; + bool ssc; +}; + +struct cdns_mhdp_display_fmt { + u32 color_format; + u32 bpc; + bool y_only; +}; + +/* + * These enums present MHDP hw initialization state + * Legal state transitions are: + * MHDP_HW_READY <-> MHDP_HW_STOPPED + */ +enum mhdp_hw_state { + MHDP_HW_READY = 1, /* HW ready, FW active */ + MHDP_HW_STOPPED /* Driver removal FW to be stopped */ +}; + +struct cdns_mhdp_device; + +struct mhdp_platform_ops { + int (*init)(struct cdns_mhdp_device *mhdp); + void (*exit)(struct cdns_mhdp_device *mhdp); + void (*enable)(struct cdns_mhdp_device *mhdp); + void (*disable)(struct cdns_mhdp_device *mhdp); +}; + +struct cdns_mhdp_bridge_state { + struct drm_bridge_state base; + struct drm_display_mode *current_mode; +}; + +struct cdns_mhdp_platform_info { + const struct drm_bridge_timings *timings; + const struct mhdp_platform_ops *ops; +}; + +#define to_cdns_mhdp_bridge_state(s) \ + container_of(s, struct cdns_mhdp_bridge_state, base) + +struct cdns_mhdp_device { + void __iomem *regs; + void __iomem *j721e_regs; + + struct device *dev; + struct clk *clk; + struct phy *phy; + + const struct cdns_mhdp_platform_info *info; + + /* This is to protect mailbox communications with the firmware */ + struct mutex mbox_mutex; + + /* + * "link_mutex" protects the access to all the link parameters + * including the link training process. Link training will be + * invoked both from threaded interrupt handler and from atomic + * callbacks when link_up is not set. So this mutex protects + * flags such as link_up, bridge_enabled, link.num_lanes, + * link.rate etc. 
+ */ + struct mutex link_mutex; + + struct drm_connector connector; + struct drm_bridge bridge; + + struct cdns_mhdp_link link; + struct drm_dp_aux aux; + + struct cdns_mhdp_host host; + struct cdns_mhdp_sink sink; + struct cdns_mhdp_display_fmt display_fmt; + u8 stream_id; + + bool link_up; + bool plugged; + + /* + * "start_lock" protects the access to bridge_attached and + * hw_state data members that control the delayed firmware + * loading and attaching the bridge. They are accessed from + * both the DRM core and cdns_mhdp_fw_cb(). In most cases just + * protecting the data members is enough, but the irq mask + * setting needs to be protected when enabling the FW. + */ + spinlock_t start_lock; + bool bridge_attached; + bool bridge_enabled; + enum mhdp_hw_state hw_state; + wait_queue_head_t fw_load_wq; + + /* Work struct to schedule a uevent on link train failure */ + struct work_struct modeset_retry_work; +}; + +#define connector_to_mhdp(x) container_of(x, struct cdns_mhdp_device, connector) +#define bridge_to_mhdp(x) container_of(x, struct cdns_mhdp_device, bridge) + +#endif diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c new file mode 100644 index 000000000000..dfe1b59514f7 --- /dev/null +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TI j721e Cadence MHDP8546 DP wrapper + * + * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/ + * Author: Jyri Sarha <jsarha@ti.com> + */ + +#include <linux/io.h> +#include <linux/platform_device.h> + +#include "cdns-mhdp8546-j721e.h" + +#define REVISION 0x00 +#define DPTX_IPCFG 0x04 +#define ECC_MEM_CFG 0x08 +#define DPTX_DSC_CFG 0x0c +#define DPTX_SRC_CFG 0x10 +#define DPTX_VIF_SECURE_MODE_CFG 0x14 +#define DPTX_VIF_CONN_STATUS 0x18 +#define PHY_CLK_STATUS 0x1c + +#define DPTX_SRC_AIF_EN BIT(16) +#define DPTX_SRC_VIF_3_IN30B BIT(11) +#define DPTX_SRC_VIF_2_IN30B BIT(10) +#define DPTX_SRC_VIF_1_IN30B BIT(9) +#define DPTX_SRC_VIF_0_IN30B BIT(8) +#define DPTX_SRC_VIF_3_SEL_DPI5 BIT(7) +#define DPTX_SRC_VIF_3_SEL_DPI3 0 +#define DPTX_SRC_VIF_2_SEL_DPI4 BIT(6) +#define DPTX_SRC_VIF_2_SEL_DPI2 0 +#define DPTX_SRC_VIF_1_SEL_DPI3 BIT(5) +#define DPTX_SRC_VIF_1_SEL_DPI1 0 +#define DPTX_SRC_VIF_0_SEL_DPI2 BIT(4) +#define DPTX_SRC_VIF_0_SEL_DPI0 0 +#define DPTX_SRC_VIF_3_EN BIT(3) +#define DPTX_SRC_VIF_2_EN BIT(2) +#define DPTX_SRC_VIF_1_EN BIT(1) +#define DPTX_SRC_VIF_0_EN BIT(0) + +/* TODO turn DPTX_IPCFG fw_mem_clk_en at pm_runtime_suspend. */ + +static int cdns_mhdp_j721e_init(struct cdns_mhdp_device *mhdp) +{ + struct platform_device *pdev = to_platform_device(mhdp->dev); + + mhdp->j721e_regs = devm_platform_ioremap_resource(pdev, 1); + return PTR_ERR_OR_ZERO(mhdp->j721e_regs); +} + +static void cdns_mhdp_j721e_enable(struct cdns_mhdp_device *mhdp) +{ + /* + * Enable VIF_0 and select DPI2 as its input. DSS0 DPI0 is connected + * to eDP DPI2. This is the only supported SST configuration on + * J721E. 
+ */ + writel(DPTX_SRC_VIF_0_EN | DPTX_SRC_VIF_0_SEL_DPI2, + mhdp->j721e_regs + DPTX_SRC_CFG); +} + +static void cdns_mhdp_j721e_disable(struct cdns_mhdp_device *mhdp) +{ + /* Put everything to defaults */ + writel(0, mhdp->j721e_regs + DPTX_DSC_CFG); +} + +const struct mhdp_platform_ops mhdp_ti_j721e_ops = { + .init = cdns_mhdp_j721e_init, + .enable = cdns_mhdp_j721e_enable, + .disable = cdns_mhdp_j721e_disable, +}; + +const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings = { + .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE | + DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE | + DRM_BUS_FLAG_DE_HIGH, +}; diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h new file mode 100644 index 000000000000..97d20d115a24 --- /dev/null +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * TI j721e Cadence MHDP8546 DP wrapper + * + * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/ + * Author: Jyri Sarha <jsarha@ti.com> + */ + +#ifndef CDNS_MHDP8546_J721E_H +#define CDNS_MHDP8546_J721E_H + +#include "cdns-mhdp8546-core.h" + +struct mhdp_platform_ops; + +extern const struct mhdp_platform_ops mhdp_ti_j721e_ops; +extern const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings; + +#endif /* !CDNS_MHDP8546_J721E_H */ diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index f19d9f7a5db2..f52ccffc1bd1 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -10,13 +10,16 @@ #include <linux/of_device.h> #include <linux/of_graph.h> #include <linux/platform_device.h> +#include <linux/regulator/consumer.h> #include <drm/drm_bridge.h> #include <drm/drm_panel.h> struct lvds_codec { + struct device *dev; struct drm_bridge bridge; struct drm_bridge *panel_bridge; + struct regulator *vcc; struct gpio_desc *powerdown_gpio; u32 connector_type; }; @@ -38,6 +41,14 @@ static int lvds_codec_attach(struct drm_bridge *bridge, static void lvds_codec_enable(struct drm_bridge *bridge) { struct lvds_codec *lvds_codec = to_lvds_codec(bridge); + int ret; + + ret = regulator_enable(lvds_codec->vcc); + if (ret) { + dev_err(lvds_codec->dev, + "Failed to enable regulator \"vcc\": %d\n", ret); + return; + } if (lvds_codec->powerdown_gpio) gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0); @@ -46,9 +57,15 @@ static void lvds_codec_enable(struct drm_bridge *bridge) static void lvds_codec_disable(struct drm_bridge *bridge) { struct lvds_codec *lvds_codec = to_lvds_codec(bridge); + int ret; if (lvds_codec->powerdown_gpio) gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1); + + ret = regulator_disable(lvds_codec->vcc); + if (ret) + dev_err(lvds_codec->dev, + "Failed to disable regulator \"vcc\": %d\n", ret); } static const struct drm_bridge_funcs funcs = { @@ -63,12 +80,24 @@ static int lvds_codec_probe(struct platform_device *pdev) struct device_node *panel_node; struct drm_panel *panel; struct lvds_codec *lvds_codec; + int ret; lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL); if (!lvds_codec) return -ENOMEM; + lvds_codec->dev = &pdev->dev; lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev); + + lvds_codec->vcc = devm_regulator_get(lvds_codec->dev, "power"); + if (IS_ERR(lvds_codec->vcc)) { + ret = PTR_ERR(lvds_codec->vcc); + if (ret != -EPROBE_DEFER) + dev_err(lvds_codec->dev, + "Unable to get \"vcc\" supply: %d\n", ret); + return ret; + } + 
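The lvds-codec hunk above follows the standard regulator-consumer probe pattern: look the supply up once, stay silent on -EPROBE_DEFER (the regulator may simply not have probed yet), and only enable/disable it from the bridge enable/disable paths. A condensed sketch of that pattern — the demo_probe() name and the decision to enable immediately are illustrative, not part of the driver:

```c
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int demo_probe(struct platform_device *pdev)
{
	struct regulator *vcc;
	int ret;

	/* "power" maps to a "power-supply" phandle in the device tree */
	vcc = devm_regulator_get(&pdev->dev, "power");
	if (IS_ERR(vcc)) {
		ret = PTR_ERR(vcc);
		/* Probe deferral is routine; only log real failures */
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "getting vcc failed: %d\n", ret);
		return ret;
	}

	/* In a real driver this lives in the bridge enable path and is
	 * balanced by regulator_disable() in the disable path. */
	return regulator_enable(vcc);
}
```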
lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH); if (IS_ERR(lvds_codec->powerdown_gpio)) diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c index 9f7b7a9c53c5..7bd0affa057a 100644 --- a/drivers/gpu/drm/bridge/parade-ps8640.c +++ b/drivers/gpu/drm/bridge/parade-ps8640.c @@ -65,6 +65,7 @@ struct ps8640 { struct regulator_bulk_data supplies[2]; struct gpio_desc *gpio_reset; struct gpio_desc *gpio_powerdown; + bool powered; }; static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e) @@ -91,13 +92,15 @@ static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge, return 0; } -static void ps8640_pre_enable(struct drm_bridge *bridge) +static void ps8640_bridge_poweron(struct ps8640 *ps_bridge) { - struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); struct i2c_client *client = ps_bridge->page[PAGE2_TOP_CNTL]; unsigned long timeout; int ret, status; + if (ps_bridge->powered) + return; + ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies), ps_bridge->supplies); if (ret < 0) { @@ -152,10 +155,6 @@ static void ps8640_pre_enable(struct drm_bridge *bridge) goto err_regulators_disable; } - ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE); - if (ret) - goto err_regulators_disable; - /* Switch access edp panel's edid through i2c */ ret = i2c_smbus_write_byte_data(client, PAGE2_I2C_BYPASS, I2C_BYPASS_EN); @@ -164,6 +163,8 @@ static void ps8640_pre_enable(struct drm_bridge *bridge) goto err_regulators_disable; } + ps_bridge->powered = true; + return; err_regulators_disable: @@ -171,12 +172,12 @@ err_regulators_disable: ps_bridge->supplies); } -static void ps8640_post_disable(struct drm_bridge *bridge) +static void ps8640_bridge_poweroff(struct ps8640 *ps_bridge) { - struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); int ret; - ps8640_bridge_vdo_control(ps_bridge, DISABLE); + if (!ps_bridge->powered) + return; gpiod_set_value(ps_bridge->gpio_reset, 1); gpiod_set_value(ps_bridge->gpio_powerdown, 1); @@ -184,6 +185,28 @@ static void ps8640_post_disable(struct drm_bridge *bridge) ps_bridge->supplies); if (ret < 0) DRM_ERROR("cannot disable regulators %d\n", ret); + + ps_bridge->powered = false; +} + +static void ps8640_pre_enable(struct drm_bridge *bridge) +{ + struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); + int ret; + + ps8640_bridge_poweron(ps_bridge); + + ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE); + if (ret < 0) + ps8640_bridge_poweroff(ps_bridge); +} + +static void ps8640_post_disable(struct drm_bridge *bridge) +{ + struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); + + ps8640_bridge_vdo_control(ps_bridge, DISABLE); + ps8640_bridge_poweroff(ps_bridge); } static int ps8640_bridge_attach(struct drm_bridge *bridge, @@ -249,9 +272,34 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); + bool poweroff = !ps_bridge->powered; + struct edid *edid; + + /* + * When we end calling get_edid() triggered by an ioctl, i.e + * + * drm_mode_getconnector (ioctl) + * -> drm_helper_probe_single_connector_modes + * -> drm_bridge_connector_get_modes + * -> ps8640_bridge_get_edid + * + * We need to make sure that what we need is enabled before reading + * EDID, for this chip, we need to do a full poweron, otherwise it will + * fail. 
+ */ + drm_bridge_chain_pre_enable(bridge); - return drm_get_edid(connector, + edid = drm_get_edid(connector, ps_bridge->page[PAGE0_DP_CNTL]->adapter); + + /* + * If we call the get_edid() function without having enabled the chip + * before, return the chip to its original power state. + */ + if (poweroff) + drm_bridge_chain_post_disable(bridge); + + return edid; } static const struct drm_bridge_funcs ps8640_bridge_funcs = { diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index d580b2aa4ce9..6b268f9445b3 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -89,7 +89,9 @@ #define VID_MODE_TYPE_NON_BURST_SYNC_EVENTS 0x1 #define VID_MODE_TYPE_BURST 0x2 #define VID_MODE_TYPE_MASK 0x3 +#define ENABLE_LOW_POWER_CMD BIT(15) #define VID_MODE_VPG_ENABLE BIT(16) +#define VID_MODE_VPG_MODE BIT(20) #define VID_MODE_VPG_HORIZONTAL BIT(24) #define DSI_VID_PKT_SIZE 0x3c @@ -220,6 +222,21 @@ #define PHY_STATUS_TIMEOUT_US 10000 #define CMD_PKT_STATUS_TIMEOUT_US 20000 +#ifdef CONFIG_DEBUG_FS +#define VPG_DEFS(name, dsi) \ + ((void __force *)&((*dsi).vpg_defs.name)) + +#define REGISTER(name, mask, dsi) \ + { #name, VPG_DEFS(name, dsi), mask, dsi } + +struct debugfs_entries { + const char *name; + bool *reg; + u32 mask; + struct dw_mipi_dsi *dsi; +}; +#endif /* CONFIG_DEBUG_FS */ + struct dw_mipi_dsi { struct drm_bridge bridge; struct mipi_dsi_host dsi_host; @@ -237,9 +254,12 @@ struct dw_mipi_dsi { #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; - - bool vpg; - bool vpg_horizontal; + struct debugfs_entries *debugfs_vpg; + struct { + bool vpg; + bool vpg_horizontal; + bool vpg_ber_pattern; + } vpg_defs; #endif /* CONFIG_DEBUG_FS */ struct dw_mipi_dsi *master; /* dual-dsi master ptr */ @@ -360,13 +380,28 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi, bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM; u32 val = 0; + /* + * TODO dw drv improvements + * largest packet sizes during hfp or during vsa/vpb/vfp + * should be computed according to byte lane, lane number and only + * if sending lp cmds in high speed is enable (PHY_TXREQUESTCLKHS) + */ + dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(16) + | INVACT_LPCMD_TIME(4)); + if (msg->flags & MIPI_DSI_MSG_REQ_ACK) val |= ACK_RQST_EN; if (lpm) val |= CMD_MODE_ALL_LP; - dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS); dsi_write(dsi, DSI_CMD_MODE_CFG, val); + + val = dsi_read(dsi, DSI_VID_MODE_CFG); + if (lpm) + val |= ENABLE_LOW_POWER_CMD; + else + val &= ~ENABLE_LOW_POWER_CMD; + dsi_write(dsi, DSI_VID_MODE_CFG, val); } static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val) @@ -529,9 +564,11 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi) val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS; #ifdef CONFIG_DEBUG_FS - if (dsi->vpg) { + if (dsi->vpg_defs.vpg) { val |= VID_MODE_VPG_ENABLE; - val |= dsi->vpg_horizontal ? VID_MODE_VPG_HORIZONTAL : 0; + val |= dsi->vpg_defs.vpg_horizontal ? + VID_MODE_VPG_HORIZONTAL : 0; + val |= dsi->vpg_defs.vpg_ber_pattern ? 
VID_MODE_VPG_MODE : 0; } #endif /* CONFIG_DEBUG_FS */ @@ -541,16 +578,22 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi) static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi, unsigned long mode_flags) { + u32 val; + dsi_write(dsi, DSI_PWR_UP, RESET); if (mode_flags & MIPI_DSI_MODE_VIDEO) { dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE); dw_mipi_dsi_video_mode_config(dsi); - dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS); } else { dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE); } + val = PHY_TXREQUESTCLKHS; + if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) + val |= AUTO_CLKLANE_CTRL; + dsi_write(dsi, DSI_LPCLK_CTRL, val); + dsi_write(dsi, DSI_PWR_UP, POWERUP); } @@ -562,15 +605,30 @@ static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi) static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi) { + const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops; + unsigned int esc_rate; /* in MHz */ + u32 esc_clk_division; + int ret; + /* * The maximum permitted escape clock is 20MHz and it is derived from - * lanebyteclk, which is running at "lane_mbps / 8". Thus we want: - * - * (lane_mbps >> 3) / esc_clk_division < 20 + * lanebyteclk, which is running at "lane_mbps / 8". + */ + if (phy_ops->get_esc_clk_rate) { + ret = phy_ops->get_esc_clk_rate(dsi->plat_data->priv_data, + &esc_rate); + if (ret) + DRM_DEBUG_DRIVER("Phy get_esc_clk_rate() failed\n"); + } else + esc_rate = 20; /* Default to 20MHz */ + + /* + * We want : + * (lane_mbps >> 3) / esc_clk_division < X * which is: - * (lane_mbps >> 3) / 20 > esc_clk_division + * (lane_mbps >> 3) / X > esc_clk_division */ - u32 esc_clk_division = (dsi->lane_mbps >> 3) / 20 + 1; + esc_clk_division = (dsi->lane_mbps >> 3) / esc_rate + 1; dsi_write(dsi, DSI_PWR_UP, RESET); @@ -611,14 +669,6 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi, dsi_write(dsi, DSI_DPI_VCID, DPI_VCID(dsi->channel)); dsi_write(dsi, DSI_DPI_COLOR_CODING, color); dsi_write(dsi, DSI_DPI_CFG_POL, val); - /* - * TODO dw drv improvements - * largest packet sizes during hfp or during vsa/vpb/vfp - * should be computed according to byte lane, lane number and only - * if sending lp cmds in high speed is enable (PHY_TXREQUESTCLKHS) - */ - dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(4) - | INVACT_LPCMD_TIME(4)); } static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi) @@ -964,6 +1014,66 @@ static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = { #ifdef CONFIG_DEBUG_FS +static int dw_mipi_dsi_debugfs_write(void *data, u64 val) +{ + struct debugfs_entries *vpg = data; + struct dw_mipi_dsi *dsi; + u32 mode_cfg; + + if (!vpg) + return -ENODEV; + + dsi = vpg->dsi; + + *vpg->reg = (bool)val; + + mode_cfg = dsi_read(dsi, DSI_VID_MODE_CFG); + + if (*vpg->reg) + mode_cfg |= vpg->mask; + else + mode_cfg &= ~vpg->mask; + + dsi_write(dsi, DSI_VID_MODE_CFG, mode_cfg); + + return 0; +} + +static int dw_mipi_dsi_debugfs_show(void *data, u64 *val) +{ + struct debugfs_entries *vpg = data; + + if (!vpg) + return -ENODEV; + + *val = *vpg->reg; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_mipi_dsi_debugfs_show, + dw_mipi_dsi_debugfs_write, "%llu\n"); + +static void debugfs_create_files(void *data) +{ + struct dw_mipi_dsi *dsi = data; + struct debugfs_entries debugfs[] = { + REGISTER(vpg, VID_MODE_VPG_ENABLE, dsi), + REGISTER(vpg_horizontal, VID_MODE_VPG_HORIZONTAL, dsi), + REGISTER(vpg_ber_pattern, VID_MODE_VPG_MODE, dsi), + }; + int i; + + dsi->debugfs_vpg = kmemdup(debugfs, sizeof(debugfs), GFP_KERNEL); + 
if (!dsi->debugfs_vpg) + return; + + for (i = 0; i < ARRAY_SIZE(debugfs); i++) + debugfs_create_file(dsi->debugfs_vpg[i].name, 0644, + dsi->debugfs, &dsi->debugfs_vpg[i], + &fops_x32); +} + static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi) { dsi->debugfs = debugfs_create_dir(dev_name(dsi->dev), NULL); @@ -972,14 +1082,13 @@ static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi) return; } - debugfs_create_bool("vpg", 0660, dsi->debugfs, &dsi->vpg); - debugfs_create_bool("vpg_horizontal", 0660, dsi->debugfs, - &dsi->vpg_horizontal); + debugfs_create_files(dsi); } static void dw_mipi_dsi_debugfs_remove(struct dw_mipi_dsi *dsi) { debugfs_remove_recursive(dsi->debugfs); + kfree(dsi->debugfs_vpg); } #else diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c index d951cdc58297..2272adcc5b4a 100644 --- a/drivers/gpu/drm/bridge/tc358775.c +++ b/drivers/gpu/drm/bridge/tc358775.c @@ -485,7 +485,7 @@ static void tc_bridge_enable(struct drm_bridge *bridge) val |= TC358775_LVCFG_PCLKDIV(DIVIDE_BY_6); } else { val |= TC358775_LVCFG_PCLKDIV(DIVIDE_BY_3); - }; + } d2l_write(tc->i2c, LVCFG, val); } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 9e1ad493e689..f9170b4b22e7 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1115,9 +1115,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) * @old_state: atomic state object with old state structures * * This function updates all the various legacy modeset state pointers in - * connectors, encoders and CRTCs. It also updates the timestamping constants - * used for precise vblank timestamps by calling - * drm_calc_timestamping_constants(). + * connectors, encoders and CRTCs. * * Drivers can use this for building their own atomic commit if they don't have * a pure helper-based modeset implementation. @@ -1186,13 +1184,30 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, crtc->x = new_plane_state->src_x >> 16; crtc->y = new_plane_state->src_y >> 16; } + } +} +EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state); +/** + * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants + * @state: atomic state object + * + * Updates the timestamping constants used for precise vblank timestamps + * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state. 
+ */ +void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state) +{ + struct drm_crtc_state *new_crtc_state; + struct drm_crtc *crtc; + int i; + + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->enable) drm_calc_timestamping_constants(crtc, &new_crtc_state->adjusted_mode); } } -EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state); +EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants); static void crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) @@ -1276,6 +1291,7 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, disable_outputs(dev, old_state); drm_atomic_helper_update_legacy_modeset_state(dev, old_state); + drm_atomic_helper_calc_timestamping_constants(old_state); crtc_set_mode(dev, old_state); } diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 03e01b000f7a..0fe3c496002a 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -127,7 +127,7 @@ drm_clflush_sg(struct sg_table *st) struct sg_page_iter sg_iter; mb(); /*CLFLUSH is ordered only by using memory barriers*/ - for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) + for_each_sgtable_page(st, &sg_iter, 0) drm_clflush_page(sg_page_iter_page(&sg_iter)); mb(); /*Make sure that all cache line entry is flushed*/ diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 3d48ad1c3682..717c4e7271b0 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2289,7 +2289,7 @@ static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *conne static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode, - const struct list_head *export_list, + const struct list_head *modes, const struct drm_file *file_priv) { /* @@ -2305,15 +2305,17 @@ drm_mode_expose_to_userspace(const struct drm_display_mode *mode, * while preparing the list of user-modes. */ if (!file_priv->aspect_ratio_allowed) { - struct drm_display_mode *mode_itr; + const struct drm_display_mode *mode_itr; - list_for_each_entry(mode_itr, export_list, export_head) - if (drm_mode_match(mode_itr, mode, + list_for_each_entry(mode_itr, modes, head) { + if (mode_itr->expose_to_userspace && + drm_mode_match(mode_itr, mode, DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_CLOCK | DRM_MODE_MATCH_FLAGS | DRM_MODE_MATCH_3D_FLAGS)) return false; + } } return true; @@ -2333,7 +2335,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, struct drm_mode_modeinfo u_mode; struct drm_mode_modeinfo __user *mode_ptr; uint32_t __user *encoder_ptr; - LIST_HEAD(export_list); if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; @@ -2377,25 +2378,30 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, out_resp->connection = connector->status; /* delayed so we get modes regardless of pre-fill_modes state */ - list_for_each_entry(mode, &connector->modes, head) - if (drm_mode_expose_to_userspace(mode, &export_list, + list_for_each_entry(mode, &connector->modes, head) { + WARN_ON(mode->expose_to_userspace); + + if (drm_mode_expose_to_userspace(mode, &connector->modes, file_priv)) { - list_add_tail(&mode->export_head, &export_list); + mode->expose_to_userspace = true; mode_count++; } + } /* * This ioctl is called twice, once to determine how much space is * needed, and the 2nd time to fill it. - * The modes that need to be exposed to the user are maintained in the - * 'export_list'. 
When the ioctl is called first time to determine the, - * space, the export_list gets filled, to find the no.of modes. In the - * 2nd time, the user modes are filled, one by one from the export_list. */ if ((out_resp->count_modes >= mode_count) && mode_count) { copied = 0; mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr; - list_for_each_entry(mode, &export_list, export_head) { + list_for_each_entry(mode, &connector->modes, head) { + if (!mode->expose_to_userspace) + continue; + + /* Clear the tag for the next time around */ + mode->expose_to_userspace = false; + drm_mode_convert_to_umode(&u_mode, mode); /* * Reset aspect ratio flags of user-mode, if modes with @@ -2406,13 +2412,26 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (copy_to_user(mode_ptr + copied, &u_mode, sizeof(u_mode))) { ret = -EFAULT; + + /* + * Clear the tag for the rest of + * the modes for the next time around. + */ + list_for_each_entry_continue(mode, &connector->modes, head) + mode->expose_to_userspace = false; + mutex_unlock(&dev->mode_config.mutex); goto out; } copied++; } + } else { + /* Clear the tag for the next time around */ + list_for_each_entry(mode, &connector->modes, head) + mode->expose_to_userspace = false; } + out_resp->count_modes = mode_count; mutex_unlock(&dev->mode_config.mutex); diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c index 5d67a41f7c3a..3dd70d813f69 100644 --- a/drivers/gpu/drm/drm_debugfs_crc.c +++ b/drivers/gpu/drm/drm_debugfs_crc.c @@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf, source[len - 1] = '\0'; ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt); - if (ret) + if (ret) { + kfree(source); return ret; + } spin_lock_irq(&crc->lock); diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 1e7c638873c8..90807a6b415c 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -363,6 +363,66 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, } EXPORT_SYMBOL(drm_dp_dpcd_read_link_status); +static bool is_edid_digital_input_dp(const struct edid *edid) +{ + return edid && edid->revision >= 4 && + edid->input & DRM_EDID_INPUT_DIGITAL && + (edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_DP; +} + +/** + * drm_dp_downstream_is_type() - is the downstream facing port of certain type? + * @dpcd: DisplayPort configuration data + * @port_cap: port capabilities + * + * Caveat: Only works with DPCD 1.1+ port caps. + * + * Returns: whether the downstream facing port matches the type. + */ +bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], u8 type) +{ + return drm_dp_is_branch(dpcd) && + dpcd[DP_DPCD_REV] >= 0x11 && + (port_cap[0] & DP_DS_PORT_TYPE_MASK) == type; +} +EXPORT_SYMBOL(drm_dp_downstream_is_type); + +/** + * drm_dp_downstream_is_tmds() - is the downstream facing port TMDS? + * @dpcd: DisplayPort configuration data + * @port_cap: port capabilities + * @edid: EDID + * + * Returns: whether the downstream facing port is TMDS (HDMI/DVI). 
+ */ +bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid) +{ + if (dpcd[DP_DPCD_REV] < 0x11) { + switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) { + case DP_DWN_STRM_PORT_TYPE_TMDS: + return true; + default: + return false; + } + } + + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { + case DP_DS_PORT_TYPE_DP_DUALMODE: + if (is_edid_digital_input_dp(edid)) + return false; + fallthrough; + case DP_DS_PORT_TYPE_DVI: + case DP_DS_PORT_TYPE_HDMI: + return true; + default: + return false; + } +} +EXPORT_SYMBOL(drm_dp_downstream_is_tmds); + /** * drm_dp_send_real_edid_checksum() - send back real edid checksum value * @aux: DisplayPort AUX channel @@ -545,79 +605,191 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux, ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len); if (ret < 0) return ret; + if (ret != len) + return -EIO; + + DRM_DEBUG_KMS("%s: DPCD DFP: %*ph\n", + aux->name, len, downstream_ports); - return ret == len ? 0 : -EIO; + return 0; } EXPORT_SYMBOL(drm_dp_read_downstream_info); /** - * drm_dp_downstream_max_clock() - extract branch device max - * pixel rate for legacy VGA - * converter or max TMDS clock - * rate for others + * drm_dp_downstream_max_dotclock() - extract downstream facing port max dot clock * @dpcd: DisplayPort configuration data * @port_cap: port capabilities * - * See also: - * drm_dp_read_downstream_info() - * drm_dp_downstream_max_bpc() - * - * Returns: Max clock in kHz on success or 0 if max clock not defined + * Returns: Downstream facing port max dot clock in kHz on success, + * or 0 if max clock not defined */ -int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]) +int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]) { - int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; - bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] & - DP_DETAILED_CAP_INFO_AVAILABLE; + if (!drm_dp_is_branch(dpcd)) + return 0; - if (!detailed_cap_info) + if (dpcd[DP_DPCD_REV] < 0x11) return 0; - switch (type) { + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { case DP_DS_PORT_TYPE_VGA: - return port_cap[1] * 8 * 1000; - case DP_DS_PORT_TYPE_DVI: - case DP_DS_PORT_TYPE_HDMI: + if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0) + return 0; + return port_cap[1] * 8000; + default: + return 0; + } +} +EXPORT_SYMBOL(drm_dp_downstream_max_dotclock); + +/** + * drm_dp_downstream_max_tmds_clock() - extract downstream facing port max TMDS clock + * @dpcd: DisplayPort configuration data + * @port_cap: port capabilities + * @edid: EDID + * + * Returns: HDMI/DVI downstream facing port max TMDS clock in kHz on success, + * or 0 if max TMDS clock not defined + */ +int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid) +{ + if (!drm_dp_is_branch(dpcd)) + return 0; + + if (dpcd[DP_DPCD_REV] < 0x11) { + switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) { + case DP_DWN_STRM_PORT_TYPE_TMDS: + return 165000; + default: + return 0; + } + } + + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { case DP_DS_PORT_TYPE_DP_DUALMODE: + if (is_edid_digital_input_dp(edid)) + return 0; + /* + * It's left up to the driver to check the + * DP dual mode adapter's max TMDS clock. 
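+ * Drivers would typically do that via the DP dual mode + * helpers (e.g. drm_dp_dual_mode_max_tmds_clock()).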
+ * + * Unfortunately it looks like branch devices + * may not forward the DP dual mode i2c + * access, so we usually just get an i2c nak :( + */ + fallthrough; + case DP_DS_PORT_TYPE_HDMI: + /* + * We should perhaps assume 165 MHz when detailed cap + * info is not available. But it looks like many typical + * branch devices fall into that category and so we'd + * probably end up with users complaining that they can't + * get high resolution modes with their favorite dongle. + * + * So let's limit to 300 MHz instead since DPCD 1.4 + * HDMI 2.0 DFPs are required to have the detailed cap + * info. So it's more likely we're dealing with a HDMI 1.4 + * compatible device here. + */ + if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0) + return 300000; + return port_cap[1] * 2500; + case DP_DS_PORT_TYPE_DVI: + if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0) + return 165000; + /* FIXME what to do about DVI dual link? */ return port_cap[1] * 2500; default: return 0; } } -EXPORT_SYMBOL(drm_dp_downstream_max_clock); +EXPORT_SYMBOL(drm_dp_downstream_max_tmds_clock); /** - * drm_dp_downstream_max_bpc() - extract branch device max - * bits per component + * drm_dp_downstream_min_tmds_clock() - extract downstream facing port min TMDS clock * @dpcd: DisplayPort configuration data * @port_cap: port capabilities + * @edid: EDID * - * See also: - * drm_dp_read_downstream_info() - * drm_dp_downstream_max_clock() + * Returns: HDMI/DVI downstream facing port min TMDS clock in kHz on success, + * or 0 if min TMDS clock not defined + */ +int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid) +{ + if (!drm_dp_is_branch(dpcd)) + return 0; + + if (dpcd[DP_DPCD_REV] < 0x11) { + switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) { + case DP_DWN_STRM_PORT_TYPE_TMDS: + return 25000; + default: + return 0; + } + } + + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { + case DP_DS_PORT_TYPE_DP_DUALMODE: + if (is_edid_digital_input_dp(edid)) + return 0; + fallthrough; + case DP_DS_PORT_TYPE_DVI: + case DP_DS_PORT_TYPE_HDMI: + /* + * Unclear whether the protocol converter could + * utilize pixel replication. Assume it won't.
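+ * + * 25 MHz is used as the floor below since both DVI and HDMI sinks + * are expected to accept pixel clocks down to roughly 25 MHz + * (25.175 MHz for 640x480).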
+ */ + return 25000; + default: + return 0; + } +} +EXPORT_SYMBOL(drm_dp_downstream_min_tmds_clock); + +/** + * drm_dp_downstream_max_bpc() - extract downstream facing port max + * bits per component + * @dpcd: DisplayPort configuration data + * @port_cap: downstream facing port capabilities + * @edid: EDID * * Returns: Max bpc on success or 0 if max bpc not defined */ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]) + const u8 port_cap[4], + const struct edid *edid) { - int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; - bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] & - DP_DETAILED_CAP_INFO_AVAILABLE; - int bpc; - - if (!detailed_cap_info) + if (!drm_dp_is_branch(dpcd)) return 0; - switch (type) { - case DP_DS_PORT_TYPE_VGA: - case DP_DS_PORT_TYPE_DVI: - case DP_DS_PORT_TYPE_HDMI: + if (dpcd[DP_DPCD_REV] < 0x11) { + switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) { + case DP_DWN_STRM_PORT_TYPE_DP: + return 0; + default: + return 8; + } + } + + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { + case DP_DS_PORT_TYPE_DP: + return 0; case DP_DS_PORT_TYPE_DP_DUALMODE: - bpc = port_cap[2] & DP_DS_MAX_BPC_MASK; + if (is_edid_digital_input_dp(edid)) + return 0; + fallthrough; + case DP_DS_PORT_TYPE_HDMI: + case DP_DS_PORT_TYPE_DVI: + case DP_DS_PORT_TYPE_VGA: + if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0) + return 8; - switch (bpc) { + switch (port_cap[2] & DP_DS_MAX_BPC_MASK) { case DP_DS_8BPC: return 8; case DP_DS_10BPC: @@ -626,15 +798,131 @@ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], return 12; case DP_DS_16BPC: return 16; + default: + return 8; } - fallthrough; + break; default: - return 0; + return 8; } } EXPORT_SYMBOL(drm_dp_downstream_max_bpc); /** + * drm_dp_downstream_420_passthrough() - determine downstream facing port + * YCbCr 4:2:0 pass-through capability + * @dpcd: DisplayPort configuration data + * @port_cap: downstream facing port capabilities + * + * Returns: whether the downstream facing port can pass through YCbCr 4:2:0 + */ +bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]) +{ + if (!drm_dp_is_branch(dpcd)) + return false; + + if (dpcd[DP_DPCD_REV] < 0x13) + return false; + + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { + case DP_DS_PORT_TYPE_DP: + return true; + case DP_DS_PORT_TYPE_HDMI: + if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0) + return false; + + return port_cap[3] & DP_DS_HDMI_YCBCR420_PASS_THROUGH; + default: + return false; + } +} +EXPORT_SYMBOL(drm_dp_downstream_420_passthrough); + +/** + * drm_dp_downstream_444_to_420_conversion() - determine downstream facing port + * YCbCr 4:4:4->4:2:0 conversion capability + * @dpcd: DisplayPort configuration data + * @port_cap: downstream facing port capabilities + * + * Returns: whether the downstream facing port can convert YCbCr 4:4:4 to 4:2:0 + */ +bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]) +{ + if (!drm_dp_is_branch(dpcd)) + return false; + + if (dpcd[DP_DPCD_REV] < 0x13) + return false; + + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { + case DP_DS_PORT_TYPE_HDMI: + if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0) + return false; + + return port_cap[3] & DP_DS_HDMI_YCBCR444_TO_420_CONV; + default: + return false; + } +} +EXPORT_SYMBOL(drm_dp_downstream_444_to_420_conversion); + +/** + * drm_dp_downstream_mode() - return a 
mode for downstream facing port + * @dev: DRM device + * @dpcd: DisplayPort configuration data + * @port_cap: port capabilities + * + * Provides a suitable mode for downstream facing ports without EDID. + * + * Returns: A new drm_display_mode on success or NULL on failure + */ +struct drm_display_mode * +drm_dp_downstream_mode(struct drm_device *dev, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]) +{ + u8 vic; + + if (!drm_dp_is_branch(dpcd)) + return NULL; + + if (dpcd[DP_DPCD_REV] < 0x11) + return NULL; + + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { + case DP_DS_PORT_TYPE_NON_EDID: + switch (port_cap[0] & DP_DS_NON_EDID_MASK) { + case DP_DS_NON_EDID_720x480i_60: + vic = 6; + break; + case DP_DS_NON_EDID_720x480i_50: + vic = 21; + break; + case DP_DS_NON_EDID_1920x1080i_60: + vic = 5; + break; + case DP_DS_NON_EDID_1920x1080i_50: + vic = 20; + break; + case DP_DS_NON_EDID_1280x720_60: + vic = 4; + break; + case DP_DS_NON_EDID_1280x720_50: + vic = 19; + break; + default: + return NULL; + } + return drm_display_mode_from_cea_vic(dev, vic); + default: + return NULL; + } +} +EXPORT_SYMBOL(drm_dp_downstream_mode); + +/** * drm_dp_downstream_id() - identify branch device * @aux: DisplayPort AUX channel * @id: DisplayPort branch device id @@ -652,12 +940,15 @@ EXPORT_SYMBOL(drm_dp_downstream_id); * @m: pointer for debugfs file * @dpcd: DisplayPort configuration data * @port_cap: port capabilities + * @edid: EDID * @aux: DisplayPort AUX channel * */ void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], struct drm_dp_aux *aux) + const u8 port_cap[4], + const struct edid *edid, + struct drm_dp_aux *aux) { bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE; @@ -715,16 +1006,19 @@ void drm_dp_downstream_debug(struct seq_file *m, seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); if (detailed_cap_info) { - clk = drm_dp_downstream_max_clock(dpcd, port_cap); + clk = drm_dp_downstream_max_dotclock(dpcd, port_cap); + if (clk > 0) + seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk); - if (clk > 0) { - if (type == DP_DS_PORT_TYPE_VGA) - seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk); - else - seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk); - } + clk = drm_dp_downstream_max_tmds_clock(dpcd, port_cap, edid); + if (clk > 0) + seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk); + + clk = drm_dp_downstream_min_tmds_clock(dpcd, port_cap, edid); + if (clk > 0) + seq_printf(m, "\t\tMin TMDS clock: %d kHz\n", clk); - bpc = drm_dp_downstream_max_bpc(dpcd, port_cap); + bpc = drm_dp_downstream_max_bpc(dpcd, port_cap, edid); if (bpc > 0) seq_printf(m, "\t\tMax bpc: %d\n", bpc); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 17dbed0a9800..e87542533640 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -20,11 +20,13 @@ * OF THIS SOFTWARE.
*/ +#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/random.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/iopoll.h> @@ -423,6 +425,22 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req, memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes); idx += req->u.i2c_write.num_bytes; break; + case DP_QUERY_STREAM_ENC_STATUS: { + const struct drm_dp_query_stream_enc_status *msg; + + msg = &req->u.enc_status; + buf[idx] = msg->stream_id; + idx++; + memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id)); + idx += sizeof(msg->client_id); + buf[idx] = 0; + buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event); + buf[idx] |= msg->valid_stream_event ? BIT(2) : 0; + buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior); + buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0; + idx++; + } + break; } raw->cur_len = idx; } @@ -551,6 +569,20 @@ drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw, return -ENOMEM; } break; + case DP_QUERY_STREAM_ENC_STATUS: + req->u.enc_status.stream_id = buf[idx++]; + for (i = 0; i < sizeof(req->u.enc_status.client_id); i++) + req->u.enc_status.client_id[i] = buf[idx++]; + + req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0), + buf[idx]); + req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2), + buf[idx]); + req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3), + buf[idx]); + req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5), + buf[idx]); + break; } return 0; @@ -629,6 +661,16 @@ drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes, req->u.i2c_write.bytes); break; + case DP_QUERY_STREAM_ENC_STATUS: + P("stream_id=%u client_id=%*ph stream_event=%x " + "valid_event=%d stream_behavior=%x valid_behavior=%d", + req->u.enc_status.stream_id, + (int)ARRAY_SIZE(req->u.enc_status.client_id), + req->u.enc_status.client_id, req->u.enc_status.stream_event, + req->u.enc_status.valid_stream_event, + req->u.enc_status.stream_behavior, + req->u.enc_status.valid_stream_behavior); + break; default: P("???\n"); break; @@ -936,6 +978,42 @@ static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_ms return true; } +static bool +drm_dp_sideband_parse_query_stream_enc_status( + struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + struct drm_dp_query_stream_enc_status_ack_reply *reply; + + reply = &repmsg->u.enc_status; + + reply->stream_id = raw->msg[3]; + + reply->reply_signed = raw->msg[2] & BIT(0); + + /* + * NOTE: It's my impression from reading the spec that the below parsing + * is correct. However I noticed while testing with an HDCP 1.4 display + * through an HDCP 2.2 hub that only bit 3 was set. In that case, I + * would expect both bits to be set. So keep the parsing following the + * spec, but beware reality might not match the spec (at least for some + * configurations). 
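+ * + * Until this is better understood, callers are probably safest + * treating the two hdcp_*_device_present flags as one combined + * "HDCP device present" indication rather than relying on the + * exact split between them.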
+ */ + reply->hdcp_1x_device_present = raw->msg[2] & BIT(4); + reply->hdcp_2x_device_present = raw->msg[2] & BIT(3); + + reply->query_capable_device_present = raw->msg[2] & BIT(5); + reply->legacy_device_present = raw->msg[2] & BIT(6); + reply->unauthorizable_device_present = raw->msg[2] & BIT(7); + + reply->auth_completed = !!(raw->msg[1] & BIT(3)); + reply->encryption_enabled = !!(raw->msg[1] & BIT(4)); + reply->repeater_present = !!(raw->msg[1] & BIT(5)); + reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6; + + return true; +} + static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, struct drm_dp_sideband_msg_reply_body *msg) { @@ -961,6 +1039,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, return drm_dp_sideband_parse_remote_dpcd_write(raw, msg); case DP_REMOTE_I2C_READ: return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg); + case DP_REMOTE_I2C_WRITE: + return true; /* since there's nothing to parse */ case DP_ENUM_PATH_RESOURCES: return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg); case DP_ALLOCATE_PAYLOAD: @@ -970,6 +1050,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg); case DP_CLEAR_PAYLOAD_ID_TABLE: return true; /* since there's nothing to parse */ + case DP_QUERY_STREAM_ENC_STATUS: + return drm_dp_sideband_parse_query_stream_enc_status(raw, msg); default: DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type, drm_dp_mst_req_type_str(msg->req_type)); @@ -1121,6 +1203,25 @@ static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg, msg->path_msg = true; } +static int +build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id, + u8 *q_id) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_QUERY_STREAM_ENC_STATUS; + req.u.enc_status.stream_id = stream_id; + memcpy(req.u.enc_status.client_id, q_id, + sizeof(req.u.enc_status.client_id)); + req.u.enc_status.stream_event = 0; + req.u.enc_status.valid_stream_event = false; + req.u.enc_status.stream_behavior = 0; + req.u.enc_status.valid_stream_behavior = false; + + drm_dp_encode_sideband_req(&req, msg); + return 0; +} + static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_vcpi *vcpi) { @@ -3153,6 +3254,57 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, } EXPORT_SYMBOL(drm_dp_send_power_updown_phy); +int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_query_stream_enc_status_ack_reply *status) +{ + struct drm_dp_sideband_msg_tx *txmsg; + u8 nonce[7]; + int len, ret; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return -ENOMEM; + + port = drm_dp_mst_topology_get_port_validated(mgr, port); + if (!port) { + ret = -EINVAL; + goto out_get_port; + } + + get_random_bytes(nonce, sizeof(nonce)); + + /* + * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message + * transaction at the MST Branch device directly connected to the + * Source" + */ + txmsg->dst = mgr->mst_primary; + + len = build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg); + if (ret < 0) { + goto out; + } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { + drm_dbg_kms(mgr->dev, "query encryption status nak received\n"); + ret = -ENXIO; + goto out; + } + + ret = 0; + memcpy(status, 
&txmsg->reply.u.enc_status, sizeof(*status)); + +out: + drm_dp_mst_topology_put_port(port); +out_get_port: + kfree(txmsg); + return ret; +} +EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status); + static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, int id, struct drm_dp_payload *payload) @@ -5349,29 +5501,29 @@ static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num) msgs[num - 1].len <= 0xff; } -/* I2C device */ -static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, - int num) +static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num) +{ + int i; + + for (i = 0; i < num - 1; i++) { + if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) || + msgs[i].len > 0xff) + return false; + } + + return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff; +} + +static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port, + struct i2c_msg *msgs, int num) { - struct drm_dp_aux *aux = adapter->algo_data; - struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux); - struct drm_dp_mst_branch *mstb; struct drm_dp_mst_topology_mgr *mgr = port->mgr; unsigned int i; struct drm_dp_sideband_msg_req_body msg; struct drm_dp_sideband_msg_tx *txmsg = NULL; int ret; - mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); - if (!mstb) - return -EREMOTEIO; - - if (!remote_i2c_read_ok(msgs, num)) { - DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); - ret = -EIO; - goto out; - } - memset(&msg, 0, sizeof(msg)); msg.req_type = DP_REMOTE_I2C_READ; msg.u.i2c_read.num_transactions = num - 1; @@ -5412,6 +5564,78 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs } out: kfree(txmsg); + return ret; +} + +static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port, + struct i2c_msg *msgs, int num) +{ + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + unsigned int i; + struct drm_dp_sideband_msg_req_body msg; + struct drm_dp_sideband_msg_tx *txmsg = NULL; + int ret; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + ret = -ENOMEM; + goto out; + } + for (i = 0; i < num; i++) { + memset(&msg, 0, sizeof(msg)); + msg.req_type = DP_REMOTE_I2C_WRITE; + msg.u.i2c_write.port_number = port->port_num; + msg.u.i2c_write.write_i2c_device_id = msgs[i].addr; + msg.u.i2c_write.num_bytes = msgs[i].len; + msg.u.i2c_write.bytes = msgs[i].buf; + + memset(txmsg, 0, sizeof(*txmsg)); + txmsg->dst = mstb; + + drm_dp_encode_sideband_req(&msg, txmsg); + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { + ret = -EREMOTEIO; + goto out; + } + } else { + goto out; + } + } + ret = num; +out: + kfree(txmsg); + return ret; +} + +/* I2C device */ +static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, int num) +{ + struct drm_dp_aux *aux = adapter->algo_data; + struct drm_dp_mst_port *port = + container_of(aux, struct drm_dp_mst_port, aux); + struct drm_dp_mst_branch *mstb; + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + int ret; + + mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); + if (!mstb) + return -EREMOTEIO; + + if (remote_i2c_read_ok(msgs, num)) { + ret = drm_dp_mst_i2c_read(mstb, port, msgs, num); + } else if (remote_i2c_write_ok(msgs, num)) { + ret = drm_dp_mst_i2c_write(mstb, port, msgs, num); + } else { + DRM_DEBUG_KMS("Unsupported I2C 
transaction for MST device\n"); + ret = -EIO; + } + drm_dp_mst_topology_put_mstb(mstb); return ret; } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 13068fdf4331..cd162d406078 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -240,13 +240,13 @@ void drm_minor_release(struct drm_minor *minor) * DOC: driver instance overview * * A device instance for a drm driver is represented by &struct drm_device. This - * is initialized with drm_dev_init(), usually from bus-specific ->probe() - * callbacks implemented by the driver. The driver then needs to initialize all - * the various subsystems for the drm device like memory management, vblank - * handling, modesetting support and intial output configuration plus obviously - * initialize all the corresponding hardware bits. Finally when everything is up - * and running and ready for userspace the device instance can be published - * using drm_dev_register(). + * is allocated and initialized with devm_drm_dev_alloc(), usually from + * bus-specific ->probe() callbacks implemented by the driver. The driver then + * needs to initialize all the various subsystems for the drm device like memory + * management, vblank handling, modesetting support and initial output + * configuration plus obviously initialize all the corresponding hardware bits. + * Finally when everything is up and running and ready for userspace the device + * instance can be published using drm_dev_register(). * * There is also deprecated support for initializing device instances using * bus-specific helpers and the &drm_driver.load callback. But due to @@ -274,7 +274,7 @@ void drm_minor_release(struct drm_minor *minor) * * The following example shows a typical structure of a DRM display driver. * The example focuses on the probe() function and the other functions that are - * almost always present and serves as a demonstration of devm_drm_dev_init(). + * almost always present, and serves as a demonstration of devm_drm_dev_alloc(). * * .. code-block:: c * @@ -294,22 +294,12 @@ void drm_minor_release(struct drm_minor *minor) * struct drm_device *drm; * int ret; * - * // devm_kzalloc() can't be used here because the drm_device ' - * // lifetime can exceed the device lifetime if driver unbind - * // happens when userspace still has open file descriptors. - * priv = kzalloc(sizeof(*priv), GFP_KERNEL); - * if (!priv) - * return -ENOMEM; - * + * priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver, + * struct driver_device, drm); + * if (IS_ERR(priv)) + * return PTR_ERR(priv); * drm = &priv->drm; * - * ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver); - * if (ret) { - * kfree(priv); - * return ret; - * } - * drmm_add_final_kfree(drm, priv); - * * ret = drmm_mode_config_init(drm); * if (ret) * return ret; @@ -550,9 +540,9 @@ static void drm_fs_inode_free(struct inode *inode) * following guidelines apply: * * - The entire device initialization procedure should be run from the - * &component_master_ops.master_bind callback, starting with drm_dev_init(), - * then binding all components with component_bind_all() and finishing with - * drm_dev_register(). + * &component_master_ops.master_bind callback, starting with + * devm_drm_dev_alloc(), then binding all components with + * component_bind_all() and finishing with drm_dev_register().
* * - The opaque pointer passed to all components through component_bind_all() * should point at &struct drm_device of the device instance, not some driver @@ -583,43 +573,9 @@ static void drm_dev_init_release(struct drm_device *dev, void *res) drm_legacy_destroy_members(dev); } -/** - * drm_dev_init - Initialise new DRM device - * @dev: DRM device - * @driver: DRM driver - * @parent: Parent device object - * - * Initialize a new DRM device. No device registration is done. - * Call drm_dev_register() to advertice the device to user space and register it - * with other core subsystems. This should be done last in the device - * initialization sequence to make sure userspace can't access an inconsistent - * state. - * - * The initial ref-count of the object is 1. Use drm_dev_get() and - * drm_dev_put() to take and drop further ref-counts. - * - * It is recommended that drivers embed &struct drm_device into their own device - * structure. - * - * Drivers that do not want to allocate their own device struct - * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers - * that do embed &struct drm_device it must be placed first in the overall - * structure, and the overall structure must be allocated using kmalloc(): The - * drm core's release function unconditionally calls kfree() on the @dev pointer - * when the final reference is released. To override this behaviour, and so - * allow embedding of the drm_device inside the driver's device struct at an - * arbitrary offset, you must supply a &drm_driver.release callback and control - * the finalization explicitly. - * - * Note that drivers must call drmm_add_final_kfree() after this function has - * completed successfully. - * - * RETURNS: - * 0 on success, or error code on failure. - */ -int drm_dev_init(struct drm_device *dev, - struct drm_driver *driver, - struct device *parent) +static int drm_dev_init(struct drm_device *dev, + struct drm_driver *driver, + struct device *parent) { int ret; @@ -699,31 +655,15 @@ err: return ret; } -EXPORT_SYMBOL(drm_dev_init); static void devm_drm_dev_init_release(void *data) { drm_dev_put(data); } -/** - * devm_drm_dev_init - Resource managed drm_dev_init() - * @parent: Parent device object - * @dev: DRM device - * @driver: DRM driver - * - * Managed drm_dev_init(). The DRM device initialized with this function is - * automatically put on driver detach using drm_dev_put(). - * - * Note that drivers must call drmm_add_final_kfree() after this function has - * completed successfully. - * - * RETURNS: - * 0 on success, or error code on failure. - */ -int devm_drm_dev_init(struct device *parent, - struct drm_device *dev, - struct drm_driver *driver) +static int devm_drm_dev_init(struct device *parent, + struct drm_device *dev, + struct drm_driver *driver) { int ret; @@ -737,7 +677,6 @@ int devm_drm_dev_init(struct device *parent, return ret; } -EXPORT_SYMBOL(devm_drm_dev_init); void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver, size_t size, size_t offset) @@ -767,19 +706,9 @@ EXPORT_SYMBOL(__devm_drm_dev_alloc); * @driver: DRM driver to allocate device for * @parent: Parent device object * - * Allocate and initialize a new DRM device. No device registration is done. - * Call drm_dev_register() to advertice the device to user space and register it - * with other core subsystems. This should be done last in the device - * initialization sequence to make sure userspace can't access an inconsistent - * state. - * - * The initial ref-count of the object is 1. 
Use drm_dev_get() and - * drm_dev_put() to take and drop further ref-counts. - * - * Note that for purely virtual devices @parent can be NULL. - * - * Drivers that wish to subclass or embed &struct drm_device into their - * own struct should look at using drm_dev_init() instead. + * This is the deprecated version of devm_drm_dev_alloc(), which does not support + * subclassing through embedding the struct &drm_device in a driver private + * structure, and which does not support automatic cleanup through devres. * * RETURNS: * Pointer to new DRM device, or ERR_PTR on failure. diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 6840f0530a38..a82f37d44258 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3738,6 +3738,34 @@ drm_add_cmdb_modes(struct drm_connector *connector, u8 svd) bitmap_set(hdmi->y420_cmdb_modes, vic, 1); } +/** + * drm_display_mode_from_cea_vic() - return a mode for CEA VIC + * @dev: DRM device + * @video_code: CEA VIC of the mode + * + * Creates a new mode matching the specified CEA VIC. + * + * Returns: A new drm_display_mode on success or NULL on failure + */ +struct drm_display_mode * +drm_display_mode_from_cea_vic(struct drm_device *dev, + u8 video_code) +{ + const struct drm_display_mode *cea_mode; + struct drm_display_mode *newmode; + + cea_mode = cea_mode_for_vic(video_code); + if (!cea_mode) + return NULL; + + newmode = drm_mode_duplicate(dev, cea_mode); + if (!newmode) + return NULL; + + return newmode; +} +EXPORT_SYMBOL(drm_display_mode_from_cea_vic); + static int do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len) { diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index df656366a530..2f5b0c2bb0fe 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -176,8 +176,7 @@ static int framebuffer_check(struct drm_device *dev, int i; /* check if the format is supported at all */ - info = __drm_format_info(r->pixel_format); - if (!info) { + if (!__drm_format_info(r->pixel_format)) { struct drm_format_name_buf format_name; DRM_DEBUG_KMS("bad framebuffer format %s\n", @@ -186,9 +185,6 @@ static int framebuffer_check(struct drm_device *dev, return -EINVAL; } - /* now let the driver pick its own format info */ - info = drm_get_format_info(dev, r); - if (r->width == 0) { DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width); return -EINVAL; @@ -199,6 +195,9 @@ static int framebuffer_check(struct drm_device *dev, return -EINVAL; } + /* now let the driver pick its own format info */ + info = drm_get_format_info(dev, r); + for (i = 0; i < info->num_planes; i++) { unsigned int width = fb_plane_width(r->width, info, i); unsigned int height = fb_plane_height(r->height, info, i); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 822edeadbab3..59b9ca207b42 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -471,26 +471,9 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, { struct drm_gem_cma_object *cma_obj; - if (sgt->nents != 1) { - /* check if the entries in the sg_table are contiguous */ - dma_addr_t next_addr = sg_dma_address(sgt->sgl); - struct scatterlist *s; - unsigned int i; - - for_each_sg(sgt->sgl, s, sgt->nents, i) { - /* - * sg_dma_address(s) is only valid for entries - * that have sg_dma_len(s) != 0 - */ - if (!sg_dma_len(s)) - continue; - - if (sg_dma_address(s) != next_addr) - return ERR_PTR(-EINVAL); - - next_addr = sg_dma_address(s) +
sg_dma_len(s); - } - } + /* check if the entries in the sg_table are contiguous */ + if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) + return ERR_PTR(-EINVAL); /* Create a CMA GEM buffer. */ cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 4b7cfbac4daa..d77c9f8ff26c 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -126,8 +126,8 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj) drm_prime_gem_destroy(obj, shmem->sgt); } else { if (shmem->sgt) { - dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl, - shmem->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(obj->dev->dev, shmem->sgt, + DMA_BIDIRECTIONAL, 0); sg_free_table(shmem->sgt); kfree(shmem->sgt); } @@ -424,8 +424,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj) WARN_ON(!drm_gem_shmem_is_purgeable(shmem)); - dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl, - shmem->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(shmem->sgt); kfree(shmem->sgt); shmem->sgt = NULL; @@ -656,7 +655,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj) WARN_ON(shmem->base.import_attach); - return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT); + return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT); } EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table); @@ -697,12 +696,17 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj) goto err_put_pages; } /* Map the pages for use by the h/w. */ - dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); + ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0); + if (ret) + goto err_free_sgt; shmem->sgt = sgt; return sgt; +err_free_sgt: + sg_free_table(sgt); + kfree(sgt); err_put_pages: drm_gem_shmem_put_pages(shmem); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index 892b2288a104..0e4fb9ba43ad 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -43,12 +43,9 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent, drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname)); drm_printf(p, "\n"); - if (bo->mem.bus.is_iomem) { - drm_printf_indent(p, indent, "bus.base=%lx\n", - (unsigned long)bo->mem.bus.base); + if (bo->mem.bus.is_iomem) drm_printf_indent(p, indent, "bus.offset=%lx\n", (unsigned long)bo->mem.bus.offset); - } } EXPORT_SYMBOL(drm_gem_ttm_print_info); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 545a877406f4..50cad0e4a92e 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -97,8 +97,8 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; * hardware's drawing engine. * * To access a buffer object's memory from the DRM driver, call - * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address - * space and returns the memory address. Use drm_gem_vram_kunmap() to + * drm_gem_vram_vmap(). It maps the buffer into kernel address + * space and returns the memory address. Use drm_gem_vram_vunmap() to + * release the mapping.
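+ * + * A minimal access sequence could look like the following sketch + * (an outline only; real drivers also need locking and error + * unwinding): + * + * .. code-block:: c + * + *     void *vaddr = drm_gem_vram_vmap(gbo); + * + *     if (IS_ERR(vaddr)) + *         return PTR_ERR(vaddr); + *     ... access the buffer through vaddr ... + *     drm_gem_vram_vunmap(gbo, vaddr);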
*/ @@ -135,28 +135,28 @@ static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo) static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo, unsigned long pl_flag) { + u32 invariant_flags = 0; unsigned int i; unsigned int c = 0; - u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN; + + if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN) + invariant_flags = TTM_PL_FLAG_TOPDOWN; gbo->placement.placement = gbo->placements; gbo->placement.busy_placement = gbo->placements; - if (pl_flag & TTM_PL_FLAG_VRAM) + if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) { + gbo->placements[c].mem_type = TTM_PL_VRAM; gbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM | - invariant_flags; - - if (pl_flag & TTM_PL_FLAG_SYSTEM) - gbo->placements[c++].flags = TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM | invariant_flags; + } - if (!c) + if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) { + gbo->placements[c].mem_type = TTM_PL_SYSTEM; gbo->placements[c++].flags = TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM | invariant_flags; + } gbo->placement.num_placement = c; gbo->placement.num_busy_placement = c; @@ -167,6 +167,10 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo, } } +/* + * Note that on error, drm_gem_vram_init will free the buffer object. + */ + static int drm_gem_vram_init(struct drm_device *dev, struct drm_gem_vram_object *gbo, size_t size, unsigned long pg_align) @@ -176,32 +180,37 @@ static int drm_gem_vram_init(struct drm_device *dev, int ret; size_t acc_size; - if (WARN_ONCE(!vmm, "VRAM MM not initialized")) + if (WARN_ONCE(!vmm, "VRAM MM not initialized")) { + kfree(gbo); return -EINVAL; + } bdev = &vmm->bdev; gbo->bo.base.funcs = &drm_gem_vram_object_funcs; ret = drm_gem_object_init(dev, &gbo->bo.base, size); - if (ret) + if (ret) { + kfree(gbo); return ret; + } acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo)); gbo->bo.bdev = bdev; - drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); + drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM | + DRM_GEM_VRAM_PL_FLAG_SYSTEM); ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device, &gbo->placement, pg_align, false, acc_size, NULL, NULL, ttm_buffer_object_destroy); if (ret) - goto err_drm_gem_object_release; + /* + * A failing ttm_bo_init will call ttm_buffer_object_destroy + * to release gbo->bo.base and kfree gbo. + */ + return ret; return 0; - -err_drm_gem_object_release: - drm_gem_object_release(&gbo->bo.base); - return ret; } /** @@ -235,13 +244,9 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, ret = drm_gem_vram_init(dev, gbo, size, pg_align); if (ret < 0) - goto err_kfree; + return ERR_PTR(ret); return gbo; - -err_kfree: - kfree(gbo); - return ERR_PTR(ret); } EXPORT_SYMBOL(drm_gem_vram_create); @@ -436,39 +441,6 @@ out: return kmap->virtual; } -/** - * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space - * @gbo: the GEM VRAM object - * @map: establish a mapping if necessary - * @is_iomem: returns true if the mapped memory is I/O memory, or false \ otherwise; can be NULL - * - * This function maps the buffer object into the kernel's address space - * or returns the current mapping. If the parameter map is false, the - * function only queries the current mapping, but does not establish a - * new one. - * - * Returns: - * The buffers virtual address if mapped, or - * NULL if not mapped, or - * an ERR_PTR()-encoded error code otherwise.
- */ -void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map, - bool *is_iomem) -{ - int ret; - void *virtual; - - ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); - if (ret) - return ERR_PTR(ret); - virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem); - ttm_bo_unreserve(&gbo->bo); - - return virtual; -} -EXPORT_SYMBOL(drm_gem_vram_kmap); - static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) { if (WARN_ON_ONCE(!gbo->kmap_use_count)) @@ -485,22 +457,6 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) } /** - * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object - * @gbo: the GEM VRAM object - */ -void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo) -{ - int ret; - - ret = ttm_bo_reserve(&gbo->bo, false, false, NULL); - if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret)) - return; - drm_gem_vram_kunmap_locked(gbo); - ttm_bo_unreserve(&gbo->bo); -} -EXPORT_SYMBOL(drm_gem_vram_kunmap); - -/** * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address * space * @gbo: The GEM VRAM object to map @@ -511,9 +467,6 @@ EXPORT_SYMBOL(drm_gem_vram_kunmap); * permanently. Call drm_gem_vram_vunmap() with the returned address to * unmap and unpin the GEM VRAM object. * - * If you have special requirements for the pinning or mapping operations, - * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly. - * * Returns: * The buffer's virtual address on success, or * an ERR_PTR()-encoded error code otherwise. @@ -647,7 +600,7 @@ static bool drm_is_gem_vram(struct ttm_buffer_object *bo) static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo, struct ttm_placement *pl) { - drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM); + drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM); *pl = gbo->placement; } @@ -967,16 +920,13 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = { * TTM TT */ -static void backend_func_destroy(struct ttm_tt *tt) +static void bo_driver_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt) { + ttm_tt_destroy_common(bdev, tt); ttm_tt_fini(tt); kfree(tt); } -static struct ttm_backend_func backend_func = { - .destroy = backend_func_destroy -}; - /* * TTM BO device */ @@ -991,8 +941,6 @@ static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo, if (!tt) return NULL; - tt->func = &backend_func; - ret = ttm_tt_init(tt, bo, page_flags); if (ret < 0) goto err_ttm_tt_init; @@ -1042,8 +990,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, case TTM_PL_SYSTEM: /* nothing to do */ break; case TTM_PL_VRAM: - mem->bus.offset = mem->start << PAGE_SHIFT; - mem->bus.base = vmm->vram_base; + mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base; mem->bus.is_iomem = true; break; default: @@ -1055,6 +1002,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, static struct ttm_bo_driver bo_driver = { .ttm_tt_create = bo_driver_ttm_tt_create, + .ttm_tt_destroy = bo_driver_ttm_tt_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = bo_driver_evict_flags, .move_notify = bo_driver_move_notify, @@ -1110,9 +1058,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, return ret; ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM, - TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC, - TTM_PL_FLAG_WC, false, - vram_size >> PAGE_SHIFT); + false, vram_size >> PAGE_SHIFT); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 
8e01caaf95cc..b65865c630b0 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -95,6 +95,7 @@ void drm_minor_release(struct drm_minor *minor); /* drm_managed.c */ void drm_managed_release(struct drm_device *dev); +void drmm_add_final_kfree(struct drm_device *dev, void *container); /* drm_vblank.c */ static inline bool drm_vblank_passed(u64 seq, u64 ref) diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c index 1e1356560c2e..37d7db6223be 100644 --- a/drivers/gpu/drm/drm_managed.c +++ b/drivers/gpu/drm/drm_managed.c @@ -27,7 +27,7 @@ * be done directly with drmm_kmalloc() and the related functions. Everything * will be released on the final drm_dev_put() in reverse order of how the * release actions have been added and memory has been allocated since driver - * loading started with drm_dev_init(). + * loading started with devm_drm_dev_alloc(). * * Note that release actions and managed memory can also be added and removed * during the lifetime of the driver, all the functions are fully concurrent @@ -125,18 +125,6 @@ static void add_dr(struct drm_device *dev, struct drmres *dr) dr, dr->node.name, (unsigned long) dr->node.size); } -/** - * drmm_add_final_kfree - add release action for the final kfree() - * @dev: DRM device - * @container: pointer to the kmalloc allocation containing @dev - * - * Since the allocation containing the struct &drm_device must be allocated - * before it can be initialized with drm_dev_init() there's no way to allocate - * that memory with drmm_kmalloc(). To side-step this chicken-egg problem the - * pointer for this final kfree() must be specified by calling this function. It - * will be released in the final drm_dev_put() for @dev, after all other release - * actions installed through drmm_add_action() have been processed. - */ void drmm_add_final_kfree(struct drm_device *dev, void *container) { WARN_ON(dev->managed.final_kfree); @@ -144,7 +132,6 @@ void drmm_add_final_kfree(struct drm_device *dev, void *container) WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container))); dev->managed.final_kfree = container; } -EXPORT_SYMBOL(drmm_add_final_kfree); int __drmm_add_action(struct drm_device *dev, drmres_release_t action, diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 1693aa7c14b5..11fe9ff76fd5 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -617,6 +617,7 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, { struct drm_gem_object *obj = attach->dmabuf->priv; struct sg_table *sgt; + int ret; if (WARN_ON(dir == DMA_NONE)) return ERR_PTR(-EINVAL); @@ -626,11 +627,12 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, else sgt = obj->dev->driver->gem_prime_get_sg_table(obj); - if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC)) { + ret = dma_map_sgtable(attach->dev, sgt, dir, + DMA_ATTR_SKIP_CPU_SYNC); + if (ret) { sg_free_table(sgt); kfree(sgt); - sgt = ERR_PTR(-ENOMEM); + sgt = ERR_PTR(ret); } return sgt; @@ -652,8 +654,7 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, if (!sgt) return; - dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC); + dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); sg_free_table(sgt); kfree(sgt); } @@ -802,9 +803,11 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { * * This is useful for implementing &drm_gem_object_funcs.get_sg_table. 
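+ * + * The @dev argument is used to cap each scatterlist segment at the + * device's dma_max_mapping_size(); if @dev is NULL, or the device + * reports no limit, the segment size falls back to + * SCATTERLIST_MAX_SEGMENT (see the code below).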
*/ -struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages) +struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, + struct page **pages, unsigned int nr_pages) { struct sg_table *sg = NULL; + size_t max_segment = 0; int ret; sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); @@ -813,8 +816,13 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_page goto out; } - ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, GFP_KERNEL); + if (dev) + max_segment = dma_max_mapping_size(dev->dev); + if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT) + max_segment = SCATTERLIST_MAX_SEGMENT; + ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0, + nr_pages << PAGE_SHIFT, + max_segment, GFP_KERNEL); if (ret) goto out; @@ -826,6 +834,37 @@ out: EXPORT_SYMBOL(drm_prime_pages_to_sg); /** + * drm_prime_get_contiguous_size - returns the contiguous size of the buffer + * @sgt: sg_table describing the buffer to check + * + * This helper calculates the contiguous size in the DMA address space + * of the buffer described by the provided sg_table. + * + * This is useful for implementing + * &drm_gem_object_funcs.gem_prime_import_sg_table. + */ +unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt) +{ + dma_addr_t expected = sg_dma_address(sgt->sgl); + struct scatterlist *sg; + unsigned long size = 0; + int i; + + for_each_sgtable_dma_sg(sgt, sg, i) { + unsigned int len = sg_dma_len(sg); + + if (!len) + break; + if (sg_dma_address(sg) != expected) + break; + expected += len; + size += len; + } + return size; +} +EXPORT_SYMBOL(drm_prime_get_contiguous_size); + +/** * drm_gem_prime_export - helper library implementation of the export callback * @obj: GEM object to export * @flags: flags like DRM_CLOEXEC and DRM_RDWR @@ -959,45 +998,26 @@ EXPORT_SYMBOL(drm_gem_prime_import); int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, dma_addr_t *addrs, int max_entries) { - unsigned count; - struct scatterlist *sg; - struct page *page; - u32 page_len, page_index; - dma_addr_t addr; - u32 dma_len, dma_index; - - /* - * Scatterlist elements contains both pages and DMA addresses, but - * one shoud not assume 1:1 relation between them. The sg->length is - * the size of the physical memory chunk described by the sg->page, - * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk - * described by the sg_dma_address(sg).
- */ - page_index = 0; - dma_index = 0; - for_each_sg(sgt->sgl, sg, sgt->nents, count) { - page_len = sg->length; - page = sg_page(sg); - dma_len = sg_dma_len(sg); - addr = sg_dma_address(sg); - - while (pages && page_len > 0) { - if (WARN_ON(page_index >= max_entries)) + struct sg_dma_page_iter dma_iter; + struct sg_page_iter page_iter; + struct page **p = pages; + dma_addr_t *a = addrs; + + if (pages) { + for_each_sgtable_page(sgt, &page_iter, 0) { + if (WARN_ON(p - pages >= max_entries)) return -1; - pages[page_index] = page; - page++; - page_len -= PAGE_SIZE; - page_index++; + *p++ = sg_page_iter_page(&page_iter); } - while (addrs && dma_len > 0) { - if (WARN_ON(dma_index >= max_entries)) + } + if (addrs) { + for_each_sgtable_dma_page(sgt, &dma_iter, 0) { + if (WARN_ON(a - addrs >= max_entries)) return -1; - addrs[dma_index] = addr; - addr += PAGE_SIZE; - dma_len -= PAGE_SIZE; - dma_index++; + *a++ = sg_page_iter_dma_address(&dma_iter); } } + return 0; } EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays); diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index b18e1efbbae1..f135b79593dd 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -674,7 +674,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants); * * Note that atomic drivers must call drm_calc_timestamping_constants() before * enabling a CRTC. The atomic helpers already take care of that in - * drm_atomic_helper_update_legacy_modeset_state(). + * drm_atomic_helper_calc_timestamping_constants(). * * Returns: * @@ -819,7 +819,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal); * * Note that atomic drivers must call drm_calc_timestamping_constants() before * enabling a CRTC. The atomic helpers already take care of that in - * drm_atomic_helper_update_legacy_modeset_state(). + * drm_atomic_helper_calc_timestamping_constants(). * * Returns: * diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index f06e19e7be04..d1533bdc1335 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -27,7 +27,7 @@ static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj) * because display controller, GPU, etc. are not coherent. */ if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK) - dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); + dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0); } static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj) @@ -51,7 +51,7 @@ static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj * discard those writes. 
*/ if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK) - dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0); } /* called with etnaviv_obj->lock held */ @@ -103,7 +103,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj) int npages = etnaviv_obj->base.size >> PAGE_SHIFT; struct sg_table *sgt; - sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages); + sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev, + etnaviv_obj->pages, npages); if (IS_ERR(sgt)) { dev_err(dev->dev, "failed to allocate sgt: %ld\n", PTR_ERR(sgt)); @@ -404,9 +405,8 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, } if (etnaviv_obj->flags & ETNA_BO_CACHED) { - dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl, - etnaviv_obj->sgt->nents, - etnaviv_op_to_dma_dir(op)); + dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt, + etnaviv_op_to_dma_dir(op)); etnaviv_obj->last_cpu_prep_op = op; } @@ -421,8 +421,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj) if (etnaviv_obj->flags & ETNA_BO_CACHED) { /* fini without a prep is almost certainly a userspace error */ WARN_ON(etnaviv_obj->last_cpu_prep_op == 0); - dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl, - etnaviv_obj->sgt->nents, + dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt, etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op)); etnaviv_obj->last_cpu_prep_op = 0; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 6d9e5c3c4dd5..4aa3426a9ba4 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -19,7 +19,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! 
*/ return ERR_PTR(-EINVAL); - return drm_prime_pages_to_sg(etnaviv_obj->pages, npages); + return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages); } void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 3607d348c298..15d9fa3879e5 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -73,13 +73,13 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, struct sg_table *sgt, unsigned len, int prot) { struct scatterlist *sg; unsigned int da = iova; - unsigned int i, j; + unsigned int i; int ret; if (!context || !sgt) return -EINVAL; - for_each_sg(sgt->sgl, sg, sgt->nents, i) { + for_each_sgtable_dma_sg(sgt, sg, i) { u32 pa = sg_dma_address(sg) - sg->offset; size_t bytes = sg_dma_len(sg) + sg->offset; @@ -95,14 +95,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, return 0; fail: - da = iova; - - for_each_sg(sgt->sgl, sg, i, j) { - size_t bytes = sg_dma_len(sg) + sg->offset; - - etnaviv_context_unmap(context, da, bytes); - da += bytes; - } + etnaviv_context_unmap(context, iova, da - iova); return ret; } @@ -113,7 +106,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova, unsigned int da = iova; int i; - for_each_sg(sgt->sgl, sg, sgt->nents, i) { + for_each_sgtable_dma_sg(sgt, sg, i) { size_t bytes = sg_dma_len(sg) + sg->offset; etnaviv_context_unmap(context, da, bytes); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c index 58b89ec11b0e..5887f7f52f96 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dma.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c @@ -31,23 +31,6 @@ #define EXYNOS_DEV_ADDR_START 0x20000000 #define EXYNOS_DEV_ADDR_SIZE 0x40000000 -static inline int configure_dma_max_seg_size(struct device *dev) -{ - if (!dev->dma_parms) - dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL); - if (!dev->dma_parms) - return -ENOMEM; - - dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); - return 0; -} - -static inline void clear_dma_max_seg_size(struct device *dev) -{ - kfree(dev->dma_parms); - dev->dma_parms = NULL; -} - /* * drm_iommu_attach_device- attach device to iommu mapping * @@ -69,10 +52,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, return -EINVAL; } - ret = configure_dma_max_seg_size(subdrv_dev); - if (ret) - return ret; - + dma_set_max_seg_size(subdrv_dev, DMA_BIT_MASK(32)); if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { /* * Keep the original DMA mapping of the sub-device and @@ -89,9 +69,6 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, ret = iommu_attach_device(priv->mapping, subdrv_dev); } - if (ret) - clear_dma_max_seg_size(subdrv_dev); - return ret; } @@ -114,8 +91,6 @@ static void drm_iommu_detach_device(struct drm_device *drm_dev, arm_iommu_attach_device(subdrv_dev, *dma_priv); } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) iommu_detach_device(priv->mapping, subdrv_dev); - - clear_dma_max_seg_size(subdrv_dev); } int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 1a1a2853a842..5b9666fc7af1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1760,11 +1760,8 @@ static int exynos_dsi_probe(struct platform_device *pdev) dsi->supplies[1].supply = "vddio"; ret = devm_regulator_bulk_get(dev, 
ARRAY_SIZE(dsi->supplies), dsi->supplies); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_info(dev, "failed to get regulators: %d\n", ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "failed to get regulators\n"); dsi->clks = devm_kcalloc(dev, dsi->driver_data->num_clks, sizeof(*dsi->clks), diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 03be31427181..967a5cdc120e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -395,8 +395,8 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d, return; out: - dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl, - g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt, + DMA_BIDIRECTIONAL, 0); pages = frame_vector_pages(g2d_userptr->vec); if (!IS_ERR(pages)) { @@ -511,10 +511,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d, g2d_userptr->sgt = sgt; - if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents, - DMA_BIDIRECTIONAL)) { + ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt, + DMA_BIDIRECTIONAL, 0); + if (ret) { DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n"); - ret = -ENOMEM; goto err_sg_free_table; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index efa476858db5..1716a023bca0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -431,27 +431,10 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, { struct exynos_drm_gem *exynos_gem; - if (sgt->nents < 1) + /* check if the entries in the sg_table are contiguous */ + if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) { + DRM_ERROR("buffer chunks must be mapped contiguously"); return ERR_PTR(-EINVAL); - - /* - * Check if the provided buffer has been mapped as contiguous - * into DMA address space. 
- */ - if (sgt->nents > 1) { - dma_addr_t next_addr = sg_dma_address(sgt->sgl); - struct scatterlist *s; - unsigned int i; - - for_each_sg(sgt->sgl, s, sgt->nents, i) { - if (!sg_dma_len(s)) - break; - if (sg_dma_address(s) != next_addr) { - DRM_ERROR("buffer chunks must be mapped contiguously"); - return ERR_PTR(-EINVAL); - } - next_addr = sg_dma_address(s) + sg_dma_len(s); - } } exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index c5ba32fca5f3..dc01c188c0e0 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1797,11 +1797,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata) hdata->regul_bulk[i].supply = supply[i]; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk); - if (ret) { - if (ret != -EPROBE_DEFER) - DRM_DEV_ERROR(dev, "failed to get regulators\n"); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "failed to get regulators\n"); hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en"); diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index da02d7e8a8f5..54d9876b5305 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -164,7 +164,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma) return 0; } -static struct fb_ops psbfb_ops = { +static const struct fb_ops psbfb_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, .fb_setcolreg = psbfb_setcolreg, @@ -175,7 +175,7 @@ static struct fb_ops psbfb_ops = { .fb_sync = psbfb_sync, }; -static struct fb_ops psbfb_roll_ops = { +static const struct fb_ops psbfb_roll_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, .fb_setcolreg = psbfb_setcolreg, @@ -186,7 +186,7 @@ static struct fb_ops psbfb_roll_ops = { .fb_mmap = psbfb_mmap, }; -static struct fb_ops psbfb_unaccel_ops = { +static const struct fb_ops psbfb_unaccel_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, .fb_setcolreg = psbfb_setcolreg, diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 303c2d483c6e..88250860f8e4 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -853,11 +853,11 @@ static void i810_dma_quiescent(struct drm_device *dev) i810_wait_ring(dev, dev_priv->ring.Size - 8); } -static int i810_flush_queue(struct drm_device *dev) +static void i810_flush_queue(struct drm_device *dev) { drm_i810_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; - int i, ret = 0; + int i; RING_LOCALS; i810_kernel_lost_context(dev); @@ -882,7 +882,7 @@ static int i810_flush_queue(struct drm_device *dev) DRM_DEBUG("still on client\n"); } - return ret; + return; } /* Must be called with the lock held */ diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index bda4c0e408f8..e5574e506a5c 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -234,6 +234,7 @@ i915-y += \ display/intel_ddi.o \ display/intel_dp.o \ display/intel_dp_aux_backlight.o \ + display/intel_dp_hdcp.o \ display/intel_dp_link_training.o \ display/intel_dp_mst.o \ display/intel_dsi.o \ diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index f4053dd6bde9..520715b7d5b5 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1646,6 +1646,7 @@ static const struct drm_encoder_funcs 
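Both exynos regulator hunks above adopt dev_err_probe(), which returns the error unchanged, logs at error level for real failures, and quietly records a deferral reason for -EPROBE_DEFER, replacing the open-coded `if (ret != -EPROBE_DEFER)` check. A minimal sketch of the pattern:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int get_supplies(struct device *dev,
			struct regulator_bulk_data *supplies, int num)
{
	int ret = devm_regulator_bulk_get(dev, num, supplies);

	if (ret)
		/* silent on probe deferral, dev_err() otherwise */
		return dev_err_probe(dev, ret, "failed to get regulators\n");

	return 0;
}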
gen11_dsi_encoder_funcs = { }; static const struct drm_connector_funcs gen11_dsi_connector_funcs = { + .detect = intel_panel_detect, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_connector_destroy, diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 630f49b7aa01..86be032bcf96 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -527,8 +527,6 @@ void intel_atomic_state_clear(struct drm_atomic_state *s) intel_atomic_clear_global_state(state); state->dpll_set = state->modeset = false; - state->global_state_changed = false; - state->active_pipes = 0; } struct intel_crtc_state * @@ -542,40 +540,3 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state, return to_intel_crtc_state(crtc_state); } - -int _intel_atomic_lock_global_state(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc *crtc; - - state->global_state_changed = true; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - int ret; - - ret = drm_modeset_lock(&crtc->base.mutex, - state->base.acquire_ctx); - if (ret) - return ret; - } - - return 0; -} - -int _intel_atomic_serialize_global_state(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc *crtc; - - state->global_state_changed = true; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state; - - crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - } - - return 0; -} diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h index 11146292b06f..285de07011dc 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.h +++ b/drivers/gpu/drm/i915/display/intel_atomic.h @@ -56,8 +56,4 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state); -int _intel_atomic_lock_global_state(struct intel_atomic_state *state); - -int _intel_atomic_serialize_global_state(struct intel_atomic_state *state); - #endif /* __INTEL_ATOMIC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index ad4aa66fd676..f7de55707746 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -958,13 +958,8 @@ static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state, if (IS_ERR(cdclk_state)) return PTR_ERR(cdclk_state); - cdclk_state->force_min_cdclk_changed = true; cdclk_state->force_min_cdclk = enable ? 
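With force_min_cdclk_changed removed, whether a cdclk recomputation is needed is derived by diffing the old and new global state rather than carrying a dirty flag alongside it. A sketch of the comparison, using the old/new accessors visible later in this patch and assumed to run during the atomic check phase:

static bool force_min_cdclk_changed(struct intel_atomic_state *state)
{
	const struct intel_cdclk_state *old_cdclk_state =
		intel_atomic_get_old_cdclk_state(state);
	const struct intel_cdclk_state *new_cdclk_state =
		intel_atomic_get_new_cdclk_state(state);

	/* no cdclk state in this commit, nothing to compare */
	if (!new_cdclk_state)
		return false;

	return old_cdclk_state->force_min_cdclk !=
	       new_cdclk_state->force_min_cdclk;
}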
2 * 96000 : 0; - ret = intel_atomic_lock_global_state(&cdclk_state->base); - if (ret) - return ret; - return drm_atomic_commit(&state->base); } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index a0a41ec5c341..4716484af62d 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1656,6 +1656,8 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv, [PORT_E] = { DVO_PORT_HDMIE, DVO_PORT_DPE, DVO_PORT_CRT }, [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 }, [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 }, + [PORT_H] = { DVO_PORT_HDMIH, DVO_PORT_DPH, -1 }, + [PORT_I] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 }, }; /* * Bspec lists the ports as A, B, C, D - however internally in our @@ -2133,7 +2135,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv) INIT_LIST_HEAD(&dev_priv->vbt.display_devices); - if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) { + if (!HAS_DISPLAY(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "Skipping VBT init due to disabled display.\n"); return; @@ -2650,6 +2652,12 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, case DP_AUX_G: aux_ch = AUX_CH_G; break; + case DP_AUX_H: + aux_ch = AUX_CH_H; + break; + case DP_AUX_I: + aux_ch = AUX_CH_I; + break; default: MISSING_CASE(info->alternate_aux_channel); aux_ch = AUX_CH_A; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 577c4441f32d..cb93f6cf6d37 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2426,7 +2426,6 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa if (!cdclk_state) return NULL; - cdclk_state->force_min_cdclk_changed = false; cdclk_state->pipe = INVALID_PIPE; return &cdclk_state->base; @@ -2501,6 +2500,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) if (ret) return ret; } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes || + old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk || intel_cdclk_changed(&old_cdclk_state->logical, &new_cdclk_state->logical)) { ret = intel_atomic_lock_global_state(&new_cdclk_state->base); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index 6b31fde4be16..b34eb00fb327 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -49,7 +49,6 @@ struct intel_cdclk_state { /* forced minimum cdclk for glk+ audio w/a */ int force_min_cdclk; - bool force_min_cdclk_changed; /* bitmask of active pipes */ u8 active_pipes; diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 5b4510ce5693..4934edd51cb0 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -833,6 +833,9 @@ intel_crt_detect(struct drm_connector *connector, connector->base.id, connector->name, force); + if (!INTEL_DISPLAY_ENABLED(dev_priv)) + return connector_status_disconnected; + if (dev_priv->params.load_detect_test) { wakeref = intel_display_power_get(dev_priv, intel_encoder->power_domain); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 19ac6b2a664c..4d06178cd76c 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -572,13 +572,13 @@ static const struct cnl_ddi_buf_trans 
ehl_combo_phy_ddi_translations_dp[] = { /* NT mV Trans mV db */ { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */ - { 0xC, 0x64, 0x30, 0x00, 0x0F }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */ + { 0xC, 0x64, 0x34, 0x00, 0x0B }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 350 900 8.2 */ { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x64, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ + { 0xC, 0x64, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */ { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x38, 0x00, 0x07 }, /* 600 900 3.5 */ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ }; @@ -1074,12 +1074,28 @@ static const struct cnl_ddi_buf_trans * ehl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate, int *n_entries) { - if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + switch (type) { + case INTEL_OUTPUT_HDMI: + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; + case INTEL_OUTPUT_EDP: + if (dev_priv->vbt.edp.low_vswing) { + if (rate > 540000) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); + return icl_combo_phy_ddi_translations_edp_hbr3; + } else { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); + return icl_combo_phy_ddi_translations_edp_hbr2; + } + } + /* fall through */ + default: + /* All combo DP and eDP ports that do not support low_vswing */ *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp); return ehl_combo_phy_ddi_translations_dp; } - - return icl_get_combo_buf_trans(encoder, type, rate, n_entries); } static const struct cnl_ddi_buf_trans * @@ -1088,30 +1104,44 @@ tgl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.hobl) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - if (!intel_dp->hobl_failed && rate <= 540000) { - /* Same table applies to TGL, RKL and DG1 */ - *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl); - return tgl_combo_phy_ddi_translations_edp_hbr2_hobl; + switch (type) { + case INTEL_OUTPUT_HDMI: + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; + case INTEL_OUTPUT_EDP: + if (dev_priv->vbt.edp.hobl) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (!intel_dp->hobl_failed && rate <= 540000) { + /* Same table applies to TGL, RKL and DG1 */ + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl); + return tgl_combo_phy_ddi_translations_edp_hbr2_hobl; + } } - } - if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) { - return icl_get_combo_buf_trans(encoder, type, rate, n_entries); - } else if (rate > 270000) { - if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { - *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2); - return tgl_uy_combo_phy_ddi_translations_dp_hbr2; + if (rate > 540000) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); + return icl_combo_phy_ddi_translations_edp_hbr3; + } else if (dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); + return 
icl_combo_phy_ddi_translations_edp_hbr2; + } + /* fall through */ + default: + /* All combo DP and eDP ports that do not support low_vswing */ + if (rate > 270000) { + if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { + *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2); + return tgl_uy_combo_phy_ddi_translations_dp_hbr2; + } + + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2); + return tgl_combo_phy_ddi_translations_dp_hbr2; } - *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2); - return tgl_combo_phy_ddi_translations_dp_hbr2; + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr); + return tgl_combo_phy_ddi_translations_dp_hbr; } - - *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr); - return tgl_combo_phy_ddi_translations_dp_hbr; } static const struct tgl_dkl_phy_ddi_buf_trans * @@ -1791,6 +1821,8 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); + drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING); + ctl &= ~TRANS_DDI_FUNC_ENABLE; if (IS_GEN_RANGE(dev_priv, 8, 10)) @@ -1818,12 +1850,12 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state } int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, + enum transcoder cpu_transcoder, bool enable) { struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); intel_wakeref_t wakeref; - enum pipe pipe = 0; int ret = 0; u32 tmp; @@ -1832,19 +1864,12 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, if (drm_WARN_ON(dev, !wakeref)) return -ENXIO; - if (drm_WARN_ON(dev, - !intel_encoder->get_hw_state(intel_encoder, &pipe))) { - ret = -EIO; - goto out; - } - - tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe)); + tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (enable) tmp |= TRANS_DDI_HDCP_SIGNALLING; else tmp &= ~TRANS_DDI_HDCP_SIGNALLING; - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), tmp); -out: + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp); intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return ret; } @@ -3445,6 +3470,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_init_dp_buf_reg(encoder); if (!is_mst) intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_configure_protocol_converter(intel_dp); intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); intel_dp_sink_set_fec_ready(intel_dp, crtc_state); @@ -3556,19 +3582,17 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state, intel_ddi_pre_enable_hdmi(state, encoder, crtc_state, conn_state); } else { - struct intel_lspcon *lspcon = - enc_to_intel_lspcon(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); intel_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); - if (lspcon->active) { - struct intel_digital_port *dig_port = - enc_to_dig_port(encoder); + /* FIXME precompute everything properly */ + /* FIXME how do we turn infoframes off again? 
*/ + if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink) dig_port->set_infoframes(encoder, crtc_state->has_infoframe, crtc_state, conn_state); - } } } @@ -4012,18 +4036,19 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state, intel_psr_update(intel_dp, crtc_state, conn_state); intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); - intel_edp_drrs_enable(intel_dp, crtc_state); + intel_edp_drrs_update(intel_dp, crtc_state); intel_panel_update_backlight(state, encoder, crtc_state, conn_state); } -static void intel_ddi_update_pipe(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +void intel_ddi_update_pipe(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { - if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && + !intel_encoder_is_mst(encoder)) intel_ddi_update_pipe_dp(state, encoder, crtc_state, conn_state); @@ -4949,6 +4974,57 @@ static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy) i915->hti_state & HDPORT_PHY_USED_HDMI(phy)); } +static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + if (port >= PORT_D) + return HPD_PORT_TC1 + port - PORT_D; + else + return HPD_PORT_A + port - PORT_A; +} + +static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + if (HAS_PCH_TGP(dev_priv)) + return tgl_hpd_pin(dev_priv, port); + + if (port >= PORT_D) + return HPD_PORT_C + port - PORT_D; + else + return HPD_PORT_A + port - PORT_A; +} + +static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + if (port >= PORT_C) + return HPD_PORT_TC1 + port - PORT_C; + else + return HPD_PORT_A + port - PORT_A; +} + +static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + if (port == PORT_D) + return HPD_PORT_A; + + if (HAS_PCH_MCC(dev_priv)) + return icl_hpd_pin(dev_priv, port); + + return HPD_PORT_A + port - PORT_A; +} + +static enum hpd_pin cnl_hpd_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + if (port == PORT_F) + return HPD_PORT_E; + + return HPD_PORT_A + port - PORT_A; +} + void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) { struct intel_digital_port *dig_port; @@ -5001,6 +5077,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port)); + mutex_init(&dig_port->hdcp_mutex); + dig_port->num_hdcp_streams = 0; + encoder->hotplug = intel_ddi_hotplug; encoder->compute_output_type = intel_ddi_compute_output_type; encoder->compute_config = intel_ddi_compute_config; @@ -5022,6 +5101,19 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->cloneable = 0; encoder->pipe_mask = ~0; + if (IS_ROCKETLAKE(dev_priv)) + encoder->hpd_pin = rkl_hpd_pin(dev_priv, port); + else if (INTEL_GEN(dev_priv) >= 12) + encoder->hpd_pin = tgl_hpd_pin(dev_priv, port); + else if (IS_ELKHARTLAKE(dev_priv)) + encoder->hpd_pin = ehl_hpd_pin(dev_priv, port); + else if (IS_GEN(dev_priv, 11)) + encoder->hpd_pin = icl_hpd_pin(dev_priv, port); + else if (IS_GEN(dev_priv, 10)) + encoder->hpd_pin = cnl_hpd_pin(dev_priv, port); + else + encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); + if 
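The new per-platform hpd_pin helpers above replace table lookups with small arithmetic mappings. Worked examples whose values follow directly from the helpers:

/* RKL with a TGP PCH falls through to the TGL mapping */
rkl_hpd_pin(dev_priv, PORT_D);	/* -> tgl_hpd_pin() -> HPD_PORT_TC1 */
rkl_hpd_pin(dev_priv, PORT_B);	/* -> HPD_PORT_A + (B - A) == HPD_PORT_B */
ehl_hpd_pin(dev_priv, PORT_D);	/* special-cased -> HPD_PORT_A */
cnl_hpd_pin(dev_priv, PORT_F);	/* special-cased -> HPD_PORT_E */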
(INTEL_GEN(dev_priv) >= 11) dig_port->saved_port_bits = intel_de_read(dev_priv, DDI_BUF_CTL(port)) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 077e9dbbe367..f5fb62fc9400 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -16,6 +16,7 @@ struct intel_crtc_state; struct intel_dp; struct intel_dpll_hw_state; struct intel_encoder; +enum transcoder; void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, struct intel_encoder *intel_encoder, @@ -43,6 +44,7 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, u32 bxt_signal_levels(struct intel_dp *intel_dp); u32 ddi_signal_levels(struct intel_dp *intel_dp); int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, + enum transcoder cpu_transcoder, bool enable); void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ddb588f863f8..631b4338224e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -67,6 +67,7 @@ #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_color.h" +#include "intel_csr.h" #include "intel_display_types.h" #include "intel_dp_link_training.h" #include "intel_fbc.h" @@ -7331,6 +7332,10 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port) return POWER_DOMAIN_PORT_DDI_F_LANES; case PORT_G: return POWER_DOMAIN_PORT_DDI_G_LANES; + case PORT_H: + return POWER_DOMAIN_PORT_DDI_H_LANES; + case PORT_I: + return POWER_DOMAIN_PORT_DDI_I_LANES; default: MISSING_CASE(port); return POWER_DOMAIN_PORT_OTHER; @@ -7356,6 +7361,10 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) return POWER_DOMAIN_AUX_F_TBT; case AUX_CH_G: return POWER_DOMAIN_AUX_G_TBT; + case AUX_CH_H: + return POWER_DOMAIN_AUX_H_TBT; + case AUX_CH_I: + return POWER_DOMAIN_AUX_I_TBT; default: MISSING_CASE(dig_port->aux_ch); return POWER_DOMAIN_AUX_C_TBT; @@ -7387,6 +7396,10 @@ intel_legacy_aux_to_power_domain(enum aux_ch aux_ch) return POWER_DOMAIN_AUX_F; case AUX_CH_G: return POWER_DOMAIN_AUX_G; + case AUX_CH_H: + return POWER_DOMAIN_AUX_H; + case AUX_CH_I: + return POWER_DOMAIN_AUX_I; default: MISSING_CASE(aux_ch); return POWER_DOMAIN_AUX_A; @@ -8155,7 +8168,7 @@ static void compute_m_n(unsigned int m, unsigned int n, * which the devices expect also in synchronous clock mode. 
*/ if (constant_n) - *ret_n = 0x8000; + *ret_n = DP_LINK_CONSTANT_N_VALUE; else *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); @@ -13471,12 +13484,6 @@ encoder_retry: "hw max bpp: %i, pipe bpp: %i, dithering: %i\n", base_bpp, pipe_config->pipe_bpp, pipe_config->dither); - /* - * Make drm_calc_timestamping_constants in - * drm_atomic_helper_update_legacy_modeset_state() happy - */ - pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode; - return 0; } @@ -14297,7 +14304,6 @@ verify_crtc_state(struct intel_crtc *crtc, struct intel_encoder *encoder; struct intel_crtc_state *pipe_config = old_crtc_state; struct drm_atomic_state *state = old_crtc_state->uapi.state; - bool active; __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); intel_crtc_free_hw_state(old_crtc_state); @@ -14307,16 +14313,19 @@ verify_crtc_state(struct intel_crtc *crtc, drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); - active = dev_priv->display.get_pipe_config(crtc, pipe_config); + pipe_config->hw.enable = new_crtc_state->hw.enable; + + pipe_config->hw.active = + dev_priv->display.get_pipe_config(crtc, pipe_config); /* we keep both pipes enabled on 830 */ - if (IS_I830(dev_priv)) - active = new_crtc_state->hw.active; + if (IS_I830(dev_priv) && pipe_config->hw.active) + pipe_config->hw.active = new_crtc_state->hw.active; - I915_STATE_WARN(new_crtc_state->hw.active != active, + I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active, "crtc active state doesn't match with hw state " "(expected %i, found %i)\n", - new_crtc_state->hw.active, active); + new_crtc_state->hw.active, pipe_config->hw.active); I915_STATE_WARN(crtc->active != new_crtc_state->hw.active, "transitional active state does not match atomic hw state " @@ -14325,6 +14334,7 @@ verify_crtc_state(struct intel_crtc *crtc, for_each_encoder_on_crtc(dev, &crtc->base, encoder) { enum pipe pipe; + bool active; active = encoder->get_hw_state(encoder, &pipe); I915_STATE_WARN(active != new_crtc_state->hw.active, @@ -14636,16 +14646,8 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state, static int intel_modeset_checks(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - int ret; state->modeset = true; - state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes); - - if (state->active_pipes != dev_priv->active_pipes) { - ret = _intel_atomic_lock_global_state(state); - if (ret) - return ret; - } if (IS_HASWELL(dev_priv)) return hsw_mode_set_planes_workaround(state); @@ -14789,7 +14791,8 @@ static int intel_atomic_check_cdclk(struct intel_atomic_state *state, bool *need_cdclk_calc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_cdclk_state *new_cdclk_state; + const struct intel_cdclk_state *old_cdclk_state; + const struct intel_cdclk_state *new_cdclk_state; struct intel_plane_state *plane_state; struct intel_bw_state *new_bw_state; struct intel_plane *plane; @@ -14808,9 +14811,11 @@ static int intel_atomic_check_cdclk(struct intel_atomic_state *state, return ret; } + old_cdclk_state = intel_atomic_get_old_cdclk_state(state); new_cdclk_state = intel_atomic_get_new_cdclk_state(state); - if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed) + if (new_cdclk_state && + old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) *need_cdclk_calc = true; ret = dev_priv->display.bw_calc_min_cdclk(state); @@ -15757,14 +15762,6 @@ static void 
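The bare 0x8000 above now has a name, DP_LINK_CONSTANT_N_VALUE, the constant-N value devices expect in synchronous clock mode; with N pinned, only M is scaled so the m/n ratio is preserved. A rough worked example:

/* constant-N M/N programming: pin N, scale M to keep the ratio */
u32 n = DP_LINK_CONSTANT_N_VALUE;	/* 0x8000 == 32768 */
u32 m = n / 4;				/* a 1:4 data ratio -> M = 0x2000 */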
intel_atomic_track_fbs(struct intel_atomic_state *state) plane->frontbuffer_bit); } -static void assert_global_state_locked(struct drm_i915_private *dev_priv) -{ - struct intel_crtc *crtc; - - for_each_intel_crtc(&dev_priv->drm, crtc) - drm_modeset_lock_assert_held(&crtc->base.mutex); -} - static int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, bool nonblock) @@ -15840,12 +15837,6 @@ static int intel_atomic_commit(struct drm_device *dev, intel_shared_dpll_swap_state(state); intel_atomic_track_fbs(state); - if (state->global_state_changed) { - assert_global_state_locked(dev_priv); - - dev_priv->active_pipes = state->active_pipes; - } - drm_atomic_state_get(&state->base); INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); @@ -16892,7 +16883,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_pps_init(dev_priv); - if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) + if (!HAS_DISPLAY(dev_priv)) return; if (IS_ROCKETLAKE(dev_priv)) { @@ -17878,6 +17869,27 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) { int ret; + if (i915_inject_probe_failure(i915)) + return -ENODEV; + + if (HAS_DISPLAY(i915)) { + ret = drm_vblank_init(&i915->drm, + INTEL_NUM_PIPES(i915)); + if (ret) + return ret; + } + + intel_bios_init(i915); + + ret = intel_vga_register(i915); + if (ret) + goto cleanup_bios; + + /* FIXME: completely on the wrong abstraction layer */ + intel_power_domains_init_hw(i915, false); + + intel_csr_ucode_init(i915); + i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); @@ -17886,15 +17898,15 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) ret = intel_cdclk_init(i915); if (ret) - return ret; + goto cleanup_vga_client_pw_domain_csr; ret = intel_dbuf_init(i915); if (ret) - return ret; + goto cleanup_vga_client_pw_domain_csr; ret = intel_bw_init(i915); if (ret) - return ret; + goto cleanup_vga_client_pw_domain_csr; init_llist_head(&i915->atomic_helper.free_list); INIT_WORK(&i915->atomic_helper.free_work, @@ -17905,10 +17917,19 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) intel_fbc_init(i915); return 0; + +cleanup_vga_client_pw_domain_csr: + intel_csr_ucode_fini(i915); + intel_power_domains_driver_remove(i915); + intel_vga_unregister(i915); +cleanup_bios: + intel_bios_driver_remove(i915); + + return ret; } -/* part #2: call after irq install */ -int intel_modeset_init(struct drm_i915_private *i915) +/* part #2: call after irq install, but before gem init */ +int intel_modeset_init_nogem(struct drm_i915_private *i915) { struct drm_device *dev = &i915->drm; enum pipe pipe; @@ -17925,7 +17946,7 @@ int intel_modeset_init(struct drm_i915_private *i915) INTEL_NUM_PIPES(i915), INTEL_NUM_PIPES(i915) > 1 ? "s" : ""); - if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { + if (HAS_DISPLAY(i915)) { for_each_pipe(i915, pipe) { ret = intel_crtc_init(i915, pipe); if (ret) { @@ -18007,6 +18028,30 @@ int intel_modeset_init(struct drm_i915_private *i915) return 0; } +/* part #3: call after gem init */ +int intel_modeset_init(struct drm_i915_private *i915) +{ + int ret; + + intel_overlay_setup(i915); + + if (!HAS_DISPLAY(i915)) + return 0; + + ret = intel_fbdev_init(&i915->drm); + if (ret) + return ret; + + /* Only enable hotplug handling once the fbdev is fully set up. 
*/ + intel_hpd_init(i915); + + intel_init_ipc(i915); + + intel_psr_set_force_mode_changed(i915->psr.dp); + + return 0; +} + void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); @@ -18891,6 +18936,18 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) intel_fbc_cleanup_cfb(i915); } +/* part #3: call after gem init */ +void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915) +{ + intel_csr_ucode_fini(i915); + + intel_power_domains_driver_remove(i915); + + intel_vga_unregister(i915); + + intel_bios_driver_remove(i915); +} + #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) struct intel_display_error_state { @@ -18951,7 +19008,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder)); - if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) + if (!HAS_DISPLAY(dev_priv)) return NULL; error = kzalloc(sizeof(*error), GFP_ATOMIC); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index e890c8fb779b..d10b7c8cde3f 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -272,8 +272,6 @@ enum dpio_phy { DPIO_PHY2, }; -#define I915_NUM_PHYS_VLV 2 - enum aux_ch { AUX_CH_A, AUX_CH_B, @@ -282,6 +280,8 @@ enum aux_ch { AUX_CH_E, /* ICL+ */ AUX_CH_F, AUX_CH_G, + AUX_CH_H, + AUX_CH_I, }; #define aux_ch_name(a) ((a) + 'A') @@ -629,9 +629,11 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, /* modesetting */ void intel_modeset_init_hw(struct drm_i915_private *i915); int intel_modeset_init_noirq(struct drm_i915_private *i915); +int intel_modeset_init_nogem(struct drm_i915_private *i915); int intel_modeset_init(struct drm_i915_private *i915); void intel_modeset_driver_remove(struct drm_i915_private *i915); void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915); +void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915); void intel_display_resume(struct drm_device *dev); void intel_init_pch_refclk(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index f549381048b3..0bf31f9a8af5 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -601,6 +601,11 @@ static void intel_hdcp_info(struct seq_file *m, { bool hdcp_cap, hdcp2_cap; + if (!intel_connector->hdcp.shim) { + seq_puts(m, "No Connector Support"); + goto out; + } + hdcp_cap = intel_hdcp_capable(intel_connector); hdcp2_cap = intel_hdcp2_capable(intel_connector); @@ -612,6 +617,7 @@ static void intel_hdcp_info(struct seq_file *m, if (!hdcp_cap && !hdcp2_cap) seq_puts(m, "None"); +out: seq_puts(m, "\n"); } @@ -620,6 +626,7 @@ static void intel_dp_info(struct seq_file *m, { struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); + const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr; seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); @@ -627,11 +634,7 @@ static void intel_dp_info(struct seq_file *m, intel_panel_info(m, &intel_connector->panel); drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, - &intel_dp->aux); - 
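Display init is now split into three explicitly ordered phases so the gem-dependent pieces can run last. The resulting probe ordering, sketched with the names introduced here:

intel_modeset_init_noirq(i915);		/* part #1: vblank, VBT, VGA, power domains, CSR */
/* ... interrupt install ... */
intel_modeset_init_nogem(i915);		/* part #2: CRTC and output setup */
/* ... gem init ... */
intel_modeset_init(i915);		/* part #3: overlay, fbdev, HPD, IPC */
/* teardown mirrors this via intel_modeset_driver_remove{,_noirq,_nogem}() */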
if (intel_connector->hdcp.shim) { - seq_puts(m, "\tHDCP version: "); - intel_hdcp_info(m, intel_connector); - } + edid ? edid->data : NULL, &intel_dp->aux); } static void intel_dp_mst_info(struct seq_file *m, @@ -649,10 +652,6 @@ static void intel_hdmi_info(struct seq_file *m, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder); seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); - if (intel_connector->hdcp.shim) { - seq_puts(m, "\tHDCP version: "); - intel_hdcp_info(m, intel_connector); - } } static void intel_lvds_info(struct seq_file *m, @@ -708,6 +707,9 @@ static void intel_connector_info(struct seq_file *m, break; } + seq_puts(m, "\tHDCP version: "); + intel_hdcp_info(m, intel_connector); + seq_printf(m, "\tmodes:\n"); list_for_each_entry(mode, &connector->modes, head) intel_seq_print_mode(m, 2, mode); @@ -1069,10 +1071,18 @@ static void drrs_status_per_crtc(struct seq_file *m, drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + bool supported = false; + if (connector->state->crtc != &intel_crtc->base) continue; seq_printf(m, "%s:\n", connector->name); + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && + drrs->type == SEAMLESS_DRRS_SUPPORT) + supported = true; + + seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported)); } drm_connector_list_iter_end(&conn_iter); @@ -1083,7 +1093,7 @@ static void drrs_status_per_crtc(struct seq_file *m, mutex_lock(&drrs->mutex); /* DRRS Supported */ - seq_puts(m, "\tDRRS Supported: Yes\n"); + seq_puts(m, "\tDRRS Enabled: Yes\n"); /* disable_drrs() will make drrs->dp NULL */ if (!drrs->dp) { @@ -1118,7 +1128,7 @@ static void drrs_status_per_crtc(struct seq_file *m, mutex_unlock(&drrs->mutex); } else { /* DRRS not supported. 
Print the VBT parameter*/ - seq_puts(m, "\tDRRS Supported : No"); + seq_puts(m, "\tDRRS Enabled : No"); } seq_puts(m, "\n"); } @@ -2029,10 +2039,6 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) if (connector->status != connector_status_connected) return -ENODEV; - /* HDCP is supported by connector */ - if (!intel_connector->hdcp.shim) - return -EINVAL; - seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id); intel_hdcp_info(m, intel_connector); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 7946c6af4b1e..7277e58b01f1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -5263,7 +5263,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask; int config, i; - if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0)) + if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0)) /* Wa_1409767108: tgl */ table = wa_1409767108_buddy_page_masks; else diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 9349b15afff6..3d4bf9b6a0a2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -28,6 +28,7 @@ #include <linux/async.h> #include <linux/i2c.h> +#include <linux/pwm.h> #include <linux/sched/clock.h> #include <drm/drm_atomic.h> @@ -223,6 +224,7 @@ struct intel_panel { bool util_pin_active_low; /* bxt+ */ u8 controller; /* bxt+ only */ struct pwm_device *pwm; + struct pwm_state pwm_state; /* DPCD backlight */ u8 pwmgen_bit_count; @@ -314,10 +316,12 @@ struct intel_hdcp_shim { /* Enables HDCP signalling on the port */ int (*toggle_signalling)(struct intel_digital_port *dig_port, + enum transcoder cpu_transcoder, bool enable); /* Ensures the link is still protected */ - bool (*check_link)(struct intel_digital_port *dig_port); + bool (*check_link)(struct intel_digital_port *dig_port, + struct intel_connector *connector); /* Detects panel's hdcp capability. This is optional for HDMI. 
*/ int (*hdcp_capable)(struct intel_digital_port *dig_port, @@ -479,8 +483,6 @@ struct intel_atomic_state { bool dpll_set, modeset; - u8 active_pipes; - struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS]; /* @@ -491,11 +493,6 @@ struct intel_atomic_state { bool rps_interactive; - /* - * active_pipes - */ - bool global_state_changed; - struct i915_sw_fence commit_ready; struct llist_node freed; @@ -1275,6 +1272,7 @@ struct intel_dp { u8 sink_count; bool link_mst; bool link_trained; + bool has_hdmi_sink; bool has_audio; bool reset_link_params; u8 dpcd[DP_RECEIVER_CAP_SIZE]; @@ -1376,6 +1374,14 @@ struct intel_dp { /* Displayport compliance testing */ struct intel_dp_compliance compliance; + /* Downstream facing port caps */ + struct { + int min_tmds_clock, max_tmds_clock; + int max_dotclock; + u8 max_bpc; + bool ycbcr_444_to_420; + } dfp; + /* Display stream compression testing */ bool force_dsc_en; @@ -1415,6 +1421,11 @@ struct intel_digital_port { enum phy_fia tc_phy_fia; u8 tc_phy_fia_idx; + /* protects num_hdcp_streams reference count */ + struct mutex hdcp_mutex; + /* the number of pipes using HDCP signalling out of this port */ + unsigned int num_hdcp_streams; + void (*write_infoframe)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, @@ -1525,6 +1536,18 @@ static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder) } } +static inline bool intel_encoder_is_mst(struct intel_encoder *encoder) +{ + return encoder->type == INTEL_OUTPUT_DP_MST; +} + +static inline struct intel_dp_mst_encoder * +enc_to_mst(struct intel_encoder *encoder) +{ + return container_of(&encoder->base, struct intel_dp_mst_encoder, + base.base); +} + static inline struct intel_digital_port * enc_to_dig_port(struct intel_encoder *encoder) { @@ -1533,6 +1556,8 @@ enc_to_dig_port(struct intel_encoder *encoder) if (intel_encoder_is_dig_port(intel_encoder)) return container_of(&encoder->base, struct intel_digital_port, base.base); + else if (intel_encoder_is_mst(intel_encoder)) + return enc_to_mst(encoder)->primary; else return NULL; } @@ -1543,13 +1568,6 @@ intel_attached_dig_port(struct intel_connector *connector) return enc_to_dig_port(intel_attached_encoder(connector)); } -static inline struct intel_dp_mst_encoder * -enc_to_mst(struct intel_encoder *encoder) -{ - return container_of(&encoder->base, struct intel_dp_mst_encoder, - base.base); -} - static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder) { return &enc_to_dig_port(encoder)->dp; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 284b15f84592..bf1e9cf1c0f3 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -38,7 +38,6 @@ #include <drm/drm_crtc.h> #include <drm/drm_dp_helper.h> #include <drm/drm_edid.h> -#include <drm/drm_hdcp.h> #include <drm/drm_probe_helper.h> #include "i915_debugfs.h" @@ -248,29 +247,6 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes) return max_link_clock * max_lanes; } -static int -intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct intel_encoder *encoder = &dig_port->base; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int max_dotclk = dev_priv->max_dotclk_freq; - int ds_max_dotclk; - - int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; - - if (type != DP_DS_PORT_TYPE_VGA) - return max_dotclk; - - ds_max_dotclk = 
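The new hdcp_mutex/num_hdcp_streams pair on intel_digital_port lets multiple MST streams share one port's HDCP signalling. A plausible consumer shape — a hypothetical sketch; the real user lives in intel_hdcp.c and is not part of this hunk:

mutex_lock(&dig_port->hdcp_mutex);
if (dig_port->num_hdcp_streams++ == 0)	/* first stream enables signalling */
	ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base,
					       cpu_transcoder, true);
mutex_unlock(&dig_port->hdcp_mutex);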
drm_dp_downstream_max_clock(intel_dp->dpcd, - intel_dp->downstream_ports); - - if (ds_max_dotclk != 0) - max_dotclk = min(max_dotclk, ds_max_dotclk); - - return max_dotclk; -} - static int cnl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); @@ -636,6 +612,34 @@ static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, } static enum drm_mode_status +intel_dp_mode_valid_downstream(struct intel_connector *connector, + const struct drm_display_mode *mode, + int target_clock) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + const struct drm_display_info *info = &connector->base.display_info; + int tmds_clock; + + if (intel_dp->dfp.max_dotclock && + target_clock > intel_dp->dfp.max_dotclock) + return MODE_CLOCK_HIGH; + + /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */ + tmds_clock = target_clock; + if (drm_mode_is_420_only(info, mode)) + tmds_clock /= 2; + + if (intel_dp->dfp.min_tmds_clock && + tmds_clock < intel_dp->dfp.min_tmds_clock) + return MODE_CLOCK_LOW; + if (intel_dp->dfp.max_tmds_clock && + tmds_clock > intel_dp->dfp.max_tmds_clock) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static enum drm_mode_status intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { @@ -645,15 +649,14 @@ intel_dp_mode_valid(struct drm_connector *connector, struct drm_i915_private *dev_priv = to_i915(connector->dev); int target_clock = mode->clock; int max_rate, mode_rate, max_lanes, max_link_clock; - int max_dotclk; + int max_dotclk = dev_priv->max_dotclk_freq; u16 dsc_max_output_bpp = 0; u8 dsc_slice_count = 0; + enum drm_mode_status status; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; - max_dotclk = intel_dp_downstream_max_dotclock(intel_dp); - if (intel_dp_is_edp(intel_dp) && fixed_mode) { if (mode->hdisplay > fixed_mode->hdisplay) return MODE_PANEL; @@ -709,6 +712,11 @@ intel_dp_mode_valid(struct drm_connector *connector, if (mode->flags & DRM_MODE_FLAG_DBLCLK) return MODE_H_ILLEGAL; + status = intel_dp_mode_valid_downstream(intel_connector, + mode, target_clock); + if (status != MODE_OK) + return status; + return intel_mode_valid_max_plane_size(dev_priv, mode); } @@ -1563,6 +1571,20 @@ intel_dp_aux_header(u8 txbuf[HEADER_SIZE], txbuf[3] = msg->size - 1; } +static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg) +{ + /* + * If we're trying to send the HDCP Aksv, we need to set a the Aksv + * select bit to inform the hardware to send the Aksv after our header + * since we can't access that data from software. 
+ */ + if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE && + msg->address == DP_AUX_HDCP_AKSV) + return DP_AUX_CH_CTL_AUX_AKSV_SELECT; + + return 0; +} + static ssize_t intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { @@ -1570,6 +1592,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 txbuf[20], rxbuf[20]; size_t txsize, rxsize; + u32 flags = intel_dp_aux_xfer_flags(msg); int ret; intel_dp_aux_header(txbuf, msg); @@ -1590,7 +1613,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, - rxbuf, rxsize, 0); + rxbuf, rxsize, flags); if (ret > 0) { msg->reply = rxbuf[0] >> 4; @@ -1613,7 +1636,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) return -E2BIG; ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, - rxbuf, rxsize, 0); + rxbuf, rxsize, flags); if (ret > 0) { msg->reply = rxbuf[0] >> 4; /* @@ -1954,19 +1977,72 @@ static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); } -static int intel_dp_compute_bpp(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config) +static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || + (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && + intel_dp->dfp.ycbcr_444_to_420); +} + +static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, int bpc) +{ + int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8; + + if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) + clock /= 2; + + return clock; +} + +static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, int bpc) +{ + int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc); + + if (intel_dp->dfp.min_tmds_clock && + tmds_clock < intel_dp->dfp.min_tmds_clock) + return false; + + if (intel_dp->dfp.max_tmds_clock && + tmds_clock > intel_dp->dfp.max_tmds_clock) + return false; + + return true; +} + +static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + int bpc) +{ + + return intel_hdmi_deep_color_possible(crtc_state, bpc, + intel_dp->has_hdmi_sink, + intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) && + intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc); +} + +static int intel_dp_max_bpp(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_connector *intel_connector = intel_dp->attached_connector; int bpp, bpc; - bpp = pipe_config->pipe_bpp; - bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports); + bpc = crtc_state->pipe_bpp / 3; - if (bpc > 0) - bpp = min(bpp, 3*bpc); + if (intel_dp->dfp.max_bpc) + bpc = min_t(int, bpc, intel_dp->dfp.max_bpc); + if (intel_dp->dfp.min_tmds_clock) { + for (; bpc >= 10; bpc -= 2) { + if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc)) + break; + } + } + + bpp = bpc * 3; if (intel_dp_is_edp(intel_dp)) { /* Get bpp from vbt only for panels that dont have bpp in edid */ if (intel_connector->base.display_info.bpc == 0 && @@ -2288,7 +2364,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, 
limits.max_lane_count = intel_dp_max_lane_count(intel_dp); limits.min_bpp = intel_dp_min_bpp(pipe_config); - limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); + limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config); if (intel_dp_is_edp(intel_dp)) { /* @@ -2363,10 +2439,16 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp, const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - if (!drm_mode_is_420_only(info, adjusted_mode) || - !intel_dp_get_colorimetry_status(intel_dp) || - !connector->ycbcr_420_allowed) + if (!connector->ycbcr_420_allowed) + return 0; + + if (!drm_mode_is_420_only(info, adjusted_mode)) + return 0; + + if (intel_dp->dfp.ycbcr_444_to_420) { + crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; return 0; + } crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; @@ -2575,6 +2657,34 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); } +static void +intel_dp_drrs_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + int output_bpp, bool constant_n) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + /* + * DRRS and PSR can't be enable together, so giving preference to PSR + * as it allows more power-savings by complete shutting down display, + * so to guarantee this, intel_dp_drrs_compute_config() must be called + * after intel_psr_compute_config(). + */ + if (pipe_config->has_psr) + return; + + if (!intel_connector->panel.downclock_mode || + dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) + return; + + pipe_config->has_drrs = true; + intel_link_compute_m_n(output_bpp, pipe_config->lane_count, + intel_connector->panel.downclock_mode->clock, + pipe_config->port_clock, &pipe_config->dp_m2_n2, + constant_n, pipe_config->fec_enable); +} + int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -2605,7 +2715,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, if (ret) return ret; - pipe_config->has_drrs = false; if (!intel_dp_port_has_audio(dev_priv, port)) pipe_config->has_audio = false; else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) @@ -2657,21 +2766,12 @@ intel_dp_compute_config(struct intel_encoder *encoder, &pipe_config->dp_m_n, constant_n, pipe_config->fec_enable); - if (intel_connector->panel.downclock_mode != NULL && - dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { - pipe_config->has_drrs = true; - intel_link_compute_m_n(output_bpp, - pipe_config->lane_count, - intel_connector->panel.downclock_mode->clock, - pipe_config->port_clock, - &pipe_config->dp_m2_n2, - constant_n, pipe_config->fec_enable); - } - if (!HAS_DDI(dev_priv)) intel_dp_set_clock(encoder, pipe_config); intel_psr_compute_config(intel_dp, pipe_config); + intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp, + constant_n); intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); @@ -3752,6 +3852,43 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp, intel_de_posting_read(dev_priv, intel_dp->output_reg); } +void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + u8 tmp; + + if (intel_dp->dpcd[DP_DPCD_REV] < 0x13) + return; + + if (!drm_dp_is_branch(intel_dp->dpcd)) + return; + + tmp = intel_dp->has_hdmi_sink ? 
+ DP_HDMI_DVI_OUTPUT_CONFIG : 0; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1) + drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n", + enableddisabled(intel_dp->has_hdmi_sink)); + + tmp = intel_dp->dfp.ycbcr_444_to_420 ? + DP_CONVERSION_TO_YCBCR420_ENABLE : 0; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) + drm_dbg_kms(&i915->drm, + "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n", + enableddisabled(intel_dp->dfp.ycbcr_444_to_420)); + + tmp = 0; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0) + drm_dbg_kms(&i915->drm, + "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n", + enableddisabled(false)); +} + static void intel_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, @@ -3789,6 +3926,7 @@ static void intel_enable_dp(struct intel_atomic_state *state, } intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_configure_protocol_converter(intel_dp); intel_dp_start_link_train(intel_dp); intel_dp_stop_link_train(intel_dp); @@ -6028,16 +6166,103 @@ intel_dp_get_edid(struct intel_dp *intel_dp) } static void +intel_dp_update_dfp(struct intel_dp *intel_dp, + const struct edid *edid) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_connector *connector = intel_dp->attached_connector; + + intel_dp->dfp.max_bpc = + drm_dp_downstream_max_bpc(intel_dp->dpcd, + intel_dp->downstream_ports, edid); + + intel_dp->dfp.max_dotclock = + drm_dp_downstream_max_dotclock(intel_dp->dpcd, + intel_dp->downstream_ports); + + intel_dp->dfp.min_tmds_clock = + drm_dp_downstream_min_tmds_clock(intel_dp->dpcd, + intel_dp->downstream_ports, + edid); + intel_dp->dfp.max_tmds_clock = + drm_dp_downstream_max_tmds_clock(intel_dp->dpcd, + intel_dp->downstream_ports, + edid); + + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n", + connector->base.base.id, connector->base.name, + intel_dp->dfp.max_bpc, + intel_dp->dfp.max_dotclock, + intel_dp->dfp.min_tmds_clock, + intel_dp->dfp.max_tmds_clock); +} + +static void +intel_dp_update_420(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_connector *connector = intel_dp->attached_connector; + bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420; + + /* No YCbCr output support on gmch platforms */ + if (HAS_GMCH(i915)) + return; + + /* + * ILK doesn't seem capable of DP YCbCr output. The + * displayed image is severly corrupted. SNB+ is fine. + */ + if (IS_GEN(i915, 5)) + return; + + is_branch = drm_dp_is_branch(intel_dp->dpcd); + ycbcr_420_passthrough = + drm_dp_downstream_420_passthrough(intel_dp->dpcd, + intel_dp->downstream_ports); + ycbcr_444_to_420 = + drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd, + intel_dp->downstream_ports); + + if (INTEL_GEN(i915) >= 11) { + /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */ + intel_dp->dfp.ycbcr_444_to_420 = + ycbcr_444_to_420 && !ycbcr_420_passthrough; + + connector->base.ycbcr_420_allowed = + !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough; + } else { + /* 4:4:4->4:2:0 conversion is the only way */ + intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420; + + connector->base.ycbcr_420_allowed = ycbcr_444_to_420; + } + + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? 
%s\n", + connector->base.base.id, connector->base.name, + yesno(connector->base.ycbcr_420_allowed), + yesno(intel_dp->dfp.ycbcr_444_to_420)); +} + +static void intel_dp_set_edid(struct intel_dp *intel_dp) { - struct intel_connector *intel_connector = intel_dp->attached_connector; + struct intel_connector *connector = intel_dp->attached_connector; struct edid *edid; intel_dp_unset_edid(intel_dp); edid = intel_dp_get_edid(intel_dp); - intel_connector->detect_edid = edid; + connector->detect_edid = edid; + + intel_dp_update_dfp(intel_dp, edid); + intel_dp_update_420(intel_dp); + + if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { + intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid); + intel_dp->has_audio = drm_detect_monitor_audio(edid); + } - intel_dp->has_audio = drm_detect_monitor_audio(edid); drm_dp_cec_set_edid(&intel_dp->aux, edid); intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); } @@ -6045,14 +6270,23 @@ intel_dp_set_edid(struct intel_dp *intel_dp) static void intel_dp_unset_edid(struct intel_dp *intel_dp) { - struct intel_connector *intel_connector = intel_dp->attached_connector; + struct intel_connector *connector = intel_dp->attached_connector; drm_dp_cec_unset_edid(&intel_dp->aux); - kfree(intel_connector->detect_edid); - intel_connector->detect_edid = NULL; + kfree(connector->detect_edid); + connector->detect_edid = NULL; + intel_dp->has_hdmi_sink = false; intel_dp->has_audio = false; intel_dp->edid_quirks = 0; + + intel_dp->dfp.max_bpc = 0; + intel_dp->dfp.max_dotclock = 0; + intel_dp->dfp.min_tmds_clock = 0; + intel_dp->dfp.max_tmds_clock = 0; + + intel_dp->dfp.ycbcr_444_to_420 = false; + connector->base.ycbcr_420_allowed = false; } static int @@ -6071,6 +6305,9 @@ intel_dp_detect(struct drm_connector *connector, drm_WARN_ON(&dev_priv->drm, !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); + if (!INTEL_DISPLAY_ENABLED(dev_priv)) + return connector_status_disconnected; + /* Can't disconnect eDP */ if (intel_dp_is_edp(intel_dp)) status = edp_detect(intel_dp); @@ -6211,7 +6448,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) } /* if eDP has no EDID, fall back to fixed mode */ - if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) && + if (intel_dp_is_edp(intel_attached_dp(intel_connector)) && intel_connector->panel.fixed_mode) { struct drm_display_mode *mode; @@ -6223,6 +6460,19 @@ static int intel_dp_get_modes(struct drm_connector *connector) } } + if (!edid) { + struct intel_dp *intel_dp = intel_attached_dp(intel_connector); + struct drm_display_mode *mode; + + mode = drm_dp_downstream_mode(connector->dev, + intel_dp->dpcd, + intel_dp->downstream_ports); + if (mode) { + drm_mode_probed_add(connector, mode); + return 1; + } + } + return 0; } @@ -6308,628 +6558,6 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) edp_panel_vdd_off_sync(intel_dp); } -static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) -{ - long ret; - -#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) - ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C, - msecs_to_jiffies(timeout)); - - if (!ret) - DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n"); -} - -static -int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, - u8 *an) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&dig_port->base.base)); - static const struct drm_dp_aux_msg msg = { - .request = 
DP_AUX_NATIVE_WRITE, - .address = DP_AUX_HDCP_AKSV, - .size = DRM_HDCP_KSV_LEN, - }; - u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0; - ssize_t dpcd_ret; - int ret; - - /* Output An first, that's easy */ - dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN, - an, DRM_HDCP_AN_LEN); - if (dpcd_ret != DRM_HDCP_AN_LEN) { - drm_dbg_kms(&i915->drm, - "Failed to write An over DP/AUX (%zd)\n", - dpcd_ret); - return dpcd_ret >= 0 ? -EIO : dpcd_ret; - } - - /* - * Since Aksv is Oh-So-Secret, we can't access it in software. So in - * order to get it on the wire, we need to create the AUX header as if - * we were writing the data, and then tickle the hardware to output the - * data once the header is sent out. - */ - intel_dp_aux_header(txbuf, &msg); - - ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size, - rxbuf, sizeof(rxbuf), - DP_AUX_CH_CTL_AUX_AKSV_SELECT); - if (ret < 0) { - drm_dbg_kms(&i915->drm, - "Write Aksv over DP/AUX failed (%d)\n", ret); - return ret; - } else if (ret == 0) { - drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n"); - return -EIO; - } - - reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; - if (reply != DP_AUX_NATIVE_REPLY_ACK) { - drm_dbg_kms(&i915->drm, - "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", - reply); - return -EIO; - } - return 0; -} - -static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port, - u8 *bksv) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, - DRM_HDCP_KSV_LEN); - if (ret != DRM_HDCP_KSV_LEN) { - drm_dbg_kms(&i915->drm, - "Read Bksv from DP/AUX failed (%zd)\n", ret); - return ret >= 0 ? -EIO : ret; - } - return 0; -} - -static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, - u8 *bstatus) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - - /* - * For some reason the HDMI and DP HDCP specs call this register - * definition by different names. In the HDMI spec, it's called BSTATUS, - * but in DP it's called BINFO. - */ - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO, - bstatus, DRM_HDCP_BSTATUS_LEN); - if (ret != DRM_HDCP_BSTATUS_LEN) { - drm_dbg_kms(&i915->drm, - "Read bstatus from DP/AUX failed (%zd)\n", ret); - return ret >= 0 ? -EIO : ret; - } - return 0; -} - -static -int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port, - u8 *bcaps) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS, - bcaps, 1); - if (ret != 1) { - drm_dbg_kms(&i915->drm, - "Read bcaps from DP/AUX failed (%zd)\n", ret); - return ret >= 0 ? -EIO : ret; - } - - return 0; -} - -static -int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port, - bool *repeater_present) -{ - ssize_t ret; - u8 bcaps; - - ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); - if (ret) - return ret; - - *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; - return 0; -} - -static -int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port, - u8 *ri_prime) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, - ri_prime, DRM_HDCP_RI_LEN); - if (ret != DRM_HDCP_RI_LEN) { - drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", - ret); - return ret >= 0 ? 
-EIO : ret; - } - return 0; -} - -static -int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, - bool *ksv_ready) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - u8 bstatus; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, - &bstatus, 1); - if (ret != 1) { - drm_dbg_kms(&i915->drm, - "Read bstatus from DP/AUX failed (%zd)\n", ret); - return ret >= 0 ? -EIO : ret; - } - *ksv_ready = bstatus & DP_BSTATUS_READY; - return 0; -} - -static -int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, - int num_downstream, u8 *ksv_fifo) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - int i; - - /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ - for (i = 0; i < num_downstream; i += 3) { - size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; - ret = drm_dp_dpcd_read(&dig_port->dp.aux, - DP_AUX_HDCP_KSV_FIFO, - ksv_fifo + i * DRM_HDCP_KSV_LEN, - len); - if (ret != len) { - drm_dbg_kms(&i915->drm, - "Read ksv[%d] from DP/AUX failed (%zd)\n", - i, ret); - return ret >= 0 ? -EIO : ret; - } - } - return 0; -} - -static -int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, - int i, u32 *part) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - - if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) - return -EINVAL; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, - DP_AUX_HDCP_V_PRIME(i), part, - DRM_HDCP_V_PRIME_PART_LEN); - if (ret != DRM_HDCP_V_PRIME_PART_LEN) { - drm_dbg_kms(&i915->drm, - "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); - return ret >= 0 ? -EIO : ret; - } - return 0; -} - -static -int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port, - bool enable) -{ - /* Not used for single stream DisplayPort setups */ - return 0; -} - -static -bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - u8 bstatus; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, - &bstatus, 1); - if (ret != 1) { - drm_dbg_kms(&i915->drm, - "Read bstatus from DP/AUX failed (%zd)\n", ret); - return false; - } - - return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); -} - -static -int intel_dp_hdcp_capable(struct intel_digital_port *dig_port, - bool *hdcp_capable) -{ - ssize_t ret; - u8 bcaps; - - ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); - if (ret) - return ret; - - *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; - return 0; -} - -struct hdcp2_dp_errata_stream_type { - u8 msg_id; - u8 stream_type; -} __packed; - -struct hdcp2_dp_msg_data { - u8 msg_id; - u32 offset; - bool msg_detectable; - u32 timeout; - u32 timeout2; /* Added for non_paired situation */ -}; - -static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { - { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 }, - { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, - false, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, - { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, - false, 0, 0 }, - { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, - false, 0, 0 }, - { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, - true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, - HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, - { HDCP_2_2_AKE_SEND_PAIRING_INFO, - DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, - HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, - { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 
}, - { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, - false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 }, - { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, - 0, 0 }, - { HDCP_2_2_REP_SEND_RECVID_LIST, - DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, - HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, - { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, - 0, 0 }, - { HDCP_2_2_REP_STREAM_MANAGE, - DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, - 0, 0 }, - { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, - false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, -/* local define to shovel this through the write_2_2 interface */ -#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 - { HDCP_2_2_ERRATA_DP_STREAM_TYPE, - DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, - 0, 0 }, -}; - -static int -intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port, - u8 *rx_status) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, - DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, - HDCP_2_2_DP_RXSTATUS_LEN); - if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { - drm_dbg_kms(&i915->drm, - "Read bstatus from DP/AUX failed (%zd)\n", ret); - return ret >= 0 ? -EIO : ret; - } - - return 0; -} - -static -int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port, - u8 msg_id, bool *msg_ready) -{ - u8 rx_status; - int ret; - - *msg_ready = false; - ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); - if (ret < 0) - return ret; - - switch (msg_id) { - case HDCP_2_2_AKE_SEND_HPRIME: - if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) - *msg_ready = true; - break; - case HDCP_2_2_AKE_SEND_PAIRING_INFO: - if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) - *msg_ready = true; - break; - case HDCP_2_2_REP_SEND_RECVID_LIST: - if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) - *msg_ready = true; - break; - default: - DRM_ERROR("Unidentified msg_id: %d\n", msg_id); - return -EINVAL; - } - - return 0; -} - -static ssize_t -intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port, - const struct hdcp2_dp_msg_data *hdcp2_msg_data) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_dp *dp = &dig_port->dp; - struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; - u8 msg_id = hdcp2_msg_data->msg_id; - int ret, timeout; - bool msg_ready = false; - - if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) - timeout = hdcp2_msg_data->timeout2; - else - timeout = hdcp2_msg_data->timeout; - - /* - * There is no way to detect the CERT, LPRIME and STREAM_READY - * availability. So Wait for timeout and read the msg. - */ - if (!hdcp2_msg_data->msg_detectable) { - mdelay(timeout); - ret = 0; - } else { - /* - * As we want to check the msg availability at timeout, Ignoring - * the timeout at wait for CP_IRQ. 
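An aside on the CP_IRQ/timeout split visible in the hunk being removed here (the same logic reappears verbatim in the new intel_dp_hdcp.c added later in this diff): DP's HDCP 2.2 RXSTATUS register can only advertise three message types (H', pairing info, and the receiver-ID list), so every other reply has to be fetched blindly once the per-message spec timeout elapses; that is exactly what the msg_detectable flag and the mdelay() branch encode. Below is a minimal, self-contained sketch of that split; the helper names are invented for illustration, and the RXSTATUS bit positions are an assumption taken from the kernel's drm_hdcp.h rather than from this diff:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* DP HDCP 2.2 RXSTATUS bits (assumed per drm_hdcp.h). */
    #define RXSTATUS_READY    (1u << 0)  /* receiver-ID list ready */
    #define RXSTATUS_H_PRIME  (1u << 1)  /* AKE_Send_H_prime available */
    #define RXSTATUS_PAIRING  (1u << 2)  /* pairing info available */

    enum hdcp2_msg {
            MSG_AKE_SEND_HPRIME,
            MSG_AKE_SEND_PAIRING_INFO,
            MSG_REP_SEND_RECVID_LIST,
            MSG_OTHER,  /* CERT, L', stream-ready: not detectable */
    };

    /*
     * Returns false for messages RXSTATUS cannot advertise; for those the
     * caller can only sleep for the per-message timeout and then read.
     */
    static bool hdcp2_msg_available(enum hdcp2_msg msg, uint8_t rx_status)
    {
            switch (msg) {
            case MSG_AKE_SEND_HPRIME:
                    return rx_status & RXSTATUS_H_PRIME;
            case MSG_AKE_SEND_PAIRING_INFO:
                    return rx_status & RXSTATUS_PAIRING;
            case MSG_REP_SEND_RECVID_LIST:
                    return rx_status & RXSTATUS_READY;
            default:
                    return false;
            }
    }

    int main(void)
    {
            uint8_t rx_status = RXSTATUS_H_PRIME;

            printf("H' available: %d\n",
                   hdcp2_msg_available(MSG_AKE_SEND_HPRIME, rx_status));
            printf("recv-ID list available: %d\n",
                   hdcp2_msg_available(MSG_REP_SEND_RECVID_LIST, rx_status));
            return 0;
    }

This is why the hdcp2_dp_msg_data table carries both a timeout and a msg_detectable flag per message: the two fields together tell the wait loop whether to poll or merely delay.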
- */ - intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); - ret = hdcp2_detect_msg_availability(dig_port, - msg_id, &msg_ready); - if (!msg_ready) - ret = -ETIMEDOUT; - } - - if (ret) - drm_dbg_kms(&i915->drm, - "msg_id %d, ret %d, timeout(mSec): %d\n", - hdcp2_msg_data->msg_id, ret, timeout); - - return ret; -} - -static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++) - if (hdcp2_dp_msg_data[i].msg_id == msg_id) - return &hdcp2_dp_msg_data[i]; - - return NULL; -} - -static -int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port, - void *buf, size_t size) -{ - struct intel_dp *dp = &dig_port->dp; - struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; - unsigned int offset; - u8 *byte = buf; - ssize_t ret, bytes_to_write, len; - const struct hdcp2_dp_msg_data *hdcp2_msg_data; - - hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); - if (!hdcp2_msg_data) - return -EINVAL; - - offset = hdcp2_msg_data->offset; - - /* No msg_id in DP HDCP2.2 msgs */ - bytes_to_write = size - 1; - byte++; - - hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); - - while (bytes_to_write) { - len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? - DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; - - ret = drm_dp_dpcd_write(&dig_port->dp.aux, - offset, (void *)byte, len); - if (ret < 0) - return ret; - - bytes_to_write -= ret; - byte += ret; - offset += ret; - } - - return size; -} - -static -ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port) -{ - u8 rx_info[HDCP_2_2_RXINFO_LEN]; - u32 dev_cnt; - ssize_t ret; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, - DP_HDCP_2_2_REG_RXINFO_OFFSET, - (void *)rx_info, HDCP_2_2_RXINFO_LEN); - if (ret != HDCP_2_2_RXINFO_LEN) - return ret >= 0 ? -EIO : ret; - - dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | - HDCP_2_2_DEV_COUNT_LO(rx_info[1])); - - if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) - dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; - - ret = sizeof(struct hdcp2_rep_send_receiverid_list) - - HDCP_2_2_RECEIVER_IDS_MAX_LEN + - (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); - - return ret; -} - -static -int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port, - u8 msg_id, void *buf, size_t size) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - unsigned int offset; - u8 *byte = buf; - ssize_t ret, bytes_to_recv, len; - const struct hdcp2_dp_msg_data *hdcp2_msg_data; - - hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); - if (!hdcp2_msg_data) - return -EINVAL; - offset = hdcp2_msg_data->offset; - - ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data); - if (ret < 0) - return ret; - - if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { - ret = get_receiver_id_list_size(dig_port); - if (ret < 0) - return ret; - - size = ret; - } - bytes_to_recv = size - 1; - - /* DP adaptation msgs has no msg_id */ - byte++; - - while (bytes_to_recv) { - len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? 
- DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset, - (void *)byte, len); - if (ret < 0) { - drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n", - msg_id, ret); - return ret; - } - - bytes_to_recv -= ret; - byte += ret; - offset += ret; - } - byte = buf; - *byte = msg_id; - - return size; -} - -static -int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port, - bool is_repeater, u8 content_type) -{ - int ret; - struct hdcp2_dp_errata_stream_type stream_type_msg; - - if (is_repeater) - return 0; - - /* - * Errata for DP: As Stream type is used for encryption, Receiver - * should be communicated with stream type for the decryption of the - * content. - * Repeater will be communicated with stream type as a part of it's - * auth later in time. - */ - stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; - stream_type_msg.stream_type = content_type; - - ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg, - sizeof(stream_type_msg)); - - return ret < 0 ? ret : 0; - -} - -static -int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port) -{ - u8 rx_status; - int ret; - - ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); - if (ret) - return ret; - - if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) - ret = HDCP_REAUTH_REQUEST; - else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) - ret = HDCP_LINK_INTEGRITY_FAILURE; - else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) - ret = HDCP_TOPOLOGY_CHANGE; - - return ret; -} - -static -int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port, - bool *capable) -{ - u8 rx_caps[3]; - int ret; - - *capable = false; - ret = drm_dp_dpcd_read(&dig_port->dp.aux, - DP_HDCP_2_2_REG_RX_CAPS_OFFSET, - rx_caps, HDCP_2_2_RXCAPS_LEN); - if (ret != HDCP_2_2_RXCAPS_LEN) - return ret >= 0 ? 
-EIO : ret; - - if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && - HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) - *capable = true; - - return 0; -} - -static const struct intel_hdcp_shim intel_dp_hdcp_shim = { - .write_an_aksv = intel_dp_hdcp_write_an_aksv, - .read_bksv = intel_dp_hdcp_read_bksv, - .read_bstatus = intel_dp_hdcp_read_bstatus, - .repeater_present = intel_dp_hdcp_repeater_present, - .read_ri_prime = intel_dp_hdcp_read_ri_prime, - .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, - .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, - .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, - .toggle_signalling = intel_dp_hdcp_toggle_signalling, - .check_link = intel_dp_hdcp_check_link, - .hdcp_capable = intel_dp_hdcp_capable, - .write_2_2_msg = intel_dp_hdcp2_write_msg, - .read_2_2_msg = intel_dp_hdcp2_read_msg, - .config_stream_type = intel_dp_hdcp2_config_stream_type, - .check_2_2_link = intel_dp_hdcp2_check_link, - .hdcp_2_2_capable = intel_dp_hdcp2_capable, - .protocol = HDCP_PROTOCOL_DP, -}; - static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -7640,6 +7268,15 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, refresh_rate); } +static void +intel_edp_drrs_enable_locked(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + dev_priv->drrs.busy_frontbuffer_bits = 0; + dev_priv->drrs.dp = intel_dp; +} + /** * intel_edp_drrs_enable - init drrs struct if supported * @intel_dp: DP struct @@ -7652,31 +7289,40 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - if (!crtc_state->has_drrs) { - drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n"); + if (!crtc_state->has_drrs) return; - } - if (dev_priv->psr.enabled) { - drm_dbg_kms(&dev_priv->drm, - "PSR enabled. Not enabling DRRS.\n"); - return; - } + drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n"); mutex_lock(&dev_priv->drrs.mutex); + if (dev_priv->drrs.dp) { - drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n"); + drm_warn(&dev_priv->drm, "DRRS already enabled\n"); goto unlock; } - dev_priv->drrs.busy_frontbuffer_bits = 0; - - dev_priv->drrs.dp = intel_dp; + intel_edp_drrs_enable_locked(intel_dp); unlock: mutex_unlock(&dev_priv->drrs.mutex); } +static void +intel_edp_drrs_disable_locked(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { + int refresh; + + refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode); + intel_dp_set_drrs_state(dev_priv, crtc_state, refresh); + } + + dev_priv->drrs.dp = NULL; +} + /** * intel_edp_drrs_disable - Disable DRRS * @intel_dp: DP struct @@ -7697,16 +7343,45 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp, return; } - if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv, old_crtc_state, - drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); - - dev_priv->drrs.dp = NULL; + intel_edp_drrs_disable_locked(intel_dp, old_crtc_state); mutex_unlock(&dev_priv->drrs.mutex); cancel_delayed_work_sync(&dev_priv->drrs.work); } +/** + * intel_edp_drrs_update - Update DRRS state + * @intel_dp: Intel DP + * @crtc_state: new CRTC state + * + * This function will update DRRS states, disabling or enabling DRRS when + * executing fastsets. 
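The *_locked split above exists so the fastset path can toggle DRRS while already holding dev_priv->drrs.mutex, without duplicating the enable/disable bookkeeping. The core of the new update hook is an idempotence check: a non-NULL drrs.dp means DRRS is active, and nothing happens unless the desired state differs. A toy, self-contained model of that pattern, with pthread standing in for the kernel mutex and a bare pointer for dev_priv->drrs.dp (names are illustrative, not the driver's):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct drrs {
            pthread_mutex_t mutex;
            const void *dp;  /* non-NULL == DRRS currently enabled */
    };

    static void drrs_update(struct drrs *drrs, const void *dp, bool want_drrs)
    {
            pthread_mutex_lock(&drrs->mutex);

            /* New state matches current one? Then the fastset needs no work. */
            if (want_drrs == (drrs->dp != NULL))
                    goto unlock;

            drrs->dp = want_drrs ? dp : NULL;  /* enable_locked()/disable_locked() */
    unlock:
            pthread_mutex_unlock(&drrs->mutex);
    }

    int main(void)
    {
            struct drrs drrs = { PTHREAD_MUTEX_INITIALIZER, NULL };
            int dp;

            drrs_update(&drrs, &dp, true);   /* fastset gained DRRS */
            drrs_update(&drrs, &dp, true);   /* no-op: already enabled */
            drrs_update(&drrs, &dp, false);  /* fastset lost DRRS */
            printf("DRRS active: %d\n", drrs.dp != NULL);
            return 0;
    }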
For full modeset, intel_edp_drrs_disable() and + * intel_edp_drrs_enable() should be called instead. + */ +void +intel_edp_drrs_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) + return; + + mutex_lock(&dev_priv->drrs.mutex); + + /* New state matches current one? */ + if (crtc_state->has_drrs == !!dev_priv->drrs.dp) + goto unlock; + + if (crtc_state->has_drrs) + intel_edp_drrs_enable_locked(intel_dp); + else + intel_edp_drrs_disable_locked(intel_dp, crtc_state); + +unlock: + mutex_unlock(&dev_priv->drrs.mutex); +} + static void intel_edp_drrs_downclock_work(struct work_struct *work) { struct drm_i915_private *dev_priv = @@ -8138,10 +7813,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, connector->interlace_allowed = true; connector->doublescan_allowed = 0; - if (INTEL_GEN(dev_priv) >= 11) - connector->ycbcr_420_allowed = true; - - intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); intel_connector->polled = DRM_CONNECTOR_POLL_HPD; intel_dp_aux_init(intel_dp); @@ -8166,7 +7837,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_dp_add_properties(intel_dp, connector); if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { - int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim); + int ret = intel_dp_init_hdcp(dig_port, intel_connector); if (ret) drm_dbg_kms(&dev_priv->drm, "HDCP init failed, skipping.\n"); @@ -8210,6 +7881,8 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder = &dig_port->base; encoder = &intel_encoder->base; + mutex_init(&dig_port->hdcp_mutex); + if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port))) @@ -8284,6 +7957,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, } intel_encoder->cloneable = 0; intel_encoder->port = port; + intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); dig_port->hpd_pulse = intel_dp_hpd_pulse; diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 0a3af3410d52..08a1c0aa8b94 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -17,6 +17,7 @@ struct drm_encoder; struct drm_i915_private; struct drm_modeset_acquire_ctx; struct drm_dp_vsc_sdp; +struct intel_atomic_state; struct intel_connector; struct intel_crtc_state; struct intel_digital_port; @@ -50,6 +51,7 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx); void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); +void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp); void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, bool enable); @@ -81,6 +83,8 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); void intel_edp_drrs_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); +void intel_edp_drrs_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits); void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, @@ -127,4 +131,12 @@ static inline unsigned int intel_dp_unused_lane_mask(int 
lane_count) u32 intel_dp_mode_to_fec_clock(u32 mode_clock); +void intel_ddi_update_pipe(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); + +int intel_dp_init_hdcp(struct intel_digital_port *dig_port, + struct intel_connector *intel_connector); + #endif /* __INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c new file mode 100644 index 000000000000..03424d20e9f7 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -0,0 +1,703 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2020 Google, Inc. + * + * Authors: + * Sean Paul <seanpaul@chromium.org> + */ + +#include <drm/drm_dp_helper.h> +#include <drm/drm_dp_mst_helper.h> +#include <drm/drm_hdcp.h> +#include <drm/drm_print.h> + +#include "intel_display_types.h" +#include "intel_ddi.h" +#include "intel_dp.h" +#include "intel_hdcp.h" + +static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) +{ + long ret; + +#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) + ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C, + msecs_to_jiffies(timeout)); + + if (!ret) + DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n"); +} + +static +int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, + u8 *an) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + u8 aksv[DRM_HDCP_KSV_LEN] = {}; + ssize_t dpcd_ret; + + /* Output An first, that's easy */ + dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN, + an, DRM_HDCP_AN_LEN); + if (dpcd_ret != DRM_HDCP_AN_LEN) { + drm_dbg_kms(&i915->drm, + "Failed to write An over DP/AUX (%zd)\n", + dpcd_ret); + return dpcd_ret >= 0 ? -EIO : dpcd_ret; + } + + /* + * Since Aksv is Oh-So-Secret, we can't access it in software. So we + * send an empty buffer of the correct length through the DP helpers. On + * the other side, in the transfer hook, we'll generate a flag based on + * the destination address which will tickle the hardware to output the + * Aksv on our behalf after the header is sent. + */ + dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AKSV, + aksv, DRM_HDCP_KSV_LEN); + if (dpcd_ret != DRM_HDCP_KSV_LEN) { + drm_dbg_kms(&i915->drm, + "Failed to write Aksv over DP/AUX (%zd)\n", + dpcd_ret); + return dpcd_ret >= 0 ? -EIO : dpcd_ret; + } + return 0; +} + +static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port, + u8 *bksv) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, + DRM_HDCP_KSV_LEN); + if (ret != DRM_HDCP_KSV_LEN) { + drm_dbg_kms(&i915->drm, + "Read Bksv from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, + u8 *bstatus) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + + /* + * For some reason the HDMI and DP HDCP specs call this register + * definition by different names. In the HDMI spec, it's called BSTATUS, + * but in DP it's called BINFO. + */ + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO, + bstatus, DRM_HDCP_BSTATUS_LEN); + if (ret != DRM_HDCP_BSTATUS_LEN) { + drm_dbg_kms(&i915->drm, + "Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? 
-EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port, + u8 *bcaps) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS, + bcaps, 1); + if (ret != 1) { + drm_dbg_kms(&i915->drm, + "Read bcaps from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + + return 0; +} + +static +int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port, + bool *repeater_present) +{ + ssize_t ret; + u8 bcaps; + + ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); + if (ret) + return ret; + + *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; + return 0; +} + +static +int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port, + u8 *ri_prime) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, + ri_prime, DRM_HDCP_RI_LEN); + if (ret != DRM_HDCP_RI_LEN) { + drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", + ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, + bool *ksv_ready) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + u8 bstatus; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, + &bstatus, 1); + if (ret != 1) { + drm_dbg_kms(&i915->drm, + "Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + *ksv_ready = bstatus & DP_BSTATUS_READY; + return 0; +} + +static +int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, + int num_downstream, u8 *ksv_fifo) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + int i; + + /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ + for (i = 0; i < num_downstream; i += 3) { + size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; + ret = drm_dp_dpcd_read(&dig_port->dp.aux, + DP_AUX_HDCP_KSV_FIFO, + ksv_fifo + i * DRM_HDCP_KSV_LEN, + len); + if (ret != len) { + drm_dbg_kms(&i915->drm, + "Read ksv[%d] from DP/AUX failed (%zd)\n", + i, ret); + return ret >= 0 ? -EIO : ret; + } + } + return 0; +} + +static +int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, + int i, u32 *part) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + + if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) + return -EINVAL; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, + DP_AUX_HDCP_V_PRIME(i), part, + DRM_HDCP_V_PRIME_PART_LEN); + if (ret != DRM_HDCP_V_PRIME_PART_LEN) { + drm_dbg_kms(&i915->drm, + "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); + return ret >= 0 ? 
-EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port, + enum transcoder cpu_transcoder, + bool enable) +{ + /* Not used for single stream DisplayPort setups */ + return 0; +} + +static +bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port, + struct intel_connector *connector) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + u8 bstatus; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, + &bstatus, 1); + if (ret != 1) { + drm_dbg_kms(&i915->drm, + "Read bstatus from DP/AUX failed (%zd)\n", ret); + return false; + } + + return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); +} + +static +int intel_dp_hdcp_capable(struct intel_digital_port *dig_port, + bool *hdcp_capable) +{ + ssize_t ret; + u8 bcaps; + + ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); + if (ret) + return ret; + + *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; + return 0; +} + +struct hdcp2_dp_errata_stream_type { + u8 msg_id; + u8 stream_type; +} __packed; + +struct hdcp2_dp_msg_data { + u8 msg_id; + u32 offset; + bool msg_detectable; + u32 timeout; + u32 timeout2; /* Added for non_paired situation */ +}; + +static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { + { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 }, + { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, + false, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, + { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, + false, 0, 0 }, + { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, + false, 0, 0 }, + { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, + true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, + HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, + { HDCP_2_2_AKE_SEND_PAIRING_INFO, + DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, + HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, + { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 }, + { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, + false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 }, + { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, + 0, 0 }, + { HDCP_2_2_REP_SEND_RECVID_LIST, + DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, + HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, + { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, + 0, 0 }, + { HDCP_2_2_REP_STREAM_MANAGE, + DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, + 0, 0 }, + { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, + false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, +/* local define to shovel this through the write_2_2 interface */ +#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 + { HDCP_2_2_ERRATA_DP_STREAM_TYPE, + DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, + 0, 0 }, +}; + +static int +intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port, + u8 *rx_status) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, + DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, + HDCP_2_2_DP_RXSTATUS_LEN); + if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { + drm_dbg_kms(&i915->drm, + "Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? 
-EIO : ret; + } + + return 0; +} + +static +int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port, + u8 msg_id, bool *msg_ready) +{ + u8 rx_status; + int ret; + + *msg_ready = false; + ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); + if (ret < 0) + return ret; + + switch (msg_id) { + case HDCP_2_2_AKE_SEND_HPRIME: + if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) + *msg_ready = true; + break; + case HDCP_2_2_AKE_SEND_PAIRING_INFO: + if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) + *msg_ready = true; + break; + case HDCP_2_2_REP_SEND_RECVID_LIST: + if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) + *msg_ready = true; + break; + default: + DRM_ERROR("Unidentified msg_id: %d\n", msg_id); + return -EINVAL; + } + + return 0; +} + +static ssize_t +intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port, + const struct hdcp2_dp_msg_data *hdcp2_msg_data) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_dp *dp = &dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; + u8 msg_id = hdcp2_msg_data->msg_id; + int ret, timeout; + bool msg_ready = false; + + if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) + timeout = hdcp2_msg_data->timeout2; + else + timeout = hdcp2_msg_data->timeout; + + /* + * There is no way to detect the CERT, LPRIME and STREAM_READY + * availability. So Wait for timeout and read the msg. + */ + if (!hdcp2_msg_data->msg_detectable) { + mdelay(timeout); + ret = 0; + } else { + /* + * As we want to check the msg availability at timeout, Ignoring + * the timeout at wait for CP_IRQ. + */ + intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); + ret = hdcp2_detect_msg_availability(dig_port, + msg_id, &msg_ready); + if (!msg_ready) + ret = -ETIMEDOUT; + } + + if (ret) + drm_dbg_kms(&i915->drm, + "msg_id %d, ret %d, timeout(mSec): %d\n", + hdcp2_msg_data->msg_id, ret, timeout); + + return ret; +} + +static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++) + if (hdcp2_dp_msg_data[i].msg_id == msg_id) + return &hdcp2_dp_msg_data[i]; + + return NULL; +} + +static +int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port, + void *buf, size_t size) +{ + struct intel_dp *dp = &dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; + unsigned int offset; + u8 *byte = buf; + ssize_t ret, bytes_to_write, len; + const struct hdcp2_dp_msg_data *hdcp2_msg_data; + + hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); + if (!hdcp2_msg_data) + return -EINVAL; + + offset = hdcp2_msg_data->offset; + + /* No msg_id in DP HDCP2.2 msgs */ + bytes_to_write = size - 1; + byte++; + + hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); + + while (bytes_to_write) { + len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? + DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; + + ret = drm_dp_dpcd_write(&dig_port->dp.aux, + offset, (void *)byte, len); + if (ret < 0) + return ret; + + bytes_to_write -= ret; + byte += ret; + offset += ret; + } + + return size; +} + +static +ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port) +{ + u8 rx_info[HDCP_2_2_RXINFO_LEN]; + u32 dev_cnt; + ssize_t ret; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, + DP_HDCP_2_2_REG_RXINFO_OFFSET, + (void *)rx_info, HDCP_2_2_RXINFO_LEN); + if (ret != HDCP_2_2_RXINFO_LEN) + return ret >= 0 ? 
-EIO : ret; + + dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | + HDCP_2_2_DEV_COUNT_LO(rx_info[1])); + + if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) + dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; + + ret = sizeof(struct hdcp2_rep_send_receiverid_list) - + HDCP_2_2_RECEIVER_IDS_MAX_LEN + + (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); + + return ret; +} + +static +int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port, + u8 msg_id, void *buf, size_t size) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + unsigned int offset; + u8 *byte = buf; + ssize_t ret, bytes_to_recv, len; + const struct hdcp2_dp_msg_data *hdcp2_msg_data; + + hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); + if (!hdcp2_msg_data) + return -EINVAL; + offset = hdcp2_msg_data->offset; + + ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data); + if (ret < 0) + return ret; + + if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { + ret = get_receiver_id_list_size(dig_port); + if (ret < 0) + return ret; + + size = ret; + } + bytes_to_recv = size - 1; + + /* DP adaptation msgs has no msg_id */ + byte++; + + while (bytes_to_recv) { + len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? + DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset, + (void *)byte, len); + if (ret < 0) { + drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n", + msg_id, ret); + return ret; + } + + bytes_to_recv -= ret; + byte += ret; + offset += ret; + } + byte = buf; + *byte = msg_id; + + return size; +} + +static +int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port, + bool is_repeater, u8 content_type) +{ + int ret; + struct hdcp2_dp_errata_stream_type stream_type_msg; + + if (is_repeater) + return 0; + + /* + * Errata for DP: As Stream type is used for encryption, Receiver + * should be communicated with stream type for the decryption of the + * content. + * Repeater will be communicated with stream type as a part of it's + * auth later in time. + */ + stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; + stream_type_msg.stream_type = content_type; + + ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg, + sizeof(stream_type_msg)); + + return ret < 0 ? ret : 0; + +} + +static +int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port) +{ + u8 rx_status; + int ret; + + ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); + if (ret) + return ret; + + if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) + ret = HDCP_REAUTH_REQUEST; + else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) + ret = HDCP_LINK_INTEGRITY_FAILURE; + else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) + ret = HDCP_TOPOLOGY_CHANGE; + + return ret; +} + +static +int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port, + bool *capable) +{ + u8 rx_caps[3]; + int ret; + + *capable = false; + ret = drm_dp_dpcd_read(&dig_port->dp.aux, + DP_HDCP_2_2_REG_RX_CAPS_OFFSET, + rx_caps, HDCP_2_2_RXCAPS_LEN); + if (ret != HDCP_2_2_RXCAPS_LEN) + return ret >= 0 ? 
-EIO : ret; + + if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && + HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) + *capable = true; + + return 0; +} + +static const struct intel_hdcp_shim intel_dp_hdcp_shim = { + .write_an_aksv = intel_dp_hdcp_write_an_aksv, + .read_bksv = intel_dp_hdcp_read_bksv, + .read_bstatus = intel_dp_hdcp_read_bstatus, + .repeater_present = intel_dp_hdcp_repeater_present, + .read_ri_prime = intel_dp_hdcp_read_ri_prime, + .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, + .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, + .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, + .toggle_signalling = intel_dp_hdcp_toggle_signalling, + .check_link = intel_dp_hdcp_check_link, + .hdcp_capable = intel_dp_hdcp_capable, + .write_2_2_msg = intel_dp_hdcp2_write_msg, + .read_2_2_msg = intel_dp_hdcp2_read_msg, + .config_stream_type = intel_dp_hdcp2_config_stream_type, + .check_2_2_link = intel_dp_hdcp2_check_link, + .hdcp_2_2_capable = intel_dp_hdcp2_capable, + .protocol = HDCP_PROTOCOL_DP, +}; + +static int +intel_dp_mst_hdcp_toggle_signalling(struct intel_digital_port *dig_port, + enum transcoder cpu_transcoder, + bool enable) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + int ret; + + if (!enable) + usleep_range(6, 60); /* Bspec says >= 6us */ + + ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, + cpu_transcoder, enable); + if (ret) + drm_dbg_kms(&i915->drm, "%s HDCP signalling failed (%d)\n", + enable ? "Enable" : "Disable", ret); + return ret; +} + +static +bool intel_dp_mst_hdcp_check_link(struct intel_digital_port *dig_port, + struct intel_connector *connector) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_dp *intel_dp = &dig_port->dp; + struct drm_dp_query_stream_enc_status_ack_reply reply; + int ret; + + if (!intel_dp_hdcp_check_link(dig_port, connector)) + return false; + + ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr, + connector->port, &reply); + if (ret) { + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] failed QSES ret=%d\n", + connector->base.base.id, connector->base.name, ret); + return false; + } + + return reply.auth_completed && reply.encryption_enabled; +} + +static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = { + .write_an_aksv = intel_dp_hdcp_write_an_aksv, + .read_bksv = intel_dp_hdcp_read_bksv, + .read_bstatus = intel_dp_hdcp_read_bstatus, + .repeater_present = intel_dp_hdcp_repeater_present, + .read_ri_prime = intel_dp_hdcp_read_ri_prime, + .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, + .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, + .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, + .toggle_signalling = intel_dp_mst_hdcp_toggle_signalling, + .check_link = intel_dp_mst_hdcp_check_link, + .hdcp_capable = intel_dp_hdcp_capable, + + .protocol = HDCP_PROTOCOL_DP, +}; + +int intel_dp_init_hdcp(struct intel_digital_port *dig_port, + struct intel_connector *intel_connector) +{ + struct drm_device *dev = intel_connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_encoder *intel_encoder = &dig_port->base; + enum port port = intel_encoder->port; + struct intel_dp *intel_dp = &dig_port->dp; + + if (!is_hdcp_supported(dev_priv, port)) + return 0; + + if (intel_connector->mst_port) + return intel_hdcp_init(intel_connector, port, + &intel_dp_mst_hdcp_shim); + else if (!intel_dp_is_edp(intel_dp)) + return intel_hdcp_init(intel_connector, port, + &intel_dp_hdcp_shim); + + return 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c 
b/drivers/gpu/drm/i915/display/intel_dp_mst.c index a2d91a499700..64d885539e94 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -37,6 +37,7 @@ #include "intel_dp.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" +#include "intel_hdcp.h" static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, @@ -352,6 +353,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state, drm_dbg_kms(&i915->drm, "active links %d\n", intel_dp->active_mst_links); + intel_hdcp_disable(intel_mst->connector); + drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port); ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); @@ -556,6 +559,13 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, if (pipe_config->has_audio) intel_audio_codec_enable(encoder, pipe_config, conn_state); + + /* Enable hdcp if it's desired */ + if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_DESIRED) + intel_hdcp_enable(to_intel_connector(conn_state->connector), + pipe_config->cpu_transcoder, + (u8)conn_state->hdcp_content_type); } static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, @@ -709,9 +719,13 @@ static int intel_dp_mst_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; + if (!INTEL_DISPLAY_ENABLED(i915)) + return connector_status_disconnected; + if (drm_connector_is_unregistered(connector)) return connector_status_disconnected; @@ -799,6 +813,14 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); + + /* TODO: Figure out how to make HDCP work on GEN12+ */ + if (INTEL_GEN(dev_priv) < 12) { + ret = intel_dp_init_hdcp(dig_port, intel_connector); + if (ret) + DRM_DEBUG_KMS("HDCP init failed, skipping.\n"); + } + /* * Reuse the prop from the SST connector because we're * not allowed to create new props after device registration. 
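The MST hunks above only start HDCP when the connector's content-protection property sits at DESIRED; ENABLED is a value the driver reports back once encryption is confirmed, never something userspace may request directly. A self-contained sketch of that three-value contract, using the standard DRM property values (0/1/2); the helper names here are made up for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* Values of the DRM "Content Protection" connector property. */
    enum {
            CP_UNDESIRED = 0,  /* encryption off */
            CP_DESIRED   = 1,  /* userspace request; also the driver's retry state */
            CP_ENABLED   = 2,  /* set only by the driver, never by userspace */
    };

    /* Mirrors the conn_state check in intel_mst_enable_dp() above. */
    static bool should_enable_hdcp(int conn_state_cp)
    {
            return conn_state_cp == CP_DESIRED;
    }

    /*
     * After a periodic link check: keep ENABLED only while the link holds,
     * otherwise fall back to DESIRED so the driver retries (compare
     * intel_hdcp_update_value() in the intel_hdcp.c hunks further down).
     */
    static int after_link_check(int value, bool link_protected)
    {
            if (value == CP_UNDESIRED)
                    return CP_UNDESIRED;
            return link_protected ? CP_ENABLED : CP_DESIRED;
    }

    int main(void)
    {
            printf("enable on stream enable: %d\n", should_enable_hdcp(CP_DESIRED));
            printf("link dropped -> property: %d\n",
                   after_link_check(CP_ENABLED, false));
            return 0;
    }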
@@ -865,6 +887,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe intel_encoder->compute_config_late = intel_dp_mst_compute_config_late; intel_encoder->disable = intel_mst_disable_dp; intel_encoder->post_disable = intel_mst_post_disable_dp; + intel_encoder->update_pipe = intel_ddi_update_pipe; intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; intel_encoder->pre_enable = intel_mst_pre_enable_dp; intel_encoder->enable = intel_mst_enable_dp; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index c9013f8f766f..e08684e34078 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -147,6 +147,18 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, pll->info->name, onoff(state), onoff(cur_state)); } +static i915_reg_t +intel_combo_pll_enable_reg(struct drm_i915_private *i915, + struct intel_shared_dpll *pll) +{ + + if (IS_ELKHARTLAKE(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4)) + return MG_PLL_ENABLE(0); + + return CNL_DPLL_ENABLE(pll->info->id); + + +} /** * intel_prepare_shared_dpll - call a dpll's prepare hook * @crtc_state: CRTC, and its state, which has a shared dpll @@ -3842,12 +3854,7 @@ static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { - i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); - - if (IS_ELKHARTLAKE(dev_priv) && - pll->info->id == DPLL_ID_EHL_DPLL4) { - enable_reg = MG_PLL_ENABLE(0); - } + i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll); return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg); } @@ -4045,11 +4052,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv, static void combo_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); + i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll); if (IS_ELKHARTLAKE(dev_priv) && pll->info->id == DPLL_ID_EHL_DPLL4) { - enable_reg = MG_PLL_ENABLE(0); /* * We need to disable DC states when this DPLL is enabled. 
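The dpll_mgr hunks above (and the combo_pll_disable() one just below) replace three open-coded copies of the "Elkhart Lake DPLL4 uses MG_PLL_ENABLE(0)" special case with a single intel_combo_pll_enable_reg() helper; with the register chosen up front, icl_pll_disable() can run unconditionally and only the POWER_DOMAIN_DPLL_DC_OFF wakeref release stays conditional. A self-contained sketch of the refactor's shape; the register offsets below are illustrative stand-ins, not the real MMIO addresses:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DPLL_ENABLE_BASE 0x1000  /* stand-in for CNL_DPLL_ENABLE(0) */
    #define MG_PLL_ENABLE_0  0x2000  /* stand-in for MG_PLL_ENABLE(0) */

    /*
     * One place to answer "which enable register does this PLL use?",
     * instead of repeating the EHL DPLL4 check at every call site.
     */
    static uint32_t combo_pll_enable_reg(bool is_elkhartlake, int pll_id,
                                         int ehl_dpll4_id)
    {
            if (is_elkhartlake && pll_id == ehl_dpll4_id)
                    return MG_PLL_ENABLE_0;  /* DPLL4 borrows a Type-C PLL slot */

            return DPLL_ENABLE_BASE + 4 * pll_id;
    }

    int main(void)
    {
            printf("EHL DPLL4  -> %#x\n", combo_pll_enable_reg(true, 4, 4));
            printf("combo PLL1 -> %#x\n", combo_pll_enable_reg(true, 1, 4));
            return 0;
    }

The payoff is visible in the disable hunk that follows: get_hw_state, enable and disable now share one register-selection path, and the EHL branch shrinks to the power-domain handling that genuinely differs.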
@@ -4157,19 +4163,14 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, static void combo_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); + i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll); - if (IS_ELKHARTLAKE(dev_priv) && - pll->info->id == DPLL_ID_EHL_DPLL4) { - enable_reg = MG_PLL_ENABLE(0); - icl_pll_disable(dev_priv, pll, enable_reg); + icl_pll_disable(dev_priv, pll, enable_reg); + if (IS_ELKHARTLAKE(dev_priv) && + pll->info->id == DPLL_ID_EHL_DPLL4) intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF, pll->wakeref); - return; - } - - icl_pll_disable(dev_priv, pll, enable_reg); } static void tbt_pll_disable(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 307ed8ae9a19..237dbb1ba0ee 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -313,9 +313,15 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state, static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector, bool force) { + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector)); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + + if (!INTEL_DISPLAY_ENABLED(i915)) + return connector_status_disconnected; + return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); } diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index bd39eb6a21b8..842c04e63214 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -451,8 +451,7 @@ int intel_fbdev_init(struct drm_device *dev) struct intel_fbdev *ifbdev; int ret; - if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv) || - !INTEL_DISPLAY_ENABLED(dev_priv))) + if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv))) return -ENODEV; ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index a8d119b6b45c..e6b8d6dfb598 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -834,7 +834,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv) unsigned int pin; int ret; - if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) + if (!HAS_DISPLAY(dev_priv)) return 0; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 1a0d49af2a08..5492076d1ae0 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -148,9 +148,8 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, static bool hdcp_key_loadable(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *power_well; enum i915_power_well_id id; + intel_wakeref_t wakeref; bool enabled = false; /* @@ -162,17 +161,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv) else id = SKL_DISP_PW_1; - mutex_lock(&power_domains->lock); - /* PG1 (power well #1) needs to be enabled */ - for_each_power_well(dev_priv, power_well) { - if (power_well->desc->id == id) { - enabled = power_well->desc->ops->is_enabled(dev_priv, - power_well); - break; - } 
- } - mutex_unlock(&power_domains->lock); + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) + enabled = intel_display_power_well_is_enabled(dev_priv, id); /* * Another req for hdcp key loadability is enabled state of pll for @@ -713,7 +704,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) intel_de_write(dev_priv, HDCP_REP_CTL, intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port)); - ret = shim->toggle_signalling(dig_port, true); + ret = shim->toggle_signalling(dig_port, cpu_transcoder, true); if (ret) return ret; @@ -801,6 +792,19 @@ static int _intel_hdcp_disable(struct intel_connector *connector) drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n", connector->base.name, connector->base.base.id); + /* + * If there are other connectors on this port using HDCP, don't disable + * it. Instead, toggle the HDCP signalling off on that particular + * connector/pipe and exit. + */ + if (dig_port->num_hdcp_streams > 0) { + ret = hdcp->shim->toggle_signalling(dig_port, + cpu_transcoder, false); + if (ret) + DRM_ERROR("Failed to disable HDCP signalling\n"); + return ret; + } + hdcp->hdcp_encrypted = false; intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0); if (intel_de_wait_for_clear(dev_priv, @@ -816,7 +820,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector) intel_de_write(dev_priv, HDCP_REP_CTL, intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl); - ret = hdcp->shim->toggle_signalling(dig_port, false); + ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); if (ret) { drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n"); return ret; @@ -876,6 +880,34 @@ static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) return container_of(hdcp, struct intel_connector, hdcp); } +static void intel_hdcp_update_value(struct intel_connector *connector, + u64 value, bool update_property) +{ + struct drm_device *dev = connector->base.dev; + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + + drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex)); + + if (hdcp->value == value) + return; + + drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex)); + + if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0)) + dig_port->num_hdcp_streams--; + } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + dig_port->num_hdcp_streams++; + } + + hdcp->value = value; + if (update_property) { + drm_connector_get(&connector->base); + schedule_work(&hdcp->prop_work); + } +} + /* Implements Part 3 of the HDCP authorization procedure */ static int intel_hdcp_check_link(struct intel_connector *connector) { @@ -887,6 +919,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector) int ret = 0; mutex_lock(&hdcp->mutex); + mutex_lock(&dig_port->hdcp_mutex); + cpu_transcoder = hdcp->cpu_transcoder; /* Check_link valid only when HDCP1.4 is enabled */ @@ -903,15 +937,16 @@ static int intel_hdcp_check_link(struct intel_connector *connector) connector->base.name, connector->base.base.id, intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port))); ret = -ENXIO; - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, + true); goto out; } - if (hdcp->shim->check_link(dig_port)) { + if (hdcp->shim->check_link(dig_port, connector)) { if 
(hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED, true); } goto out; } @@ -923,20 +958,23 @@ static int intel_hdcp_check_link(struct intel_connector *connector) ret = _intel_hdcp_disable(connector); if (ret) { drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, + true); goto out; } ret = _intel_hdcp_enable(connector); if (ret) { drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, + true); goto out; } out: + mutex_unlock(&dig_port->hdcp_mutex); mutex_unlock(&hdcp->mutex); return ret; } @@ -962,6 +1000,8 @@ static void intel_hdcp_prop_work(struct work_struct *work) mutex_unlock(&hdcp->mutex); drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex); + + drm_connector_put(&connector->base); } bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) @@ -1600,7 +1640,8 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS); if (hdcp->shim->toggle_signalling) { - ret = hdcp->shim->toggle_signalling(dig_port, true); + ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, + true); if (ret) { drm_err(&dev_priv->drm, "Failed to enable HDCP signalling. %d\n", @@ -1650,7 +1691,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout"); if (hdcp->shim->toggle_signalling) { - ret = hdcp->shim->toggle_signalling(dig_port, false); + ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, + false); if (ret) { drm_err(&dev_priv->drm, "Failed to disable HDCP signalling. 
%d\n", @@ -1766,16 +1808,18 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) "HDCP2.2 link stopped the encryption, %x\n", intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port))); ret = -ENXIO; - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, + true); goto out; } ret = hdcp->shim->check_2_2_link(dig_port); if (ret == HDCP_LINK_PROTECTED) { if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED, + true); } goto out; } @@ -1788,8 +1832,9 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) "HDCP2.2 Downstream topology change\n"); ret = hdcp2_authenticate_repeater_topology(connector); if (!ret) { - hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED, + true); goto out; } drm_dbg_kms(&dev_priv->drm, @@ -1807,8 +1852,8 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) drm_err(&dev_priv->drm, "[%s:%d] Failed to disable hdcp2.2 (%d)\n", connector->base.name, connector->base.base.id, ret); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, true); goto out; } @@ -1818,8 +1863,9 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) "[%s:%d] Failed to enable hdcp2.2 (%d)\n", connector->base.name, connector->base.base.id, ret); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, + true); goto out; } @@ -1835,6 +1881,9 @@ static void intel_hdcp_check_work(struct work_struct *work) check_work); struct intel_connector *connector = intel_hdcp_to_connector(hdcp); + if (drm_connector_is_unregistered(&connector->base)) + return; + if (!intel_hdcp2_check_link(connector)) schedule_delayed_work(&hdcp->check_work, DRM_HDCP2_CHECK_PERIOD_MS); @@ -1896,6 +1945,7 @@ static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder) } static int initialize_hdcp_port_data(struct intel_connector *connector, + enum port port, const struct intel_hdcp_shim *shim) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -1903,8 +1953,7 @@ static int initialize_hdcp_port_data(struct intel_connector *connector, struct hdcp_port_data *data = &hdcp->port_data; if (INTEL_GEN(dev_priv) < 12) - data->fw_ddi = - intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port); + data->fw_ddi = intel_get_mei_fw_ddi_index(port); else /* * As per ME FW API expectation, for GEN 12+, fw_ddi is filled @@ -1974,14 +2023,14 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv) } } -static void intel_hdcp2_init(struct intel_connector *connector, +static void intel_hdcp2_init(struct intel_connector *connector, enum port port, const struct intel_hdcp_shim *shim) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; - ret = initialize_hdcp_port_data(connector, shim); + ret = initialize_hdcp_port_data(connector, port, shim); if (ret) { drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n"); return; @@ -1991,6 +2040,7 @@ static void 
intel_hdcp2_init(struct intel_connector *connector, } int intel_hdcp_init(struct intel_connector *connector, + enum port port, const struct intel_hdcp_shim *shim) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -2000,8 +2050,8 @@ int intel_hdcp_init(struct intel_connector *connector, if (!shim) return -EINVAL; - if (is_hdcp2_supported(dev_priv)) - intel_hdcp2_init(connector, shim); + if (is_hdcp2_supported(dev_priv) && !connector->mst_port) + intel_hdcp2_init(connector, port, shim); ret = drm_connector_attach_content_protection_property(&connector->base, @@ -2025,6 +2075,7 @@ int intel_hdcp_enable(struct intel_connector *connector, enum transcoder cpu_transcoder, u8 content_type) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; int ret = -EINVAL; @@ -2033,14 +2084,14 @@ int intel_hdcp_enable(struct intel_connector *connector, return -ENOENT; mutex_lock(&hdcp->mutex); + mutex_lock(&dig_port->hdcp_mutex); drm_WARN_ON(&dev_priv->drm, hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); hdcp->content_type = content_type; + hdcp->cpu_transcoder = cpu_transcoder; - if (INTEL_GEN(dev_priv) >= 12) { - hdcp->cpu_transcoder = cpu_transcoder; + if (INTEL_GEN(dev_priv) >= 12) hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder); - } /* * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup @@ -2063,16 +2114,19 @@ int intel_hdcp_enable(struct intel_connector *connector, if (!ret) { schedule_delayed_work(&hdcp->check_work, check_link_interval); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED, + true); } + mutex_unlock(&dig_port->hdcp_mutex); mutex_unlock(&hdcp->mutex); return ret; } int intel_hdcp_disable(struct intel_connector *connector) { + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; int ret = 0; @@ -2080,15 +2134,20 @@ int intel_hdcp_disable(struct intel_connector *connector) return -ENOENT; mutex_lock(&hdcp->mutex); + mutex_lock(&dig_port->hdcp_mutex); - if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; - if (hdcp->hdcp2_encrypted) - ret = _intel_hdcp2_disable(connector); - else if (hdcp->hdcp_encrypted) - ret = _intel_hdcp_disable(connector); - } + if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + goto out; + + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false); + if (hdcp->hdcp2_encrypted) + ret = _intel_hdcp2_disable(connector); + else if (hdcp->hdcp_encrypted) + ret = _intel_hdcp_disable(connector); +out: + mutex_unlock(&dig_port->hdcp_mutex); mutex_unlock(&hdcp->mutex); cancel_delayed_work_sync(&hdcp->check_work); return ret; @@ -2102,11 +2161,15 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_hdcp *hdcp = &connector->hdcp; - bool content_protection_type_changed = + bool content_protection_type_changed, desired_and_not_enabled = false; + + if (!connector->hdcp.shim) + return; + + content_protection_type_changed = (conn_state->hdcp_content_type != hdcp->content_type && conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED); - bool 
desired_and_not_enabled = false; /* * During the HDCP encryption session if Type change is requested, @@ -2159,12 +2222,39 @@ void intel_hdcp_component_fini(struct drm_i915_private *dev_priv) void intel_hdcp_cleanup(struct intel_connector *connector) { - if (!connector->hdcp.shim) + struct intel_hdcp *hdcp = &connector->hdcp; + + if (!hdcp->shim) return; - mutex_lock(&connector->hdcp.mutex); - kfree(connector->hdcp.port_data.streams); - mutex_unlock(&connector->hdcp.mutex); + /* + * If the connector is registered, it's possible userspace could kick + * off another HDCP enable, which would re-spawn the workers. + */ + drm_WARN_ON(connector->base.dev, + connector->base.registration_state == DRM_CONNECTOR_REGISTERED); + + /* + * Now that the connector is not registered, check_work won't be run, + * but cancel any outstanding instances of it + */ + cancel_delayed_work_sync(&hdcp->check_work); + + /* + * We don't cancel prop_work in the same way as check_work since it + * requires connection_mutex which could be held while calling this + * function. Instead, we rely on the connector references grabbed before + * scheduling prop_work to ensure the connector is alive when prop_work + * is run. So if we're in the destroy path (which is where this + * function should be called), we're "guaranteed" that prop_work is not + * active (tl;dr This Should Never Happen). + */ + drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work)); + + mutex_lock(&hdcp->mutex); + kfree(hdcp->port_data.streams); + hdcp->shim = NULL; + mutex_unlock(&hdcp->mutex); } void intel_hdcp_atomic_check(struct drm_connector *connector, diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index 86bbaec120cc..1bbf5b67ed0a 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -22,7 +22,7 @@ enum transcoder; void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state); -int intel_hdcp_init(struct intel_connector *connector, +int intel_hdcp_init(struct intel_connector *connector, enum port port, const struct intel_hdcp_shim *hdcp_shim); int intel_hdcp_enable(struct intel_connector *connector, enum transcoder cpu_transcoder, u8 content_type); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index de2ce5632b94..3f2008d845c2 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1477,7 +1477,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, return ret; } -static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) +static int kbl_repositioning_enc_en_signal(struct intel_connector *connector, + enum transcoder cpu_transcoder) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); @@ -1494,13 +1495,15 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) usleep_range(25, 50); } - ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, false); + ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder, + false); if (ret) { drm_err(&dev_priv->drm, "Disable HDCP signalling failed (%d)\n", ret); return ret; } - ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, true); + ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder, + true); if (ret) { drm_err(&dev_priv->drm, 
"Enable HDCP signalling failed (%d)\n", ret); @@ -1512,6 +1515,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) static int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port, + enum transcoder cpu_transcoder, bool enable) { struct intel_hdmi *hdmi = &dig_port->hdmi; @@ -1522,7 +1526,8 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port, if (!enable) usleep_range(6, 60); /* Bspec says >= 6us */ - ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, enable); + ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder, + enable); if (ret) { drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n", enable ? "Enable" : "Disable", ret); @@ -1534,17 +1539,17 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port, * opportunity and enc_en signalling in KABYLAKE. */ if (IS_KABYLAKE(dev_priv) && enable) - return kbl_repositioning_enc_en_signal(connector); + return kbl_repositioning_enc_en_signal(connector, + cpu_transcoder); return 0; } static -bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port) +bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port, + struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_connector *connector = - dig_port->hdmi.attached_connector; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; int ret; @@ -1572,13 +1577,14 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port) } static -bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port) +bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port, + struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int retry; for (retry = 0; retry < 3; retry++) - if (intel_hdmi_hdcp_check_link_once(dig_port)) + if (intel_hdmi_hdcp_check_link_once(dig_port, connector)) return true; drm_err(&i915->drm, "Link check failed\n"); @@ -2271,35 +2277,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector, return intel_mode_valid_max_plane_size(dev_priv, mode); } -static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, - int bpc) +bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, + int bpc, bool has_hdmi_sink, bool ycbcr420_output) { - struct drm_i915_private *dev_priv = - to_i915(crtc_state->uapi.crtc->dev); struct drm_atomic_state *state = crtc_state->uapi.state; struct drm_connector_state *connector_state; struct drm_connector *connector; - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; int i; - if (HAS_GMCH(dev_priv)) - return false; - - if (bpc == 10 && INTEL_GEN(dev_priv) < 11) - return false; - if (crtc_state->pipe_bpp < bpc * 3) return false; - if (!crtc_state->has_hdmi_sink) - return false; - - /* - * HDMI deep color affects the clocks, so it's only possible - * when not cloning with other encoder types. 
- */ - if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI) + if (!has_hdmi_sink) return false; for_each_new_connector_in_state(state, connector, connector_state, i) { @@ -2308,7 +2297,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, if (connector_state->crtc != crtc_state->uapi.crtc) continue; - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { + if (ycbcr420_output) { const struct drm_hdmi_info *hdmi = &info->hdmi; if (bpc == 12 && !(hdmi->y420_dc_modes & @@ -2327,6 +2316,30 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, } } + return true; +} + +static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, + int bpc) +{ + struct drm_i915_private *dev_priv = + to_i915(crtc_state->uapi.crtc->dev); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + + if (HAS_GMCH(dev_priv)) + return false; + + if (bpc == 10 && INTEL_GEN(dev_priv) < 11) + return false; + + /* + * HDMI deep color affects the clocks, so it's only possible + * when not cloning with other encoder types. + */ + if (crtc_state->output_types != BIT(INTEL_OUTPUT_HDMI)) + return false; + /* Display Wa_1405510057:icl,ehl */ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && bpc == 10 && IS_GEN(dev_priv, 11) && @@ -2334,7 +2347,10 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, adjusted_mode->crtc_hblank_start) % 8 == 2) return false; - return true; + return intel_hdmi_deep_color_possible(crtc_state, bpc, + crtc_state->has_hdmi_sink, + crtc_state->output_format == + INTEL_OUTPUT_FORMAT_YCBCR420); } static int @@ -2459,6 +2475,23 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, } } +static bool intel_hdmi_has_audio(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + const struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + + if (!crtc_state->has_hdmi_sink) + return false; + + if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) + return intel_hdmi->has_audio; + else + return intel_conn_state->force_audio == HDMI_AUDIO_ON; +} + int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -2468,8 +2501,6 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_connector *connector = conn_state->connector; struct drm_scdc *scdc = &connector->display_info.hdmi.scdc; - struct intel_digital_connector_state *intel_conn_state = - to_intel_digital_connector_state(conn_state); int ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) @@ -2495,13 +2526,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv)) pipe_config->has_pch_encoder = true; - if (pipe_config->has_hdmi_sink) { - if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) - pipe_config->has_audio = intel_hdmi->has_audio; - else - pipe_config->has_audio = - intel_conn_state->force_audio == HDMI_AUDIO_ON; - } + pipe_config->has_audio = + intel_hdmi_has_audio(encoder, pipe_config, conn_state); ret = intel_hdmi_compute_clock(encoder, pipe_config); if (ret) @@ -2667,6 +2693,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) 
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + if (!INTEL_DISPLAY_ENABLED(dev_priv)) + return connector_status_disconnected; + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); if (INTEL_GEN(dev_priv) >= 11 && @@ -3250,7 +3279,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) connector->ycbcr_420_allowed = true; - intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); intel_connector->polled = DRM_CONNECTOR_POLL_HPD; if (HAS_DDI(dev_priv)) @@ -3264,7 +3292,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, intel_hdmi->attached_connector = intel_connector; if (is_hdcp_supported(dev_priv, port)) { - int ret = intel_hdcp_init(intel_connector, + int ret = intel_hdcp_init(intel_connector, port, &intel_hdmi_hdcp_shim); if (ret) drm_dbg_kms(&dev_priv->drm, @@ -3335,6 +3363,8 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, intel_encoder = &dig_port->base; + mutex_init(&dig_port->hdcp_mutex); + drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port)); @@ -3382,6 +3412,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, intel_encoder->pipe_mask = ~0; } intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG; + intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); /* * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems * to work on real hardware. And since g4x can send infoframes to diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 5b348dcab77a..15eb0ccde76e 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -48,5 +48,7 @@ void intel_read_infoframe(struct intel_encoder *encoder, union hdmi_infoframe *frame); bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); +bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc, + bool has_hdmi_sink, bool ycbcr420_output); #endif /* __INTEL_HDMI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 3f1d7b804a66..5c58c1ed6493 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -81,33 +81,12 @@ * * It is only valid and used by digital port encoder. * - * Return pin that is associatade with @port and HDP_NONE if no pin is - * hard associated with that @port. + * Return pin that is associated with @port. */ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, enum port port) { - enum phy phy = intel_port_to_phy(dev_priv, port); - - /* - * RKL + TGP PCH is a special case; we effectively choose the hpd_pin - * based on the DDI rather than the PHY (i.e., the last two outputs - * shold be HPD_PORT_{D,E} rather than {C,D}. Note that this differs - * from the behavior of both TGL+TGP and RKL+CMP. - */ - if (IS_ROCKETLAKE(dev_priv) && HAS_PCH_TGP(dev_priv)) - return HPD_PORT_A + port - PORT_A; - - switch (phy) { - case PHY_F: - return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F; - case PHY_A ... PHY_E: - case PHY_G ...
PHY_I: - return HPD_PORT_A + phy - PHY_A; - default: - MISSING_CASE(phy); - return HPD_NONE; - } + return HPD_PORT_A + port - PORT_A; } #define HPD_STORM_DETECT_PERIOD 1000 @@ -503,7 +482,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * only the one of them (DP) will have ->hpd_pulse(). */ for_each_intel_encoder(&dev_priv->drm, encoder) { - bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); enum port port = encoder->port; bool long_hpd; @@ -511,7 +489,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (!(BIT(pin) & pin_mask)) continue; - if (!has_hpd_pulse) + if (!intel_encoder_has_hpd_pulse(encoder)) continue; long_hpd = long_mask & BIT(pin); diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 1888611244db..e65c2de522c3 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -456,12 +456,6 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, return 0; } -static enum drm_connector_status -intel_lvds_detect(struct drm_connector *connector, bool force) -{ - return connector_status_connected; -} - /* * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. */ @@ -490,7 +484,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { - .detect = intel_lvds_detect, + .detect = intel_panel_detect, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 4072d7062efd..9f23bac0d792 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -40,8 +40,6 @@ #include "intel_dsi_dcs_backlight.h" #include "intel_panel.h" -#define CRC_PMIC_PWM_PERIOD_NS 21333 - void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode) @@ -594,10 +592,10 @@ static u32 bxt_get_backlight(struct intel_connector *connector) static u32 pwm_get_backlight(struct intel_connector *connector) { struct intel_panel *panel = &connector->panel; - int duty_ns; + struct pwm_state state; - duty_ns = pwm_get_duty_cycle(panel->backlight.pwm); - return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS); + pwm_get_state(panel->backlight.pwm, &state); + return pwm_get_relative_duty_cycle(&state, 100); } static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level) @@ -671,9 +669,9 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; - int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100); - pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS); + pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); + pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); } static void @@ -842,10 +840,8 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct intel_panel *panel = &connector->panel; - /* Disable the backlight */ - 
intel_panel_actually_set_backlight(old_conn_state, 0); - usleep_range(2000, 3000); - pwm_disable(panel->backlight.pwm); + panel->backlight.pwm_state.enabled = false; + pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); } void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state) @@ -1177,9 +1173,12 @@ static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state, { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; + int level = panel->backlight.level; - pwm_enable(panel->backlight.pwm); - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); + level = intel_panel_compute_brightness(connector, level); + pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); + panel->backlight.pwm_state.enabled = true; + pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); } static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, @@ -1543,18 +1542,9 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul); } -static u32 get_backlight_max_vbt(struct intel_connector *connector) +static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz; - u32 pwm; - - if (!panel->backlight.hz_to_pwm) { - drm_dbg_kms(&dev_priv->drm, - "backlight frequency conversion not supported\n"); - return 0; - } if (pwm_freq_hz) { drm_dbg_kms(&dev_priv->drm, @@ -1567,6 +1557,22 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector) pwm_freq_hz); } + return pwm_freq_hz; +} + +static u32 get_backlight_max_vbt(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv); + u32 pwm; + + if (!panel->backlight.hz_to_pwm) { + drm_dbg_kms(&dev_priv->drm, + "backlight frequency conversion not supported\n"); + return 0; + } + pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz); if (!pwm) { drm_dbg_kms(&dev_priv->drm, @@ -1891,8 +1897,7 @@ static int pwm_setup_backlight(struct intel_connector *connector, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_panel *panel = &connector->panel; const char *desc; - u32 level, ns; - int retval; + u32 level; /* Get the right PWM chip for DSI backlight according to VBT */ if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) { @@ -1910,30 +1915,28 @@ static int pwm_setup_backlight(struct intel_connector *connector, return -ENODEV; } - /* - * FIXME: pwm_apply_args() should be removed when switching to - * the atomic PWM API. 
- */ - pwm_apply_args(panel->backlight.pwm); - - panel->backlight.min = 0; /* 0% */ panel->backlight.max = 100; /* 100% */ - level = intel_panel_compute_brightness(connector, 100); - ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100); + panel->backlight.min = get_backlight_min_vbt(connector); - retval = pwm_config(panel->backlight.pwm, ns, CRC_PMIC_PWM_PERIOD_NS); - if (retval < 0) { - drm_err(&dev_priv->drm, "Failed to configure the pwm chip\n"); - pwm_put(panel->backlight.pwm); - panel->backlight.pwm = NULL; - return retval; - } + if (pwm_is_enabled(panel->backlight.pwm)) { + /* PWM is already enabled, use existing settings */ + pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state); + + level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state, + 100); + level = intel_panel_compute_brightness(connector, level); + panel->backlight.level = clamp(level, panel->backlight.min, + panel->backlight.max); + panel->backlight.enabled = true; - level = DIV_ROUND_UP_ULL(pwm_get_duty_cycle(panel->backlight.pwm) * 100, - CRC_PMIC_PWM_PERIOD_NS); - panel->backlight.level = - intel_panel_compute_brightness(connector, level); - panel->backlight.enabled = panel->backlight.level != 0; + drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n", + NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period, + get_vbt_pwm_freq(dev_priv), level); + } else { + /* Set period from VBT frequency, leave other settings at 0. */ + panel->backlight.pwm_state.period = + NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv); + } drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n", desc); @@ -2092,6 +2095,17 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) } } +enum drm_connector_status +intel_panel_detect(struct drm_connector *connector, bool force) +{ + struct drm_i915_private *i915 = to_i915(connector->dev); + + if (!INTEL_DISPLAY_ENABLED(i915)) + return connector_status_disconnected; + + return connector_status_connected; +} + int intel_panel_init(struct intel_panel *panel, struct drm_display_mode *fixed_mode, struct drm_display_mode *downclock_mode) diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index 968b95281cb4..5b813fe90557 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -23,6 +23,8 @@ int intel_panel_init(struct intel_panel *panel, struct drm_display_mode *fixed_mode, struct drm_display_mode *downclock_mode); void intel_panel_fini(struct intel_panel *panel); +enum drm_connector_status +intel_panel_detect(struct drm_connector *connector, bool force); void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 2b004ee9619c..8a9d0bdde1bf 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -555,7 +555,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) if (dev_priv->psr.psr2_sel_fetch_enabled) { /* WA 1408330847 */ - if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || + if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, DIS_RAM_BYPASS_PSR2_MAN_TRACK, @@ -1109,7 +1109,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) /* WA 
1408330847 */ if (dev_priv->psr.psr2_sel_fetch_enabled && - (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || + (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0); diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 5e9fb349c829..4eaa4aa86ecd 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2084,14 +2084,18 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo, static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector, bool force) { - u16 response; + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; + u16 response; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + if (!INTEL_DISPLAY_ENABLED(i915)) + return connector_status_disconnected; + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ATTACHED_DISPLAYS, &response, 2)) diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 6b72223981be..63040cb0d4e1 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -1626,8 +1626,7 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state, hscale = drm_rect_calc_hscale(&plane_state->uapi.src, &plane_state->uapi.dst, 0, INT_MAX); - if (hscale < 0x10000) - return pixel_rate; + hscale = max(hscale, 0x10000u); /* Decimation steps at 2x,4x,8x,16x */ decimate = ilog2(hscale >> 16); @@ -1640,8 +1639,8 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state, limit -= decimate; /* -10% for RGB */ - if (fb->format->cpp[0] >= 4) - limit--; /* -10% for RGB */ + if (!fb->format->is_yuv) + limit--; /* * We should also do -10% if sprite scaling is enabled @@ -2845,7 +2844,7 @@ static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv, { /* Wa_14010477008:tgl[a0..c0],rkl[all] */ if (IS_ROCKETLAKE(dev_priv) || - IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0)) + IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0)) return false; return plane_id < PLANE_SPRITE4; diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 777032d9697b..7a7b99b015a5 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -1706,6 +1706,9 @@ intel_tv_detect(struct drm_connector *connector, drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n", connector->base.id, connector->name, force); + if (!INTEL_DISPLAY_ENABLED(i915)) + return connector_status_disconnected; + if (force) { struct intel_load_detect_pipe tmp; int ret; diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 6faabd4f6d49..54bcc6a6947c 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -293,8 +293,12 @@ struct bdb_general_features { #define DVO_PORT_HDMIE 12 /* 193 */ #define DVO_PORT_DPF 13 /* N/A */ #define DVO_PORT_HDMIF 14 /* N/A */ -#define DVO_PORT_DPG 15 -#define DVO_PORT_HDMIG 16 +#define DVO_PORT_DPG 15 /* 217 */ +#define DVO_PORT_HDMIG 16 /* 217 */ +#define DVO_PORT_DPH 
17 /* 217 */ +#define DVO_PORT_HDMIH 18 /* 217 */ +#define DVO_PORT_DPI 19 /* 217 */ +#define DVO_PORT_HDMII 20 /* 217 */ #define DVO_PORT_MIPIA 21 /* 171 */ #define DVO_PORT_MIPIB 22 /* 171 */ #define DVO_PORT_MIPIC 23 /* 171 */ @@ -330,6 +334,8 @@ enum vbt_gmbus_ddi { #define DP_AUX_E 0x50 #define DP_AUX_F 0x60 #define DP_AUX_G 0x70 +#define DP_AUX_H 0x80 +#define DP_AUX_I 0x90 #define VBT_DP_MAX_LINK_RATE_HBR3 0 #define VBT_DP_MAX_LINK_RATE_HBR2 1 diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 052e0b31a2da..5e5522923b1e 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -1585,6 +1585,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs }; static const struct drm_connector_funcs intel_dsi_connector_funcs = { + .detect = intel_panel_detect, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_connector_destroy, diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index d0a514301575..4070b00c3690 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -483,7 +483,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) { drm_err(&dev_priv->drm, - "Cant get a suitable ratio from DSI PLL ratios\n"); + "Can't get a suitable ratio from DSI PLL ratios\n"); return -ECHRNG; } else drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n"); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index cf5ecbde9e06..4fd38101bb56 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -390,24 +390,6 @@ __context_engines_static(const struct i915_gem_context *ctx) return rcu_dereference_protected(ctx->engines, true); } -static bool __reset_engine(struct intel_engine_cs *engine) -{ - struct intel_gt *gt = engine->gt; - bool success = false; - - if (!intel_has_reset_engine(gt)) - return false; - - if (!test_and_set_bit(I915_RESET_ENGINE + engine->id, - >->reset.flags)) { - success = intel_engine_reset(engine, NULL) == 0; - clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, - >->reset.flags); - } - - return success; -} - static void __reset_context(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { @@ -431,12 +413,7 @@ static bool __cancel_engine(struct intel_engine_cs *engine) * kill the banned context, we fallback to doing a local reset * instead. */ - if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) && - !intel_engine_pulse(engine)) - return true; - - /* If we are unable to send a pulse, try resetting this engine. */ - return __reset_engine(engine); + return intel_engine_pulse(engine) == 0; } static bool @@ -460,8 +437,8 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active) spin_lock(&locked->active.lock); } - if (!i915_request_completed(rq)) { - if (i915_request_is_active(rq) && rq->fence.error != -EIO) + if (i915_request_is_active(rq)) { + if (!i915_request_completed(rq)) *active = locked; ret = true; } @@ -479,13 +456,26 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce) if (!ce->timeline) return NULL; + /* + * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference + * to the request to prevent it being transferred to a new timeline + * (and onto a new timeline->requests list). 
+ */ rcu_read_lock(); - list_for_each_entry_rcu(rq, &ce->timeline->requests, link) { - if (i915_request_is_active(rq) && i915_request_completed(rq)) - continue; + list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { + bool found; + + /* timeline is already completed upto this point? */ + if (!i915_request_get_rcu(rq)) + break; /* Check with the backend if the request is inflight */ - if (__active_engine(rq, &engine)) + found = true; + if (likely(rcu_access_pointer(rq->timeline) == ce->timeline)) + found = __active_engine(rq, &engine); + + i915_request_put(rq); + if (found) break; } rcu_read_unlock(); @@ -493,7 +483,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce) return engine; } -static void kill_engines(struct i915_gem_engines *engines) +static void kill_engines(struct i915_gem_engines *engines, bool ban) { struct i915_gem_engines_iter it; struct intel_context *ce; @@ -508,7 +498,7 @@ static void kill_engines(struct i915_gem_engines *engines) for_each_gem_engine(ce, engines, it) { struct intel_engine_cs *engine; - if (intel_context_set_banned(ce)) + if (ban && intel_context_set_banned(ce)) continue; /* @@ -521,7 +511,7 @@ static void kill_engines(struct i915_gem_engines *engines) engine = active_engine(ce); /* First attempt to gracefully cancel the context */ - if (engine && !__cancel_engine(engine)) + if (engine && !__cancel_engine(engine) && ban) /* * If we are unable to send a preemptive pulse to bump * the context from the GPU, we have to resort to a full @@ -531,8 +521,10 @@ static void kill_engines(struct i915_gem_engines *engines) } } -static void kill_stale_engines(struct i915_gem_context *ctx) +static void kill_context(struct i915_gem_context *ctx) { + bool ban = (!i915_gem_context_is_persistent(ctx) || + !ctx->i915->params.enable_hangcheck); struct i915_gem_engines *pos, *next; spin_lock_irq(&ctx->stale.lock); @@ -545,7 +537,7 @@ static void kill_stale_engines(struct i915_gem_context *ctx) spin_unlock_irq(&ctx->stale.lock); - kill_engines(pos); + kill_engines(pos, ban); spin_lock_irq(&ctx->stale.lock); GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); @@ -557,11 +549,6 @@ static void kill_stale_engines(struct i915_gem_context *ctx) spin_unlock_irq(&ctx->stale.lock); } -static void kill_context(struct i915_gem_context *ctx) -{ - kill_stale_engines(ctx); -} - static void engines_idle_release(struct i915_gem_context *ctx, struct i915_gem_engines *engines) { @@ -596,7 +583,7 @@ static void engines_idle_release(struct i915_gem_context *ctx, kill: if (list_empty(&engines->link)) /* raced, already closed */ - kill_engines(engines); + kill_engines(engines, true); i915_sw_fence_commit(&engines->fence); } @@ -654,9 +641,7 @@ static void context_close(struct i915_gem_context *ctx) * case we opt to forcibly kill off all remaining requests on * context close. 
*/ - if (!i915_gem_context_is_persistent(ctx) || - !ctx->i915->params.enable_hangcheck) - kill_context(ctx); + kill_context(ctx); i915_gem_context_put(ctx); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 27fddc22a7c6..8dd295dbe241 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -48,12 +48,9 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme src = sg_next(src); } - if (!dma_map_sg_attrs(attachment->dev, - st->sgl, st->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC)) { - ret = -ENOMEM; + ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC); + if (ret) goto err_free_sg; - } return st; @@ -73,9 +70,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, { struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); - dma_unmap_sg_attrs(attachment->dev, - sg->sgl, sg->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC); + dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC); sg_free_table(sg); kfree(sg); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 5509946f1a1d..4b09bcd70cf4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2267,8 +2267,8 @@ struct eb_parse_work { struct i915_vma *batch; struct i915_vma *shadow; struct i915_vma *trampoline; - unsigned int batch_offset; - unsigned int batch_length; + unsigned long batch_offset; + unsigned long batch_length; }; static int __eb_parse(struct dma_fence_work *work) @@ -2338,6 +2338,9 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb, struct eb_parse_work *pw; int err; + GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset)); + GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length)); + pw = kzalloc(sizeof(*pw), GFP_KERNEL); if (!pw) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index d93eb36160c9..aee7ad3cc3c6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -364,7 +364,7 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src, vma[1] = i915_vma_instance(dst, vm, NULL); if (IS_ERR(vma[1])) - return PTR_ERR(vma); + return PTR_ERR(vma[1]); i915_gem_ww_ctx_init(&ww, true); intel_engine_pm_get(ce->engine); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index e8a083743e09..d6eeefab3d01 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -254,9 +254,35 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj, if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC) return NULL; + if (GEM_WARN_ON(type == I915_MAP_WC && + !static_cpu_has(X86_FEATURE_PAT))) + return NULL; + /* A single page can always be kmapped */ - if (n_pte == 1 && type == I915_MAP_WB) - return kmap(sg_page(sgt->sgl)); + if (n_pte == 1 && type == I915_MAP_WB) { + struct page *page = sg_page(sgt->sgl); + + /* + * On 32b, highmem uses a finite set of indirect PTE (i.e. + * vmap) to provide virtual mappings of the high pages. + * As these are finite, map_new_virtual() must wait for some + * other kmap() to finish when it runs out. If we map a large + * number of objects, there is no method for it to tell us + * to release the mappings, and we deadlock.
+ * + * However, if we make an explicit vmap of the page, that + * uses a larger vmalloc arena, and also has the ability + * to tell us to release unwanted mappings. Most importantly, + * it will fail and propagate an error instead of waiting + * forever. + * + * So if the page is beyond the 32b boundary, make an explicit + * vmap. On 64b, this check will be optimised away as we can + * directly kmap any page on the system. + */ + if (!PageHighMem(page)) + return kmap(page); + } mem = stack; if (n_pte > ARRAY_SIZE(stack)) { diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 5daf4a2be422..1f35e71429b4 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1617,7 +1617,7 @@ int i915_gem_huge_page_mock_selftests(void) out_put: i915_vm_put(&ppgtt->vm); out_unlock: - drm_dev_put(&dev_priv->drm); + mock_destroy_device(dev_priv); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 99becb86abd3..d3f87dc4eda3 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1997,7 +1997,7 @@ int i915_gem_context_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index 2a52b92586b9..0845ce1ae37c 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -272,7 +272,7 @@ int i915_gem_dmabuf_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c index faa5b6d91795..bf853c40ec65 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c @@ -85,7 +85,7 @@ int i915_gem_object_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c index a94243dc4c5c..8cee68c6a6dc 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c @@ -73,6 +73,6 @@ int i915_gem_phys_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c index debaf7b18ab5..be30b27e2926 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c @@ -28,10 +28,9 @@ static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment, sg = sg_next(sg); } - if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { - err = -ENOMEM; + err = dma_map_sgtable(attachment->dev, st, dir, 0); + if (err) goto err_st; - } return st; @@ -46,7 +45,7 @@ static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *st, enum dma_data_direction dir) { - dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir); + dma_unmap_sgtable(attachment->dev, st, dir, 0); 
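The dma-buf hunks above, together with the mock attachment hunk this passage continues into, convert open-coded scatterlist mapping over to the dma_map_sgtable()/dma_unmap_sgtable() helpers. The interesting part is the calling convention: dma_map_sg_attrs() returns the number of mapped entries (0 on failure), so each caller had to invent an errno, while dma_map_sgtable() returns 0 or a negative errno and records the mapped-entry count inside the sg_table itself. A minimal sketch of the before/after pattern, assuming a struct device *dev, a populated struct sg_table *st and a direction dir (illustrative context, not the driver's code):

	/* Old style: nents-or-zero return; the caller picks an errno. */
	if (!dma_map_sg_attrs(dev, st->sgl, st->nents, dir,
			      DMA_ATTR_SKIP_CPU_SYNC))
		return -ENOMEM;

	/* New style: the helper reports the real error code and tracks
	 * the mapped-entry count in the sg_table itself, so the matching
	 * dma_unmap_sgtable(dev, st, dir, 0) needs no nents bookkeeping.
	 */
	ret = dma_map_sgtable(dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		return ret;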
sg_free_table(st); kfree(st); } diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index d301dda1b261..92a3f25c4006 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -472,6 +472,7 @@ retry: err = i915_gem_ww_ctx_backoff(&ww); if (!err) goto retry; + rq = ERR_PTR(err); } else { rq = ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 08e2c000dcc3..7c3a1012e702 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -337,4 +337,13 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine) return intel_engine_has_preemption(engine); } +static inline bool +intel_engine_has_heartbeat(const struct intel_engine_cs *engine) +{ + if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL)) + return false; + + return READ_ONCE(engine->props.heartbeat_interval_ms); +} + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index 8ffdf676c0a0..5067d0524d4b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -177,36 +177,82 @@ void intel_engine_init_heartbeat(struct intel_engine_cs *engine) INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat); } +static int __intel_engine_pulse(struct intel_engine_cs *engine) +{ + struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER }; + struct intel_context *ce = engine->kernel_context; + struct i915_request *rq; + + lockdep_assert_held(&ce->timeline->mutex); + GEM_BUG_ON(!intel_engine_has_preemption(engine)); + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); + + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); + intel_context_exit(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags); + idle_pulse(engine, rq); + + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER); + + return 0; +} + +static unsigned long set_heartbeat(struct intel_engine_cs *engine, + unsigned long delay) +{ + unsigned long old; + + old = xchg(&engine->props.heartbeat_interval_ms, delay); + if (delay) + intel_engine_unpark_heartbeat(engine); + else + intel_engine_park_heartbeat(engine); + + return old; +} + int intel_engine_set_heartbeat(struct intel_engine_cs *engine, unsigned long delay) { - int err; + struct intel_context *ce = engine->kernel_context; + int err = 0; - /* Send one last pulse before to cleanup persistent hogs */ - if (!delay && IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) { - err = intel_engine_pulse(engine); - if (err) - return err; - } + if (!delay && !intel_engine_has_preempt_reset(engine)) + return -ENODEV; + + intel_engine_pm_get(engine); + + err = mutex_lock_interruptible(&ce->timeline->mutex); + if (err) + goto out_rpm; - WRITE_ONCE(engine->props.heartbeat_interval_ms, delay); + if (delay != engine->props.heartbeat_interval_ms) { + unsigned long saved = set_heartbeat(engine, delay); - if (intel_engine_pm_get_if_awake(engine)) { - if (delay) - intel_engine_unpark_heartbeat(engine); - else - intel_engine_park_heartbeat(engine); - intel_engine_pm_put(engine); + /* recheck current execution */ + if (intel_engine_has_preemption(engine)) { + err = __intel_engine_pulse(engine); + if (err) + set_heartbeat(engine, saved); + } } - return 0; + 
mutex_unlock(&ce->timeline->mutex); + +out_rpm: + intel_engine_pm_put(engine); + return err; } int intel_engine_pulse(struct intel_engine_cs *engine) { - struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER }; struct intel_context *ce = engine->kernel_context; - struct i915_request *rq; int err; if (!intel_engine_has_preemption(engine)) @@ -215,30 +261,12 @@ int intel_engine_pulse(struct intel_engine_cs *engine) if (!intel_engine_pm_get_if_awake(engine)) return 0; - if (mutex_lock_interruptible(&ce->timeline->mutex)) { - err = -EINTR; - goto out_rpm; - } - - intel_context_enter(ce); - rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); - intel_context_exit(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_unlock; + err = -EINTR; + if (!mutex_lock_interruptible(&ce->timeline->mutex)) { + err = __intel_engine_pulse(engine); + mutex_unlock(&ce->timeline->mutex); } - __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags); - idle_pulse(engine, rq); - - __i915_request_commit(rq); - __i915_request_queue(rq, &attr); - GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER); - err = 0; - -out_unlock: - mutex_unlock(&ce->timeline->mutex); -out_rpm: intel_engine_pm_put(engine); return err; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c index 4b7671ac5dca..104cb30e8c13 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c @@ -134,6 +134,7 @@ static void pool_retire(struct i915_active *ref) /* Return this object to the shrinker pool */ i915_gem_object_make_purgeable(node->obj); + GEM_BUG_ON(node->age); spin_lock_irqsave(&pool->lock, flags); list_add_rcu(&node->link, list); WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */ @@ -155,6 +156,7 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz) if (!node) return ERR_PTR(-ENOMEM); + node->age = 0; node->pool = pool; i915_active_init(&node->active, pool_active, pool_retire); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index a3f72b75c61e..6c580d0d9ea8 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -70,6 +70,19 @@ const struct i915_rev_steppings kbl_revids[] = { [7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 }, }; +const struct i915_rev_steppings tgl_uy_revids[] = { + [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_A0 }, + [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_C0 }, + [2] = { .gt_stepping = TGL_REVID_B1, .disp_stepping = TGL_REVID_C0 }, + [3] = { .gt_stepping = TGL_REVID_C0, .disp_stepping = TGL_REVID_D0 }, +}; + +/* Same GT stepping between tgl_uy_revids and tgl_revids doesn't mean the same HW */ +const struct i915_rev_steppings tgl_revids[] = { + [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_B0 }, + [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_D0 }, +}; + static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name) { wal->name = name; @@ -1219,13 +1232,13 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) gen12_gt_workarounds_init(i915, wal); /* Wa_1409420604:tgl */ - if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) + if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2, CPSSUNIT_CLKGATE_DIS); /* Wa_1607087056:tgl also known as BUG:1409180338 */ -
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) + if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); @@ -1660,7 +1673,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) { + if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) { /* * Wa_1607138336:tgl * Wa_1607063988:tgl @@ -1700,7 +1713,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) * Wa_1407928979:tgl A* * Wa_18011464164:tgl B0+ * Wa_22010931296:tgl B0+ - * Wa_14010919138:rkl + * Wa_14010919138:rkl,tgl */ wa_write_or(wal, GEN7_FF_THREAD_MODE, GEN12_FF_TESSELATION_DOP_GATE_DISABLE); @@ -1716,15 +1729,23 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN6_RC_SLEEP_PSMI_CONTROL, GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE | GEN8_RC_SEMA_IDLE_MSG_DISABLE); - } - if (IS_TIGERLAKE(i915)) { - /* Wa_1606700617:tgl */ + /* + * Wa_1606700617:tgl + * Wa_22010271021:tgl,rkl + */ wa_masked_en(wal, GEN9_CS_DEBUG_MODE1, FF_DOP_CLOCK_GATE_DISABLE); } + if (IS_GEN(i915, 12)) { + /* Wa_1406941453:gen12 */ + wa_masked_en(wal, + GEN10_SAMPLER_MODE, + ENABLE_SMALLPL); + } + if (IS_GEN(i915, 11)) { /* This is not an Wa. Enable for better image quality */ wa_masked_en(wal, diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index 96d164a3841d..19c2cb166e7c 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -158,7 +158,7 @@ out: __mock_hwsp_record(&state, na, NULL); kfree(state.history); err_put: - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index d0a599b51bfe..16b582cb97ed 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -936,7 +936,7 @@ static int cmd_reg_handler(struct parser_exec_state *s, return -EFAULT; } - if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) { + if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) { gvt_vgpu_err("%s access to non-render register (%x)\n", cmd, offset); return -EBADRQC; @@ -976,7 +976,7 @@ static int cmd_reg_handler(struct parser_exec_state *s, * inhibit context will restore with correct values */ if (IS_GEN(s->engine->i915, 9) && - intel_gvt_mmio_is_in_ctx(gvt, offset) && + intel_gvt_mmio_is_sr_in_ctx(gvt, offset) && !strncmp(cmd, "lri", 3)) { intel_gvt_hypervisor_read_gpa(s->vgpu, s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); @@ -992,8 +992,6 @@ static int cmd_reg_handler(struct parser_exec_state *s, } } - /* TODO: Update the global mask if this MMIO is a masked-MMIO */ - intel_gvt_mmio_set_cmd_accessed(gvt, offset); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index ff7f2515a6fe..9831361f181e 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -256,11 +256,11 @@ struct intel_gvt_mmio { /* This reg has been accessed by a VM */ #define F_ACCESSED (1 << 4) /* This reg has been accessed through GPU commands */ -#define F_CMD_ACCESSED (1 << 5) -/* This reg could be accessed by unaligned address */ #define F_UNALIGN (1 << 6) -/* This reg is saved/restored in context */ -#define F_IN_CTX (1 << 7) +/* This reg is in GVT's mmio save-restore list and in hardware + * logical context image + */
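The gvt.h changes that resume below (the F_SR_IN_CTX definition and the reworked accessors) all share one pattern: GVT keeps a single attribute byte per 32-bit register, so a register offset is shifted right by two before indexing mmio_attribute[]. A hedged sketch of that pattern, using only the flag values visible in this hunk; the helper name is invented for illustration:

	/* One attribute byte per 4-byte MMIO register. */
	static inline bool mmio_has_flag(const u8 *mmio_attribute,
					 unsigned int offset, u8 flag)
	{
		return mmio_attribute[offset >> 2] & flag;
	}

	/* e.g. mmio_has_flag(gvt->mmio.mmio_attribute, offset, F_UNALIGN)
	 * with F_UNALIGN (1 << 6), or F_SR_IN_CTX (1 << 7) defined below.
	 */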
+#define F_SR_IN_CTX (1 << 7) struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; @@ -597,39 +597,42 @@ static inline void intel_gvt_mmio_set_accessed( } /** - * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command + * intel_gvt_mmio_is_cmd_accessible - check if a MMIO can be accessed by command * @gvt: a GVT device * @offset: register offset * + * Returns: + * True if an MMIO is able to be accessed by GPU commands */ -static inline bool intel_gvt_mmio_is_cmd_access( +static inline bool intel_gvt_mmio_is_cmd_accessible( struct intel_gvt *gvt, unsigned int offset) { return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS; } /** - * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned + * intel_gvt_mmio_set_cmd_accessible - + * mark a MMIO as accessible by command * @gvt: a GVT device * @offset: register offset * */ -static inline bool intel_gvt_mmio_is_unalign( +static inline void intel_gvt_mmio_set_cmd_accessible( struct intel_gvt *gvt, unsigned int offset) { - return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN; + gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS; } /** - * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command + * intel_gvt_mmio_is_unalign - check if a MMIO may be accessed unaligned * @gvt: a GVT device * @offset: register offset * */ -static inline void intel_gvt_mmio_set_cmd_accessed( +static inline bool intel_gvt_mmio_is_unalign( struct intel_gvt *gvt, unsigned int offset) { - gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED; + return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN; } /** @@ -648,30 +651,33 @@ static inline bool intel_gvt_mmio_has_mode_mask( } /** - * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask + * intel_gvt_mmio_is_sr_in_ctx - + * check if an MMIO has F_SR_IN_CTX mask * @gvt: a GVT device * @offset: register offset * * Returns: - * True if a MMIO has a in-context mask, false if it isn't. + * True if an MMIO has an F_SR_IN_CTX mask, false if it isn't.
* */ -static inline bool intel_gvt_mmio_is_in_ctx( +static inline bool intel_gvt_mmio_is_sr_in_ctx( struct intel_gvt *gvt, unsigned int offset) { - return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX; + return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX; } /** - * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context + * intel_gvt_mmio_set_sr_in_ctx - + * mark an MMIO in GVT's mmio save-restore list and also + * in hardware logical context image * @gvt: a GVT device * @offset: register offset * */ -static inline void intel_gvt_mmio_set_in_ctx( +static inline void intel_gvt_mmio_set_sr_in_ctx( struct intel_gvt *gvt, unsigned int offset) { - gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX; + gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX; } void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 05f3bc98d242..3be37e6fe33d 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1892,7 +1892,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; - MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL, + MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); @@ -1900,7 +1900,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); MMIO_D(SDEISR, D_ALL); - MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL); + MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL, gamw_echo_dev_rw_ia_write); @@ -1927,11 +1928,11 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL); MMIO_D(GEN7_CXT_SIZE, D_ALL); - MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL); - MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL); + MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL); + MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL); + MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL); + MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL); + MMIO_RING_GM(RING_START, D_ALL, NULL, NULL); /* RING MODE */ #define RING_REG(base) _MMIO((base) + 0x29c) @@ -2686,7 +2687,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL); + MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL); MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL); @@ -2771,7 +2772,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, intel_vgpu_reg_master_irq_handler); - MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, + MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0, mmio_read_from_hw, NULL); #define RING_REG(base) _MMIO((base) + 0xd0) @@ -2785,7 +2786,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) #undef RING_REG #define RING_REG(base) _MMIO((base) + 0x234) - MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS, +
MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); #undef RING_REG @@ -2820,7 +2821,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL); #undef RING_REG - MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write); + MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write); MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); @@ -2921,7 +2922,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL); + MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL); MMIO_D(DC_STATE_EN, D_SKL_PLUS); MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS); @@ -3137,7 +3138,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL); + MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL); MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS); return 0; @@ -3357,7 +3358,10 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) gvt->mmio.mmio_attribute = NULL; } -/* Special MMIO blocks. */ +/* Special MMIO blocks. Registers in MMIO block ranges must not be command + * accessible (i.e. must have no F_CMD_ACCESS flag); otherwise, + * cmd_reg_handler in cmd_parser.c needs to be updated as well. + */ static struct gvt_mmio_block mmio_blocks[] = { {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL}, {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 291993615af9..b6811f6a230d 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -251,6 +251,9 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) /* set the bit 0:2(Core C-State ) to C0 */ vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; + /* uC reset: HW expects GS_MIA_IN_RESET to be set */ + vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; + if (IS_BROXTON(vgpu->gvt->gt->i915)) { vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1)); diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 86a60bdf0818..afe574d6b3b5 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -595,7 +595,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) i915_mmio_reg_valid(mmio->reg); mmio++) { if (mmio->in_context) { gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++; - intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg); + intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg); } } } diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 5ac4a999f05a..e88970256e8e 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1136,7 +1136,7 @@ find_reg(const struct intel_engine_cs *engine, u32 addr) /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, struct drm_i915_gem_object *src_obj, - u32 offset, u32 length) + unsigned long offset, unsigned long length) { bool needs_clflush; void *dst, *src; @@ -1166,8 +1166,8 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, } } if (IS_ERR(src)) { +
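The new comment on mmio_blocks states an invariant the build does not enforce. A hypothetical debug walk that would assert it (not part of the patch; the block field names are inferred from the initializers above and may differ):

static void assert_blocks_not_cmd_accessible(struct intel_gvt *gvt)
{
	unsigned int i, offset;

	for (i = 0; i < gvt->mmio.num_mmio_block; i++) {
		struct gvt_mmio_block *block = &gvt->mmio.mmio_block[i];

		/* no register inside a special block may be command accessible */
		for (offset = 0; offset < block->size; offset += 4)
			WARN_ON(intel_gvt_mmio_is_cmd_accessible(gvt,
				i915_mmio_reg_offset(block->offset) + offset));
	}
}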
unsigned long x, n; void *ptr; - int x, n; /* * We can avoid clflushing partial cachelines before the write @@ -1184,7 +1184,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, ptr = dst; x = offset_in_page(offset); for (n = offset >> PAGE_SHIFT; length; n++) { - int len = min_t(int, length, PAGE_SIZE - x); + int len = min(length, PAGE_SIZE - x); src = kmap_atomic(i915_gem_object_get_page(src_obj, n)); if (needs_clflush) @@ -1414,8 +1414,8 @@ static bool shadow_needs_clflush(struct drm_i915_gem_object *obj) */ int intel_engine_cmd_parser(struct intel_engine_cs *engine, struct i915_vma *batch, - u32 batch_offset, - u32 batch_length, + unsigned long batch_offset, + unsigned long batch_length, struct i915_vma *shadow, bool trampoline) { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 784219962193..ea469168cd44 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -326,6 +326,7 @@ static void print_context_stats(struct seq_file *m, } i915_gem_context_unlock_engines(ctx); + mutex_lock(&ctx->mutex); if (!IS_ERR_OR_NULL(ctx->file_priv)) { struct file_stats stats = { .vm = rcu_access_pointer(ctx->vm), @@ -346,6 +347,7 @@ static void print_context_stats(struct seq_file *m, print_file_stats(m, name, stats); } + mutex_unlock(&ctx->mutex); spin_lock(&i915->gem.contexts.lock); list_safe_reset_next(ctx, cn, link); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 00292a849c34..acc32066cec3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -58,7 +58,6 @@ #include "display/intel_hotplug.h" #include "display/intel_overlay.h" #include "display/intel_pipe_crc.h" -#include "display/intel_psr.h" #include "display/intel_sprite.h" #include "display/intel_vga.h" @@ -216,125 +215,6 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv) release_resource(&dev_priv->mch_res); } -/* part #1: call before irq install */ -static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915) -{ - int ret; - - if (i915_inject_probe_failure(i915)) - return -ENODEV; - - if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { - ret = drm_vblank_init(&i915->drm, - INTEL_NUM_PIPES(i915)); - if (ret) - return ret; - } - - intel_bios_init(i915); - - ret = intel_vga_register(i915); - if (ret) - goto cleanup_bios; - - intel_power_domains_init_hw(i915, false); - - intel_csr_ucode_init(i915); - - ret = intel_modeset_init_noirq(i915); - if (ret) - goto cleanup_vga_client_pw_domain_csr; - - return 0; - -cleanup_vga_client_pw_domain_csr: - intel_csr_ucode_fini(i915); - intel_power_domains_driver_remove(i915); - intel_vga_unregister(i915); -cleanup_bios: - intel_bios_driver_remove(i915); - return ret; -} - -/* part #2: call after irq install */ -static int i915_driver_modeset_probe(struct drm_i915_private *i915) -{ - int ret; - - /* Important: The output setup functions called by modeset_init need - * working irqs for e.g. gmbus and dp aux transfers. */ - ret = intel_modeset_init(i915); - if (ret) - goto out; - - ret = i915_gem_init(i915); - if (ret) - goto cleanup_modeset; - - intel_overlay_setup(i915); - - if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915)) - return 0; - - ret = intel_fbdev_init(&i915->drm); - if (ret) - goto cleanup_gem; - - /* Only enable hotplug handling once the fbdev is fully set up. 
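The u32 to unsigned long widening in copy_batch() and intel_engine_cmd_parser() matters because min_t(int, length, ...) silently truncates once a batch length no longer fits in an int. A self-contained sketch of the page-walk arithmetic being hardened (PAGE_SHIFT value assumed; the kernel additionally kmaps each page and clflushes, elided here):

#include <string.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

static void copy_by_pages(char *dst, const char *const *pages,
			  unsigned long offset, unsigned long length)
{
	unsigned long x = offset & (PAGE_SIZE - 1); /* offset_in_page() */
	unsigned long n = offset >> PAGE_SHIFT;     /* first page index */

	while (length) {
		/* plain min(); with min_t(int, ...) a huge length
		 * could wrap negative and corrupt the copy */
		unsigned long len = length < PAGE_SIZE - x ?
				    length : PAGE_SIZE - x;

		memcpy(dst, pages[n] + x, len);
		dst += len;
		length -= len;
		x = 0; /* later pages are copied from their start */
		n++;
	}
}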
*/ - intel_hpd_init(i915); - - intel_init_ipc(i915); - - intel_psr_set_force_mode_changed(i915->psr.dp); - - return 0; - -cleanup_gem: - i915_gem_suspend(i915); - i915_gem_driver_remove(i915); - i915_gem_driver_release(i915); -cleanup_modeset: - /* FIXME */ - intel_modeset_driver_remove(i915); - intel_irq_uninstall(i915); - intel_modeset_driver_remove_noirq(i915); -out: - return ret; -} - -/* part #1: call before irq uninstall */ -static void i915_driver_modeset_remove(struct drm_i915_private *i915) -{ - intel_modeset_driver_remove(i915); -} - -/* part #2: call after irq uninstall */ -static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915) -{ - intel_csr_ucode_fini(i915); - - intel_power_domains_driver_remove(i915); - - intel_vga_unregister(i915); - - intel_bios_driver_remove(i915); -} - -static void intel_init_dpio(struct drm_i915_private *dev_priv) -{ - /* - * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), - * CHV x1 PHY (DP/HDMI D) - * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) - */ - if (IS_CHERRYVIEW(dev_priv)) { - DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; - DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; - } else if (IS_VALLEYVIEW(dev_priv)) { - DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; - } -} - static int i915_workqueues_init(struct drm_i915_private *dev_priv) { /* @@ -463,7 +343,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_detect_pch(dev_priv); intel_pm_setup(dev_priv); - intel_init_dpio(dev_priv); ret = intel_power_domains_init(dev_priv); if (ret < 0) goto err_gem; @@ -798,7 +677,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) drm_err(&dev_priv->drm, "Failed to register driver for userspace access!\n"); - if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) { + if (HAS_DISPLAY(dev_priv)) { /* Must be done after probing outputs */ intel_opregion_register(dev_priv); acpi_video_register(); @@ -821,7 +700,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) * We need to coordinate the hotplugs with the asynchronous fbdev * configuration, for which we use the fbdev->async_cookie. 
*/ - if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) + if (HAS_DISPLAY(dev_priv)) drm_kms_helper_poll_init(dev); intel_power_domains_enable(dev_priv); @@ -988,7 +867,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret < 0) goto out_cleanup_mmio; - ret = i915_driver_modeset_probe_noirq(i915); + ret = intel_modeset_init_noirq(i915); if (ret < 0) goto out_cleanup_hw; @@ -996,10 +875,18 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_cleanup_modeset; - ret = i915_driver_modeset_probe(i915); - if (ret < 0) + ret = intel_modeset_init_nogem(i915); + if (ret) goto out_cleanup_irq; + ret = i915_gem_init(i915); + if (ret) + goto out_cleanup_modeset2; + + ret = intel_modeset_init(i915); + if (ret) + goto out_cleanup_gem; + i915_driver_register(i915); enable_rpm_wakeref_asserts(&i915->runtime_pm); @@ -1010,10 +897,20 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; +out_cleanup_gem: + i915_gem_suspend(i915); + i915_gem_driver_remove(i915); + i915_gem_driver_release(i915); +out_cleanup_modeset2: + /* FIXME clean up the error path */ + intel_modeset_driver_remove(i915); + intel_irq_uninstall(i915); + intel_modeset_driver_remove_noirq(i915); + goto out_cleanup_modeset; out_cleanup_irq: intel_irq_uninstall(i915); out_cleanup_modeset: - i915_driver_modeset_remove_noirq(i915); + intel_modeset_driver_remove_nogem(i915); out_cleanup_hw: i915_driver_hw_remove(i915); intel_memory_regions_driver_release(i915); @@ -1045,7 +942,7 @@ void i915_driver_remove(struct drm_i915_private *i915) intel_gvt_driver_remove(i915); - i915_driver_modeset_remove(i915); + intel_modeset_driver_remove(i915); intel_irq_uninstall(i915); @@ -1054,7 +951,7 @@ void i915_driver_remove(struct drm_i915_private *i915) i915_reset_error_state(i915); i915_gem_driver_remove(i915); - i915_driver_modeset_remove_noirq(i915); + intel_modeset_driver_remove_nogem(i915); i915_driver_hw_remove(i915); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ab17084af0ff..eef9a821c49c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -108,18 +108,11 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20200824" -#define DRIVER_TIMESTAMP 1598293597 +#define DRIVER_DATE "20200917" +#define DRIVER_TIMESTAMP 1600375437 struct drm_i915_gem_object; -/* - * The code assumes that the hpd_pins below have consecutive values and - * starting with HPD_PORT_A, the HPD pin associated with any port can be - * retrieved by adding the corresponding port (or phy) enum value to - * HPD_PORT_A in most cases. 
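The probe rework above replaces the old two-part modeset probe with explicitly ordered phases. A stubbed sketch of the resulting sequence (the intel_modeset_init_* names come from the hunks; the stubs and the irq_install() stand-in are illustrative only):

#include <stdio.h>

static int modeset_init_noirq(void) { return 0; } /* before IRQ install */
static int irq_install(void)        { return 0; } /* hypothetical stand-in */
static int modeset_init_nogem(void) { return 0; } /* needs IRQs, not GEM */
static int gem_init(void)           { return 0; }
static int modeset_init(void)       { return 0; } /* last, needs GEM */

int main(void)
{
	/* A failure at any step unwinds the completed steps in reverse,
	 * mirroring the out_cleanup_* labels in the hunk above. */
	if (modeset_init_noirq() || irq_install() || modeset_init_nogem() ||
	    gem_init() || modeset_init())
		fprintf(stderr, "probe failed\n");
	return 0;
}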
For example: - * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A - */ enum hpd_pin { HPD_NONE = 0, HPD_TV = HPD_NONE, /* TV is known to be unreliable */ @@ -131,10 +124,12 @@ enum hpd_pin { HPD_PORT_C, HPD_PORT_D, HPD_PORT_E, - HPD_PORT_F, - HPD_PORT_G, - HPD_PORT_H, - HPD_PORT_I, + HPD_PORT_TC1, + HPD_PORT_TC2, + HPD_PORT_TC3, + HPD_PORT_TC4, + HPD_PORT_TC5, + HPD_PORT_TC6, HPD_NUM_PINS }; @@ -537,13 +532,9 @@ struct intel_gmbus { struct i915_suspend_saved_registers { u32 saveDSPARB; - u32 saveFBC_CONTROL; - u32 saveCACHE_MODE_0; - u32 saveMI_ARB_STATE; u32 saveSWF0[16]; u32 saveSWF1[16]; u32 saveSWF3[3]; - u32 savePCH_PORT_HOTPLUG; u16 saveGCDGMBUS; }; @@ -1020,8 +1011,6 @@ struct drm_i915_private { */ u8 active_pipes; - int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; - struct i915_wa_list gt_wa_list; struct i915_frontbuffer_tracking fb_tracking; @@ -1572,12 +1561,41 @@ extern const struct i915_rev_steppings kbl_revids[]; #define IS_EHL_REVID(p, since, until) \ (IS_ELKHARTLAKE(p) && IS_REVID(p, since, until)) -#define TGL_REVID_A0 0x0 -#define TGL_REVID_B0 0x1 -#define TGL_REVID_C0 0x2 +enum { + TGL_REVID_A0, + TGL_REVID_B0, + TGL_REVID_B1, + TGL_REVID_C0, + TGL_REVID_D0, +}; + +extern const struct i915_rev_steppings tgl_uy_revids[]; +extern const struct i915_rev_steppings tgl_revids[]; + +static inline const struct i915_rev_steppings * +tgl_revids_get(struct drm_i915_private *dev_priv) +{ + if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) + return tgl_uy_revids; + else + return tgl_revids; +} + +#define IS_TGL_DISP_REVID(p, since, until) \ + (IS_TIGERLAKE(p) && \ + tgl_revids_get(p)->disp_stepping >= (since) && \ + tgl_revids_get(p)->disp_stepping <= (until)) + +#define IS_TGL_UY_GT_REVID(p, since, until) \ + ((IS_TGL_U(p) || IS_TGL_Y(p)) && \ + tgl_uy_revids->gt_stepping >= (since) && \ + tgl_uy_revids->gt_stepping <= (until)) -#define IS_TGL_REVID(p, since, until) \ - (IS_TIGERLAKE(p) && IS_REVID(p, since, until)) +#define IS_TGL_GT_REVID(p, since, until) \ + (IS_TIGERLAKE(p) && \ + !(IS_TGL_U(p) || IS_TGL_Y(p)) && \ + tgl_revids->gt_stepping >= (since) && \ + tgl_revids->gt_stepping <= (until)) #define RKL_REVID_A0 0x0 #define RKL_REVID_B0 0x1 @@ -1931,8 +1949,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); int intel_engine_cmd_parser(struct intel_engine_cs *engine, struct i915_vma *batch, - u32 batch_offset, - u32 batch_length, + unsigned long batch_offset, + unsigned long batch_length, struct i915_vma *shadow, bool trampoline); #define I915_CMD_PARSER_TRAMPOLINE_SIZE 8 diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 3e6cbb0d1150..a635ec8d0b94 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -311,6 +311,8 @@ static int compress_page(struct i915_vma_compress *c, if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK) return -EIO; + + cond_resched(); } while (zstream->avail_in); /* Fallback to uncompressed if we increase size? 
*/ @@ -397,6 +399,7 @@ static int compress_page(struct i915_vma_compress *c, if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) memcpy(ptr, src, PAGE_SIZE); dst->pages[dst->page_count++] = ptr; + cond_resched(); return 0; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index f113fe44572b..759f523c6a6b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -132,40 +132,24 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = { }; static const u32 hpd_gen11[HPD_NUM_PINS] = { - [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, - [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, - [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, - [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG, -}; - -static const u32 hpd_gen12[HPD_NUM_PINS] = { - [HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, - [HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, - [HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, - [HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG, - [HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG, - [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG, + [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(PORT_TC1) | GEN11_TBT_HOTPLUG(PORT_TC1), + [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(PORT_TC2) | GEN11_TBT_HOTPLUG(PORT_TC2), + [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(PORT_TC3) | GEN11_TBT_HOTPLUG(PORT_TC3), + [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(PORT_TC4) | GEN11_TBT_HOTPLUG(PORT_TC4), + [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(PORT_TC5) | GEN11_TBT_HOTPLUG(PORT_TC5), + [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(PORT_TC6) | GEN11_TBT_HOTPLUG(PORT_TC6), }; static const u32 hpd_icp[HPD_NUM_PINS] = { [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), - [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1), - [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2), - [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3), - [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4), -}; - -static const u32 hpd_tgp[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), - [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C), - [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1), - [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2), - [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3), - [HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4), - [HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5), - [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6), + [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(PORT_TC1), + [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(PORT_TC2), + [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(PORT_TC3), + [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(PORT_TC4), + [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(PORT_TC5), + [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(PORT_TC6), }; static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) @@ -181,9 +165,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) return; } - if (INTEL_GEN(dev_priv) >= 12) - hpd->hpd = hpd_gen12; - else if (INTEL_GEN(dev_priv) >= 11) + if (INTEL_GEN(dev_priv) >= 11) hpd->hpd = hpd_gen11; else if (IS_GEN9_LP(dev_priv)) hpd->hpd = hpd_bxt; @@ -197,9 +179,8 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)) return; - if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv)) - hpd->pch_hpd = hpd_tgp; - else if (HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv)) + if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) || + HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv)) hpd->pch_hpd = hpd_icp; else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv)) 
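The rewritten hpd_gen11/hpd_icp tables rely on the parameterised trigger-bit macros introduced in the i915_reg.h hunk further down. A standalone check that they reproduce the per-port literals they replace (the enum starting at 0 is an assumption, but it is what makes the arithmetic line up with the old GEN12_TC5_HOTPLUG value):

#include <stdint.h>

enum { PORT_TC1, PORT_TC2, PORT_TC3, PORT_TC4, PORT_TC5, PORT_TC6 };

#define GEN11_TC_HOTPLUG(tc_port)  (1u << ((tc_port) + 16))
#define GEN11_TBT_HOTPLUG(tc_port) (1u << (tc_port))

/* PORT_TC5 == 4, so this is (1 << 20) | (1 << 4) -- exactly the deleted
 * GEN12_TC5_HOTPLUG and GEN12_TBT5_HOTPLUG literals. */
static const uint32_t hpd_port_tc5 =
	GEN11_TC_HOTPLUG(PORT_TC5) | GEN11_TBT_HOTPLUG(PORT_TC5);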
hpd->pch_hpd = hpd_spt; @@ -1049,33 +1030,17 @@ out: static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { - case HPD_PORT_C: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); - case HPD_PORT_D: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); - case HPD_PORT_E: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); - case HPD_PORT_F: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); - default: - return false; - } -} - -static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_D: + case HPD_PORT_TC1: return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); - case HPD_PORT_E: + case HPD_PORT_TC2: return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); - case HPD_PORT_F: + case HPD_PORT_TC3: return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); - case HPD_PORT_G: + case HPD_PORT_TC4: return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); - case HPD_PORT_H: + case HPD_PORT_TC5: return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5); - case HPD_PORT_I: + case HPD_PORT_TC6: return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6); default: return false; @@ -1113,33 +1078,17 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { - case HPD_PORT_C: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); - case HPD_PORT_D: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); - case HPD_PORT_E: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); - case HPD_PORT_F: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); - default: - return false; - } -} - -static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_D: + case HPD_PORT_TC1: return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); - case HPD_PORT_E: + case HPD_PORT_TC2: return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); - case HPD_PORT_F: + case HPD_PORT_TC3: return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); - case HPD_PORT_G: + case HPD_PORT_TC4: return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); - case HPD_PORT_H: + case HPD_PORT_TC5: return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5); - case HPD_PORT_I: + case HPD_PORT_TC6: return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6); default: return false; @@ -1893,19 +1842,16 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { u32 ddi_hotplug_trigger, tc_hotplug_trigger; u32 pin_mask = 0, long_mask = 0; - bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val); if (HAS_PCH_TGP(dev_priv)) { ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP; - tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect; } else if (HAS_PCH_JSP(dev_priv)) { ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; tc_hotplug_trigger = 0; } else if (HAS_PCH_MCC(dev_priv)) { ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1); - tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect; } else { drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv), "Unrecognized PCH type 0x%x\n", @@ -1913,7 +1859,6 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; - tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect; } if (ddi_hotplug_trigger) { @@ -1937,7 +1882,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_get_hpd_pins(dev_priv, 
&pin_mask, &long_mask, tc_hotplug_trigger, dig_hotplug_reg, dev_priv->hotplug.pch_hpd, - tc_port_hotplug_long_detect); + icp_tc_port_hotplug_long_detect); } if (pin_mask) @@ -2185,12 +2130,6 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) u32 pin_mask = 0, long_mask = 0; u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; - long_pulse_detect_func long_pulse_detect; - - if (INTEL_GEN(dev_priv) >= 12) - long_pulse_detect = gen12_port_hotplug_long_detect; - else - long_pulse_detect = gen11_port_hotplug_long_detect; if (trigger_tc) { u32 dig_hotplug_reg; @@ -2201,7 +2140,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, dig_hotplug_reg, dev_priv->hotplug.hpd, - long_pulse_detect); + gen11_port_hotplug_long_detect); } if (trigger_tbt) { @@ -2213,7 +2152,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, dig_hotplug_reg, dev_priv->hotplug.hpd, - long_pulse_detect); + gen11_port_hotplug_long_detect); } if (pin_mask) @@ -3048,6 +2987,18 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, return enabled_irqs; } +static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv, + const u32 hpd[HPD_NUM_PINS]) +{ + struct intel_encoder *encoder; + u32 hotplug_irqs = 0; + + for_each_intel_encoder(&dev_priv->drm, encoder) + hotplug_irqs |= hpd[encoder->hpd_pin]; + + return hotplug_irqs; +} + static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) { u32 hotplug; @@ -3077,50 +3028,50 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - if (HAS_PCH_IBX(dev_priv)) - hotplug_irqs = SDE_HOTPLUG_MASK; - else - hotplug_irqs = SDE_HOTPLUG_MASK_CPT; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); ibx_hpd_detection_setup(dev_priv); } -static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv, - u32 ddi_hotplug_enable_mask, - u32 tc_hotplug_enable_mask) +static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv, + u32 enable_mask) { u32 hotplug; hotplug = I915_READ(SHOTPLUG_CTL_DDI); - hotplug |= ddi_hotplug_enable_mask; + hotplug |= enable_mask; I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); +} - if (tc_hotplug_enable_mask) { - hotplug = I915_READ(SHOTPLUG_CTL_TC); - hotplug |= tc_hotplug_enable_mask; - I915_WRITE(SHOTPLUG_CTL_TC, hotplug); - } +static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv, + u32 enable_mask) +{ + u32 hotplug; + + hotplug = I915_READ(SHOTPLUG_CTL_TC); + hotplug |= enable_mask; + I915_WRITE(SHOTPLUG_CTL_TC, hotplug); } static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv, - u32 sde_ddi_mask, u32 sde_tc_mask, u32 ddi_enable_mask, u32 tc_enable_mask) { u32 hotplug_irqs, enabled_irqs; - hotplug_irqs = sde_ddi_mask | sde_tc_mask; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask); + 
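The intel_hpd_hotplug_irqs() helper added above is what lets the later hunks delete the fixed SDE_HOTPLUG_MASK* and GEN11_DE_*_HOTPLUG_MASK constants: the mask to unmask now follows the encoders that actually exist. A simplified standalone model of the reduction (array-based; the kernel iterates struct intel_encoder via for_each_intel_encoder()):

#include <stdint.h>

#define HPD_NUM_PINS 16 /* illustrative size */

static uint32_t hotplug_irqs_for(const uint32_t hpd[HPD_NUM_PINS],
				 const int *encoder_pins, int count)
{
	uint32_t hotplug_irqs = 0;
	int i;

	/* OR in trigger bits only for pins backed by a real encoder */
	for (i = 0; i < count; i++)
		hotplug_irqs |= hpd[encoder_pins[i]];

	return hotplug_irqs;
}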
icp_ddi_hpd_detection_setup(dev_priv, ddi_enable_mask); + if (tc_enable_mask) + icp_tc_hpd_detection_setup(dev_priv, tc_enable_mask); } /* @@ -3130,7 +3081,6 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv, static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv) { icp_hpd_irq_setup(dev_priv, - SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1), ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1)); } @@ -3142,7 +3092,6 @@ static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv) static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv) { icp_hpd_irq_setup(dev_priv, - SDE_DDI_MASK_TGP, 0, TGP_DDI_HPD_ENABLE_MASK, 0); } @@ -3154,14 +3103,18 @@ static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | - GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); + GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) | + GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) | + GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6); I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | - GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); + GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) | + GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) | + GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6); I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); } @@ -3171,7 +3124,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) u32 val; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); val = I915_READ(GEN11_DE_HPD_IMR); val &= ~hotplug_irqs; @@ -3182,10 +3135,10 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) gen11_hpd_detection_setup(dev_priv); if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP) - icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP, + icp_hpd_irq_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP, + icp_hpd_irq_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK); } @@ -3221,8 +3174,8 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); - hotplug_irqs = SDE_HOTPLUG_MASK_SPT; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -3249,22 +3202,13 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - if (INTEL_GEN(dev_priv) >= 8) { - hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); + if (INTEL_GEN(dev_priv) >= 8) bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); - } else if (INTEL_GEN(dev_priv) >= 7) { - hotplug_irqs = DE_DP_A_HOTPLUG_IVB; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - - ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); - } else { - hotplug_irqs = DE_DP_A_HOTPLUG; - 
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - + else ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); - } ilk_hpd_detection_setup(dev_priv); @@ -3313,7 +3257,7 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); @@ -3534,17 +3478,18 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv) gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR); I915_WRITE(SDEIMR, ~mask); - if (HAS_PCH_TGP(dev_priv)) - icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, - TGP_TC_HPD_ENABLE_MASK); - else if (HAS_PCH_JSP(dev_priv)) - icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0); - else if (HAS_PCH_MCC(dev_priv)) - icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, - ICP_TC_HPD_ENABLE(PORT_TC1)); - else - icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, - ICP_TC_HPD_ENABLE_MASK); + if (HAS_PCH_TGP(dev_priv)) { + icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK); + icp_tc_hpd_detection_setup(dev_priv, TGP_TC_HPD_ENABLE_MASK); + } else if (HAS_PCH_JSP(dev_priv)) { + icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK); + } else if (HAS_PCH_MCC(dev_priv)) { + icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK); + icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE(PORT_TC1)); + } else { + icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK); + icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE_MASK); + } } static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ac691927e29d..d805d4da6181 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1382,7 +1382,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define DPIO_CMNRST (1 << 0) #define DPIO_PHY(pipe) ((pipe) >> 1) -#define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy]) /* * Per pipe/PLL DPIO regs @@ -7760,32 +7759,20 @@ enum { #define GEN11_DE_HPD_IMR _MMIO(0x44474) #define GEN11_DE_HPD_IIR _MMIO(0x44478) #define GEN11_DE_HPD_IER _MMIO(0x4447c) -#define GEN12_TC6_HOTPLUG (1 << 21) -#define GEN12_TC5_HOTPLUG (1 << 20) -#define GEN11_TC4_HOTPLUG (1 << 19) -#define GEN11_TC3_HOTPLUG (1 << 18) -#define GEN11_TC2_HOTPLUG (1 << 17) -#define GEN11_TC1_HOTPLUG (1 << 16) #define GEN11_TC_HOTPLUG(tc_port) (1 << ((tc_port) + 16)) -#define GEN11_DE_TC_HOTPLUG_MASK (GEN12_TC6_HOTPLUG | \ - GEN12_TC5_HOTPLUG | \ - GEN11_TC4_HOTPLUG | \ - GEN11_TC3_HOTPLUG | \ - GEN11_TC2_HOTPLUG | \ - GEN11_TC1_HOTPLUG) -#define GEN12_TBT6_HOTPLUG (1 << 5) -#define GEN12_TBT5_HOTPLUG (1 << 4) -#define GEN11_TBT4_HOTPLUG (1 << 3) -#define GEN11_TBT3_HOTPLUG (1 << 2) -#define GEN11_TBT2_HOTPLUG (1 << 1) -#define GEN11_TBT1_HOTPLUG (1 << 0) +#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC_HOTPLUG(PORT_TC6) | \ + GEN11_TC_HOTPLUG(PORT_TC5) | \ + GEN11_TC_HOTPLUG(PORT_TC4) | \ + GEN11_TC_HOTPLUG(PORT_TC3) | \ + GEN11_TC_HOTPLUG(PORT_TC2) | \ + GEN11_TC_HOTPLUG(PORT_TC1)) #define GEN11_TBT_HOTPLUG(tc_port) (1 << (tc_port)) -#define GEN11_DE_TBT_HOTPLUG_MASK (GEN12_TBT6_HOTPLUG | \ - GEN12_TBT5_HOTPLUG | \ - GEN11_TBT4_HOTPLUG | \ - GEN11_TBT3_HOTPLUG | \ - GEN11_TBT2_HOTPLUG | \ - GEN11_TBT1_HOTPLUG) +#define 
GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT_HOTPLUG(PORT_TC6) | \ + GEN11_TBT_HOTPLUG(PORT_TC5) | \ + GEN11_TBT_HOTPLUG(PORT_TC4) | \ + GEN11_TBT_HOTPLUG(PORT_TC3) | \ + GEN11_TBT_HOTPLUG(PORT_TC2) | \ + GEN11_TBT_HOTPLUG(PORT_TC1)) #define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030) #define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038) @@ -9315,6 +9302,7 @@ enum { #define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7) #define GEN10_SAMPLER_MODE _MMIO(0xE18C) +#define ENABLE_SMALLPL REG_BIT(15) #define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5) /* IVYBRIDGE DPF */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 11e272422fb7..0e813819b041 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -542,8 +542,13 @@ bool __i915_request_submit(struct i915_request *request) if (i915_request_completed(request)) goto xfer; + if (unlikely(intel_context_is_closed(request->context) && + !intel_engine_has_heartbeat(engine))) + intel_context_set_banned(request->context); + if (unlikely(intel_context_is_banned(request->context))) i915_request_set_error_once(request, -EIO); + if (unlikely(fatal_error(request->fence.error))) __i915_request_skip(request); @@ -593,16 +598,8 @@ xfer: __notify_execute_cb_irq(request); /* We may be recursing from the signal callback of another i915 fence */ - if (!i915_request_signaled(request)) { - spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); - - if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &request->fence.flags) && - !i915_request_enable_breadcrumb(request)) - intel_engine_signal_breadcrumbs(engine); - - spin_unlock(&request->lock); - } + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) + i915_request_enable_breadcrumb(request); return result; } diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index ed2be3489f8e..7b64e7137270 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -34,17 +34,25 @@ static void i915_save_display(struct drm_i915_private *dev_priv) { + struct pci_dev *pdev = dev_priv->drm.pdev; + /* Display arbitration control */ if (INTEL_GEN(dev_priv) <= 4) dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); - /* save FBC interval */ - if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv)) - dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); + if (IS_GEN(dev_priv, 4)) + pci_read_config_word(pdev, GCDGMBUS, + &dev_priv->regfile.saveGCDGMBUS); } static void i915_restore_display(struct drm_i915_private *dev_priv) { + struct pci_dev *pdev = dev_priv->drm.pdev; + + if (IS_GEN(dev_priv, 4)) + pci_write_config_word(pdev, GCDGMBUS, + dev_priv->regfile.saveGCDGMBUS); + /* Display arbitration */ if (INTEL_GEN(dev_priv) <= 4) I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); @@ -52,31 +60,17 @@ static void i915_restore_display(struct drm_i915_private *dev_priv) /* only restore FBC info on the platform that supports FBC*/ intel_fbc_global_disable(dev_priv); - /* restore FBC interval */ - if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv)) - I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); - intel_vga_redisable(dev_priv); + + intel_gmbus_reset(dev_priv); } int i915_save_state(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; int i; i915_save_display(dev_priv); - if (IS_GEN(dev_priv, 4)) - pci_read_config_word(pdev, GCDGMBUS, - &dev_priv->regfile.saveGCDGMBUS); - - /* Cache mode state */ - if (INTEL_GEN(dev_priv) < 7) 
- dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); - - /* Memory Arbitration state */ - dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); - /* Scratch space */ if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) { for (i = 0; i < 7; i++) { @@ -102,22 +96,10 @@ int i915_save_state(struct drm_i915_private *dev_priv) int i915_restore_state(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; int i; - if (IS_GEN(dev_priv, 4)) - pci_write_config_word(pdev, GCDGMBUS, - dev_priv->regfile.saveGCDGMBUS); i915_restore_display(dev_priv); - /* Cache mode state */ - if (INTEL_GEN(dev_priv) < 7) - I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | - 0xffff0000); - - /* Memory arbitration state */ - I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); - /* Scratch space */ if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) { for (i = 0; i < 7; i++) { @@ -138,7 +120,5 @@ int i915_restore_state(struct drm_i915_private *dev_priv) I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); } - intel_gmbus_reset(dev_priv); - return 0; } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 495d28f6d160..ffb5287e055a 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -892,9 +892,11 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, /* Allocate enough page directories to used PTE */ if (vma->vm->allocate_va_range) { - i915_vm_alloc_pt_stash(vma->vm, - &work->stash, - vma->size); + err = i915_vm_alloc_pt_stash(vma->vm, + &work->stash, + vma->size); + if (err) + goto err_fence; err = i915_vm_pin_pt_stash(vma->vm, &work->stash); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index e2aa5bc3a6e0..adc836f15fde 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -516,6 +516,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) S32_MAX), USEC_PER_SEC)); } + + if (!HAS_DISPLAY(dev_priv)) { + dev_priv->drm.driver_features &= ~(DRIVER_MODESET | + DRIVER_ATOMIC); + memset(&info->display, 0, sizeof(info->display)); + memset(runtime->num_sprites, 0, sizeof(runtime->num_sprites)); + memset(runtime->num_scalers, 0, sizeof(runtime->num_scalers)); + } } void intel_driver_caps_print(const struct intel_driver_caps *caps, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b4bd19266b8c..34e0d22d456b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7136,7 +7136,7 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) I915_READ(POWERGATE_ENABLE) | vd_pg_enable); /* Wa_1409825376:tgl (pre-prod)*/ - if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0)) + if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1)) I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) | TGL_VRH_GATING_DIS); diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 916ccd1c0e96..5b3279262123 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -231,9 +231,21 @@ void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val) SB_CRWRDA_NP, reg, &val); } +static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy) +{ + /* + * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D) + * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C) + */ + if 
(IS_CHERRYVIEW(i915)) + return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO; + else + return IOSF_PORT_DPIO; +} + u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg) { - int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)]; + u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); u32 val = 0; vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val); @@ -252,7 +264,7 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg) void vlv_dpio_write(struct drm_i915_private *i915, enum pipe pipe, int reg, u32 val) { - int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)]; + u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val); } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 8d5a933e6af6..263ffcb832b7 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1993,13 +1993,14 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore, unsigned int slow_timeout_ms, u32 *out_value) { - u32 reg_value; + u32 reg_value = 0; #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value) int ret; /* Catch any overuse of this function */ might_sleep_if(slow_timeout_ms); GEM_BUG_ON(fast_timeout_us > 20000); + GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms); ret = -ETIMEDOUT; if (fast_timeout_us && fast_timeout_us <= 20000) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 028baae9631f..f88473d396f4 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -536,7 +536,7 @@ int i915_gem_evict_mock_selftests(void) with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = i915_subtests(tests, &i915->gt); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index af8205a2bd8f..c53a222e3dec 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1727,7 +1727,7 @@ int i915_gem_gtt_mock_selftests(void) mock_fini_ggtt(ggtt); kfree(ggtt); out_put: - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 3092ca763789..64bbb8288249 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -527,7 +527,7 @@ int i915_request_mock_selftests(void) with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = i915_subtests(tests, i915); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 88c5e9acb84c..1b6125e4c1ac 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -841,7 +841,7 @@ int i915_vma_mock_selftests(void) mock_fini_ggtt(ggtt); kfree(ggtt); out_put: - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 93a38a323584..334b0648e253 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -791,7 +791,7 @@ int 
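Two small hardenings land in __intel_wait_for_register_fw() above: reg_value now starts at 0 so the value reported back is defined even if polling never runs, and the new GEM_BUG_ON rejects callers passing two zero timeouts. A generic self-contained sketch of the same polling pattern (names and the spin-count loop are illustrative, not the i915 API):

#include <stdbool.h>
#include <stdint.h>

static bool poll_register(uint32_t (*read_reg)(void),
			  uint32_t mask, uint32_t value,
			  unsigned long spins, uint32_t *out_value)
{
	uint32_t reg_value = 0; /* defined even on an immediate timeout */
	bool hit = false;

	while (spins--) {
		reg_value = read_reg();
		if ((reg_value & mask) == value) {
			hit = true;
			break;
		}
	}

	if (out_value)
		*out_value = reg_value; /* never uninitialised garbage */
	return hit;
}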
intel_memory_region_mock_selftests(void) intel_memory_region_put(mem); out_unref: - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index f127e633f7ca..b6c42fd872ad 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -79,8 +79,6 @@ static void mock_device_release(struct drm_device *dev) out: i915_params_free(&i915->params); - put_device(&i915->drm.pdev->dev); - i915->drm.pdev = NULL; } static struct drm_driver mock_driver = { @@ -118,22 +116,15 @@ static struct dev_pm_domain pm_domain = { struct drm_i915_private *mock_gem_device(void) { - struct drm_i915_private *i915; - struct pci_dev *pdev; #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) - struct dev_iommu iommu; + static struct dev_iommu fake_iommu = { .priv = (void *)-1 }; #endif - int err; + struct drm_i915_private *i915; + struct pci_dev *pdev; pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); if (!pdev) return NULL; - i915 = kzalloc(sizeof(*i915), GFP_KERNEL); - if (!i915) { - kfree(pdev); - return NULL; - } - device_initialize(&pdev->dev); pdev->class = PCI_BASE_CLASS_DISPLAY << 16; pdev->dev.release = release_dev; @@ -141,13 +132,26 @@ struct drm_i915_private *mock_gem_device(void) dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) - /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */ - memset(&iommu, 0, sizeof(iommu)); - iommu.priv = (void *)-1; - pdev->dev.iommu = &iommu; + /* HACK to disable iommu for the fake device; force identity mapping */ + pdev->dev.iommu = &fake_iommu; #endif + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + put_device(&pdev->dev); + return NULL; + } + + i915 = devm_drm_dev_alloc(&pdev->dev, &mock_driver, + struct drm_i915_private, drm); + if (IS_ERR(i915)) { + pr_err("Failed to allocate mock GEM device: err=%ld\n", PTR_ERR(i915)); + devres_release_group(&pdev->dev, NULL); + put_device(&pdev->dev); + + return NULL; + } pci_set_drvdata(pdev, i915); + i915->drm.pdev = pdev; dev_pm_domain_set(&pdev->dev, &pm_domain); pm_runtime_enable(&pdev->dev); @@ -155,16 +159,6 @@ struct drm_i915_private *mock_gem_device(void) if (pm_runtime_enabled(&pdev->dev)) WARN_ON(pm_runtime_get_sync(&pdev->dev)); - err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev); - if (err) { - pr_err("Failed to initialise mock GEM device: err=%d\n", err); - put_device(&pdev->dev); - kfree(i915); - - return NULL; - } - i915->drm.pdev = pdev; - drmm_add_final_kfree(&i915->drm, i915); i915_params_copy(&i915->params, &i915_modparams); @@ -224,7 +218,15 @@ err_drv: intel_gt_driver_late_release(&i915->gt); intel_memory_regions_driver_release(i915); drm_mode_config_cleanup(&i915->drm); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return NULL; } + +void mock_destroy_device(struct drm_i915_private *i915) +{ + struct device *dev = i915->drm.dev; + + devres_release_group(dev, NULL); + put_device(dev); +} diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.h b/drivers/gpu/drm/i915/selftests/mock_gem_device.h index b5dc4e394555..953cfe4fab34 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.h +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.h @@ -7,4 +7,6 @@ struct drm_i915_private; struct drm_i915_private *mock_gem_device(void); void mock_device_flush(struct drm_i915_private *i915); +void 
mock_destroy_device(struct drm_i915_private *i915); + #endif /* !__MOCK_GEM_DEVICE_H__ */ diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 207bf7409dfb..6231048aa5aa 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -39,3 +39,5 @@ config DRM_IMX_HDMI depends on DRM_IMX help Choose this if you want to use HDMI on i.MX6. + +source "drivers/gpu/drm/imx/dcss/Kconfig" diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile index 21cdcc2faabc..b644deffe948 100644 --- a/drivers/gpu/drm/imx/Makefile +++ b/drivers/gpu/drm/imx/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o +obj-$(CONFIG_DRM_IMX_DCSS) += dcss/ diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig new file mode 100644 index 000000000000..2b17a964ff05 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/Kconfig @@ -0,0 +1,9 @@ +config DRM_IMX_DCSS + tristate "i.MX8MQ DCSS" + select IMX_IRQSTEER + select DRM_KMS_CMA_HELPER + select VIDEOMODE_HELPERS + depends on DRM && ARCH_MXC && ARM64 + help + Choose this if you have an NXP i.MX8MQ-based system and want to use the + Display Controller Subsystem. This option enables DCSS support. diff --git a/drivers/gpu/drm/imx/dcss/Makefile b/drivers/gpu/drm/imx/dcss/Makefile new file mode 100644 index 000000000000..8c7c8da42792 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/Makefile @@ -0,0 +1,6 @@ +imx-dcss-objs := dcss-drv.o dcss-dev.o dcss-blkctl.o dcss-ctxld.o dcss-dtg.o \ + dcss-ss.o dcss-dpr.o dcss-scaler.o dcss-kms.o dcss-crtc.o \ + dcss-plane.o + +obj-$(CONFIG_DRM_IMX_DCSS) += imx-dcss.o + diff --git a/drivers/gpu/drm/imx/dcss/dcss-blkctl.c b/drivers/gpu/drm/imx/dcss/dcss-blkctl.c new file mode 100644 index 000000000000..c9b54bb2692d --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-blkctl.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP.
+ */ + +#include <linux/device.h> +#include <linux/of.h> +#include <linux/slab.h> + +#include "dcss-dev.h" + +#define DCSS_BLKCTL_RESET_CTRL 0x00 +#define B_CLK_RESETN BIT(0) +#define APB_CLK_RESETN BIT(1) +#define P_CLK_RESETN BIT(2) +#define RTR_CLK_RESETN BIT(4) +#define DCSS_BLKCTL_CONTROL0 0x10 +#define HDMI_MIPI_CLK_SEL BIT(0) +#define DISPMIX_REFCLK_SEL_POS 4 +#define DISPMIX_REFCLK_SEL_MASK GENMASK(5, 4) +#define DISPMIX_PIXCLK_SEL BIT(8) +#define HDMI_SRC_SECURE_EN BIT(16) + +struct dcss_blkctl { + struct dcss_dev *dcss; + void __iomem *base_reg; +}; + +void dcss_blkctl_cfg(struct dcss_blkctl *blkctl) +{ + if (blkctl->dcss->hdmi_output) + dcss_writel(0, blkctl->base_reg + DCSS_BLKCTL_CONTROL0); + else + dcss_writel(DISPMIX_PIXCLK_SEL, + blkctl->base_reg + DCSS_BLKCTL_CONTROL0); + + dcss_set(B_CLK_RESETN | APB_CLK_RESETN | P_CLK_RESETN | RTR_CLK_RESETN, + blkctl->base_reg + DCSS_BLKCTL_RESET_CTRL); +} + +int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base) +{ + struct dcss_blkctl *blkctl; + + blkctl = kzalloc(sizeof(*blkctl), GFP_KERNEL); + if (!blkctl) + return -ENOMEM; + + blkctl->base_reg = ioremap(blkctl_base, SZ_4K); + if (!blkctl->base_reg) { + dev_err(dcss->dev, "unable to remap BLK CTRL base\n"); + kfree(blkctl); + return -ENOMEM; + } + + dcss->blkctl = blkctl; + blkctl->dcss = dcss; + + dcss_blkctl_cfg(blkctl); + + return 0; +} + +void dcss_blkctl_exit(struct dcss_blkctl *blkctl) +{ + if (blkctl->base_reg) + iounmap(blkctl->base_reg); + + kfree(blkctl); +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-crtc.c b/drivers/gpu/drm/imx/dcss/dcss-crtc.c new file mode 100644 index 000000000000..36abff0890b2 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-crtc.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
+ */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_vblank.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include "dcss-dev.h" +#include "dcss-kms.h" + +static int dcss_enable_vblank(struct drm_crtc *crtc) +{ + struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc, + base); + struct dcss_dev *dcss = crtc->dev->dev_private; + + dcss_dtg_vblank_irq_enable(dcss->dtg, true); + + dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true); + + enable_irq(dcss_crtc->irq); + + return 0; +} + +static void dcss_disable_vblank(struct drm_crtc *crtc) +{ + struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc, + base); + struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private; + + disable_irq_nosync(dcss_crtc->irq); + + dcss_dtg_vblank_irq_enable(dcss->dtg, false); + + if (dcss_crtc->disable_ctxld_kick_irq) + dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, false); +} + +static const struct drm_crtc_funcs dcss_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = drm_crtc_cleanup, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = dcss_enable_vblank, + .disable_vblank = dcss_disable_vblank, +}; + +static void dcss_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + drm_crtc_vblank_on(crtc); +} + +static void dcss_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc, + base); + struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private; + + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + WARN_ON(drm_crtc_vblank_get(crtc)); + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); + + if (dcss_dtg_is_enabled(dcss->dtg)) + dcss_ctxld_enable(dcss->ctxld); +} + +static void dcss_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc, + base); + struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_display_mode *old_mode = &old_crtc_state->adjusted_mode; + struct videomode vm; + + drm_display_mode_to_videomode(mode, &vm); + + pm_runtime_get_sync(dcss->dev); + + vm.pixelclock = mode->crtc_clock * 1000; + + dcss_ss_subsam_set(dcss->ss); + dcss_dtg_css_set(dcss->dtg); + + if (!drm_mode_equal(mode, old_mode) || !old_crtc_state->active) { + dcss_dtg_sync_set(dcss->dtg, &vm); + dcss_ss_sync_set(dcss->ss, &vm, + mode->flags & DRM_MODE_FLAG_PHSYNC, + mode->flags & DRM_MODE_FLAG_PVSYNC); + } + + dcss_enable_dtg_and_ss(dcss); + + dcss_ctxld_enable(dcss->ctxld); + + /* Allow CTXLD kick interrupt to be disabled when VBLANK is disabled. 
*/ + dcss_crtc->disable_ctxld_kick_irq = true; +} + +static void dcss_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc, + base); + struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_display_mode *old_mode = &old_crtc_state->adjusted_mode; + + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); + + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); + + dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true); + + reinit_completion(&dcss->disable_completion); + + dcss_disable_dtg_and_ss(dcss); + + dcss_ctxld_enable(dcss->ctxld); + + if (!drm_mode_equal(mode, old_mode) || !crtc->state->active) + if (!wait_for_completion_timeout(&dcss->disable_completion, + msecs_to_jiffies(100))) + dev_err(dcss->dev, "Shutting off DTG timed out.\n"); + + /* + * Do not shut off CTXLD kick interrupt when shutting VBLANK off. It + * will be needed to commit the last changes, before going to suspend. + */ + dcss_crtc->disable_ctxld_kick_irq = false; + + drm_crtc_vblank_off(crtc); + + pm_runtime_mark_last_busy(dcss->dev); + pm_runtime_put_autosuspend(dcss->dev); +} + +static const struct drm_crtc_helper_funcs dcss_helper_funcs = { + .atomic_begin = dcss_crtc_atomic_begin, + .atomic_flush = dcss_crtc_atomic_flush, + .atomic_enable = dcss_crtc_atomic_enable, + .atomic_disable = dcss_crtc_atomic_disable, +}; + +static irqreturn_t dcss_crtc_irq_handler(int irq, void *dev_id) +{ + struct dcss_crtc *dcss_crtc = dev_id; + struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private; + + if (!dcss_dtg_vblank_irq_valid(dcss->dtg)) + return IRQ_NONE; + + if (dcss_ctxld_is_flushed(dcss->ctxld)) + drm_crtc_handle_vblank(&dcss_crtc->base); + + dcss_dtg_vblank_irq_clear(dcss->dtg); + + return IRQ_HANDLED; +} + +int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm) +{ + struct dcss_dev *dcss = drm->dev_private; + struct platform_device *pdev = to_platform_device(dcss->dev); + int ret; + + crtc->plane[0] = dcss_plane_init(drm, drm_crtc_mask(&crtc->base), + DRM_PLANE_TYPE_PRIMARY, 0); + if (IS_ERR(crtc->plane[0])) + return PTR_ERR(crtc->plane[0]); + + crtc->base.port = dcss->of_port; + + drm_crtc_helper_add(&crtc->base, &dcss_helper_funcs); + ret = drm_crtc_init_with_planes(drm, &crtc->base, &crtc->plane[0]->base, + NULL, &dcss_crtc_funcs, NULL); + if (ret) { + dev_err(dcss->dev, "failed to init crtc\n"); + return ret; + } + + crtc->irq = platform_get_irq_byname(pdev, "vblank"); + if (crtc->irq < 0) + return crtc->irq; + + ret = request_irq(crtc->irq, dcss_crtc_irq_handler, + 0, "dcss_drm", crtc); + if (ret) { + dev_err(dcss->dev, "irq request failed with %d.\n", ret); + return ret; + } + + disable_irq(crtc->irq); + + return 0; +} + +void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm) +{ + free_irq(crtc->irq, crtc); +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-ctxld.c b/drivers/gpu/drm/imx/dcss/dcss-ctxld.c new file mode 100644 index 000000000000..3a84cb3209c4 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-ctxld.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_CTXLD_CONTROL_STATUS 0x0
+#define CTXLD_ENABLE BIT(0)
+#define ARB_SEL BIT(1)
+#define RD_ERR_EN BIT(2)
+#define DB_COMP_EN BIT(3)
+#define SB_HP_COMP_EN BIT(4)
+#define SB_LP_COMP_EN BIT(5)
+#define DB_PEND_SB_REC_EN BIT(6)
+#define SB_PEND_DISP_ACTIVE_EN BIT(7)
+#define AHB_ERR_EN BIT(8)
+#define RD_ERR BIT(16)
+#define DB_COMP BIT(17)
+#define SB_HP_COMP BIT(18)
+#define SB_LP_COMP BIT(19)
+#define DB_PEND_SB_REC BIT(20)
+#define SB_PEND_DISP_ACTIVE BIT(21)
+#define AHB_ERR BIT(22)
+#define DCSS_CTXLD_DB_BASE_ADDR 0x10
+#define DCSS_CTXLD_DB_COUNT 0x14
+#define DCSS_CTXLD_SB_BASE_ADDR 0x18
+#define DCSS_CTXLD_SB_COUNT 0x1C
+#define SB_HP_COUNT_POS 0
+#define SB_HP_COUNT_MASK 0xffff
+#define SB_LP_COUNT_POS 16
+#define SB_LP_COUNT_MASK 0xffff0000
+#define DCSS_AHB_ERR_ADDR 0x20
+
+#define CTXLD_IRQ_COMPLETION (DB_COMP | SB_HP_COMP | SB_LP_COMP)
+#define CTXLD_IRQ_ERROR (RD_ERR | DB_PEND_SB_REC | AHB_ERR)
+
+/* The following sizes are in context loader entries, 8 bytes each. */
+#define CTXLD_DB_CTX_ENTRIES 1024 /* max 65536 */
+#define CTXLD_SB_LP_CTX_ENTRIES 10240 /* max 65536 */
+#define CTXLD_SB_HP_CTX_ENTRIES 20000 /* max 65536 */
+#define CTXLD_SB_CTX_ENTRIES (CTXLD_SB_LP_CTX_ENTRIES + \
+ CTXLD_SB_HP_CTX_ENTRIES)
+
+/* Sizes, in entries, of the DB, SB_HP and SB_LP context regions. */
+static u16 dcss_ctxld_ctx_size[3] = {
+ CTXLD_DB_CTX_ENTRIES,
+ CTXLD_SB_HP_CTX_ENTRIES,
+ CTXLD_SB_LP_CTX_ENTRIES
+};
+
+/* this represents an entry in the context loader map */
+struct dcss_ctxld_item {
+ u32 val;
+ u32 ofs;
+};
+
+#define CTX_ITEM_SIZE sizeof(struct dcss_ctxld_item)
+
+struct dcss_ctxld {
+ struct device *dev;
+ void __iomem *ctxld_reg;
+ int irq;
+ bool irq_en;
+
+ struct dcss_ctxld_item *db[2];
+ struct dcss_ctxld_item *sb_hp[2];
+ struct dcss_ctxld_item *sb_lp[2];
+
+ dma_addr_t db_paddr[2];
+ dma_addr_t sb_paddr[2];
+
+ u16 ctx_size[2][3]; /* holds the sizes of DB, SB_HP and SB_LP ctx */
+ u8 current_ctx;
+
+ bool in_use;
+ bool armed;
+
+ spinlock_t lock; /* protects concurrent access to private data */
+};
+
+static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data)
+{
+ struct dcss_ctxld *ctxld = data;
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
+ u32 irq_status;
+
+ irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
+
+ if (irq_status & CTXLD_IRQ_COMPLETION &&
+ !(irq_status & CTXLD_ENABLE) && ctxld->in_use) {
+ ctxld->in_use = false;
+
+ if (dcss && dcss->disable_callback)
+ dcss->disable_callback(dcss);
+ } else if (irq_status & CTXLD_IRQ_ERROR) {
+ /*
+ * Except for throwing an error message and clearing the status
+ * register, there's not much we can do here.
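+ * We log the sizes of the context that was in flight: current_ctx
+ * is toggled on submission, so current_ctx ^ 1 points at the
+ * context the hardware was loading.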
+ */ + dev_err(ctxld->dev, "ctxld: error encountered: %08x\n", + irq_status); + dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n", + ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB], + ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP], + ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]); + } + + dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION), + ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS); + + return IRQ_HANDLED; +} + +static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld, + struct platform_device *pdev) +{ + int ret; + + ctxld->irq = platform_get_irq_byname(pdev, "ctxld"); + if (ctxld->irq < 0) + return ctxld->irq; + + ret = request_irq(ctxld->irq, dcss_ctxld_irq_handler, + 0, "dcss_ctxld", ctxld); + if (ret) { + dev_err(ctxld->dev, "ctxld: irq request failed.\n"); + return ret; + } + + ctxld->irq_en = true; + + return 0; +} + +static void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld) +{ + dcss_writel(RD_ERR_EN | SB_HP_COMP_EN | + DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR, + ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS); +} + +static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld) +{ + struct dcss_ctxld_item *ctx; + int i; + + for (i = 0; i < 2; i++) { + if (ctxld->db[i]) { + dma_free_coherent(ctxld->dev, + CTXLD_DB_CTX_ENTRIES * sizeof(*ctx), + ctxld->db[i], ctxld->db_paddr[i]); + ctxld->db[i] = NULL; + ctxld->db_paddr[i] = 0; + } + + if (ctxld->sb_hp[i]) { + dma_free_coherent(ctxld->dev, + CTXLD_SB_CTX_ENTRIES * sizeof(*ctx), + ctxld->sb_hp[i], ctxld->sb_paddr[i]); + ctxld->sb_hp[i] = NULL; + ctxld->sb_paddr[i] = 0; + } + } +} + +static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld) +{ + struct dcss_ctxld_item *ctx; + int i; + + for (i = 0; i < 2; i++) { + ctx = dma_alloc_coherent(ctxld->dev, + CTXLD_DB_CTX_ENTRIES * sizeof(*ctx), + &ctxld->db_paddr[i], GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctxld->db[i] = ctx; + + ctx = dma_alloc_coherent(ctxld->dev, + CTXLD_SB_CTX_ENTRIES * sizeof(*ctx), + &ctxld->sb_paddr[i], GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctxld->sb_hp[i] = ctx; + ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES; + } + + return 0; +} + +int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base) +{ + struct dcss_ctxld *ctxld; + int ret; + + ctxld = kzalloc(sizeof(*ctxld), GFP_KERNEL); + if (!ctxld) + return -ENOMEM; + + dcss->ctxld = ctxld; + ctxld->dev = dcss->dev; + + spin_lock_init(&ctxld->lock); + + ret = dcss_ctxld_alloc_ctx(ctxld); + if (ret) { + dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n"); + goto err; + } + + ctxld->ctxld_reg = ioremap(ctxld_base, SZ_4K); + if (!ctxld->ctxld_reg) { + dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n"); + ret = -ENOMEM; + goto err; + } + + ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev)); + if (ret) + goto err_irq; + + dcss_ctxld_hw_cfg(ctxld); + + return 0; + +err_irq: + iounmap(ctxld->ctxld_reg); + +err: + dcss_ctxld_free_ctx(ctxld); + kfree(ctxld); + + return ret; +} + +void dcss_ctxld_exit(struct dcss_ctxld *ctxld) +{ + free_irq(ctxld->irq, ctxld); + + if (ctxld->ctxld_reg) + iounmap(ctxld->ctxld_reg); + + dcss_ctxld_free_ctx(ctxld); + kfree(ctxld); +} + +static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld) +{ + int curr_ctx = ctxld->current_ctx; + u32 db_base, sb_base, sb_count; + u32 sb_hp_cnt, sb_lp_cnt, db_cnt; + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev); + + if (!dcss) + return 0; + + dcss_dpr_write_sysctrl(dcss->dpr); + + dcss_scaler_write_sclctrl(dcss->scaler); + + sb_hp_cnt = 
ctxld->ctx_size[curr_ctx][CTX_SB_HP]; + sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP]; + db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB]; + + /* make sure SB_LP context area comes after SB_HP */ + if (sb_lp_cnt && + ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) { + struct dcss_ctxld_item *sb_lp_adjusted; + + sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt; + + memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx], + sb_lp_cnt * CTX_ITEM_SIZE); + } + + db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0; + + dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR); + dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT); + + if (sb_hp_cnt) + sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) | + ((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK); + else + sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK; + + sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0; + + dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR); + dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT); + + /* enable the context loader */ + dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS); + + ctxld->in_use = true; + + /* + * Toggle the current context to the alternate one so that any updates + * in the modules' settings take place there. + */ + ctxld->current_ctx ^= 1; + + ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0; + ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0; + ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0; + + return 0; +} + +int dcss_ctxld_enable(struct dcss_ctxld *ctxld) +{ + spin_lock_irq(&ctxld->lock); + ctxld->armed = true; + spin_unlock_irq(&ctxld->lock); + + return 0; +} + +void dcss_ctxld_kick(struct dcss_ctxld *ctxld) +{ + unsigned long flags; + + spin_lock_irqsave(&ctxld->lock, flags); + if (ctxld->armed && !ctxld->in_use) { + ctxld->armed = false; + dcss_ctxld_enable_locked(ctxld); + } + spin_unlock_irqrestore(&ctxld->lock, flags); +} + +void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val, + u32 reg_ofs) +{ + int curr_ctx = ctxld->current_ctx; + struct dcss_ctxld_item *ctx[] = { + [CTX_DB] = ctxld->db[curr_ctx], + [CTX_SB_HP] = ctxld->sb_hp[curr_ctx], + [CTX_SB_LP] = ctxld->sb_lp[curr_ctx] + }; + int item_idx = ctxld->ctx_size[curr_ctx][ctx_id]; + + if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) { + WARN_ON(1); + return; + } + + ctx[ctx_id][item_idx].val = val; + ctx[ctx_id][item_idx].ofs = reg_ofs; + ctxld->ctx_size[curr_ctx][ctx_id] += 1; +} + +void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id, + u32 val, u32 reg_ofs) +{ + spin_lock_irq(&ctxld->lock); + dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs); + spin_unlock_irq(&ctxld->lock); +} + +bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld) +{ + return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 && + ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 && + ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0; +} + +int dcss_ctxld_resume(struct dcss_ctxld *ctxld) +{ + dcss_ctxld_hw_cfg(ctxld); + + if (!ctxld->irq_en) { + enable_irq(ctxld->irq); + ctxld->irq_en = true; + } + + return 0; +} + +int dcss_ctxld_suspend(struct dcss_ctxld *ctxld) +{ + int ret = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(500); + + if (!dcss_ctxld_is_flushed(ctxld)) { + dcss_ctxld_kick(ctxld); + + while (!time_after(jiffies, timeout) && ctxld->in_use) + msleep(20); + + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + } + + spin_lock_irq(&ctxld->lock); + + if (ctxld->irq_en) { + disable_irq_nosync(ctxld->irq); + 
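/*
+ * Leave the interrupt masked across suspend;
+ * dcss_ctxld_resume() will re-enable it.
+ */
+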
ctxld->irq_en = false; + } + + /* reset context region and sizes */ + ctxld->current_ctx = 0; + ctxld->ctx_size[0][CTX_DB] = 0; + ctxld->ctx_size[0][CTX_SB_HP] = 0; + ctxld->ctx_size[0][CTX_SB_LP] = 0; + + spin_unlock_irq(&ctxld->lock); + + return ret; +} + +void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld) +{ + lockdep_assert_held(&ctxld->lock); +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c new file mode 100644 index 000000000000..c849533ca83e --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. + */ + +#include <linux/clk.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_device.h> +#include <drm/drm_modeset_helper.h> + +#include "dcss-dev.h" +#include "dcss-kms.h" + +static void dcss_clocks_enable(struct dcss_dev *dcss) +{ + clk_prepare_enable(dcss->axi_clk); + clk_prepare_enable(dcss->apb_clk); + clk_prepare_enable(dcss->rtrm_clk); + clk_prepare_enable(dcss->dtrc_clk); + clk_prepare_enable(dcss->pix_clk); +} + +static void dcss_clocks_disable(struct dcss_dev *dcss) +{ + clk_disable_unprepare(dcss->pix_clk); + clk_disable_unprepare(dcss->dtrc_clk); + clk_disable_unprepare(dcss->rtrm_clk); + clk_disable_unprepare(dcss->apb_clk); + clk_disable_unprepare(dcss->axi_clk); +} + +static void dcss_disable_dtg_and_ss_cb(void *data) +{ + struct dcss_dev *dcss = data; + + dcss->disable_callback = NULL; + + dcss_ss_shutoff(dcss->ss); + dcss_dtg_shutoff(dcss->dtg); + + complete(&dcss->disable_completion); +} + +void dcss_disable_dtg_and_ss(struct dcss_dev *dcss) +{ + dcss->disable_callback = dcss_disable_dtg_and_ss_cb; +} + +void dcss_enable_dtg_and_ss(struct dcss_dev *dcss) +{ + if (dcss->disable_callback) + dcss->disable_callback = NULL; + + dcss_dtg_enable(dcss->dtg); + dcss_ss_enable(dcss->ss); +} + +static int dcss_submodules_init(struct dcss_dev *dcss) +{ + int ret = 0; + u32 base_addr = dcss->start_addr; + const struct dcss_type_data *devtype = dcss->devtype; + + dcss_clocks_enable(dcss); + + ret = dcss_blkctl_init(dcss, base_addr + devtype->blkctl_ofs); + if (ret) + return ret; + + ret = dcss_ctxld_init(dcss, base_addr + devtype->ctxld_ofs); + if (ret) + goto ctxld_err; + + ret = dcss_dtg_init(dcss, base_addr + devtype->dtg_ofs); + if (ret) + goto dtg_err; + + ret = dcss_ss_init(dcss, base_addr + devtype->ss_ofs); + if (ret) + goto ss_err; + + ret = dcss_dpr_init(dcss, base_addr + devtype->dpr_ofs); + if (ret) + goto dpr_err; + + ret = dcss_scaler_init(dcss, base_addr + devtype->scaler_ofs); + if (ret) + goto scaler_err; + + dcss_clocks_disable(dcss); + + return 0; + +scaler_err: + dcss_dpr_exit(dcss->dpr); + +dpr_err: + dcss_ss_exit(dcss->ss); + +ss_err: + dcss_dtg_exit(dcss->dtg); + +dtg_err: + dcss_ctxld_exit(dcss->ctxld); + +ctxld_err: + dcss_blkctl_exit(dcss->blkctl); + + dcss_clocks_disable(dcss); + + return ret; +} + +static void dcss_submodules_stop(struct dcss_dev *dcss) +{ + dcss_clocks_enable(dcss); + dcss_scaler_exit(dcss->scaler); + dcss_dpr_exit(dcss->dpr); + dcss_ss_exit(dcss->ss); + dcss_dtg_exit(dcss->dtg); + dcss_ctxld_exit(dcss->ctxld); + dcss_blkctl_exit(dcss->blkctl); + dcss_clocks_disable(dcss); +} + +static int dcss_clks_init(struct dcss_dev *dcss) +{ + int i; + struct { + const char *id; + struct clk **clk; + } clks[] = { + {"apb", &dcss->apb_clk}, + {"axi", &dcss->axi_clk}, + {"pix", &dcss->pix_clk}, + {"rtrm", 
&dcss->rtrm_clk}, + {"dtrc", &dcss->dtrc_clk}, + }; + + for (i = 0; i < ARRAY_SIZE(clks); i++) { + *clks[i].clk = devm_clk_get(dcss->dev, clks[i].id); + if (IS_ERR(*clks[i].clk)) { + dev_err(dcss->dev, "failed to get %s clock\n", + clks[i].id); + return PTR_ERR(*clks[i].clk); + } + } + + return 0; +} + +static void dcss_clks_release(struct dcss_dev *dcss) +{ + devm_clk_put(dcss->dev, dcss->dtrc_clk); + devm_clk_put(dcss->dev, dcss->rtrm_clk); + devm_clk_put(dcss->dev, dcss->pix_clk); + devm_clk_put(dcss->dev, dcss->axi_clk); + devm_clk_put(dcss->dev, dcss->apb_clk); +} + +struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output) +{ + struct platform_device *pdev = to_platform_device(dev); + int ret; + struct resource *res; + struct dcss_dev *dcss; + const struct dcss_type_data *devtype; + + devtype = of_device_get_match_data(dev); + if (!devtype) { + dev_err(dev, "no device match found\n"); + return ERR_PTR(-ENODEV); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "cannot get memory resource\n"); + return ERR_PTR(-EINVAL); + } + + dcss = kzalloc(sizeof(*dcss), GFP_KERNEL); + if (!dcss) + return ERR_PTR(-ENOMEM); + + dcss->dev = dev; + dcss->devtype = devtype; + dcss->hdmi_output = hdmi_output; + + ret = dcss_clks_init(dcss); + if (ret) { + dev_err(dev, "clocks initialization failed\n"); + goto err; + } + + dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0); + if (!dcss->of_port) { + dev_err(dev, "no port@0 node in %s\n", dev->of_node->full_name); + ret = -ENODEV; + goto clks_err; + } + + dcss->start_addr = res->start; + + ret = dcss_submodules_init(dcss); + if (ret) { + dev_err(dev, "submodules initialization failed\n"); + goto clks_err; + } + + init_completion(&dcss->disable_completion); + + pm_runtime_set_autosuspend_delay(dev, 100); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_suspended(dev); + pm_runtime_allow(dev); + pm_runtime_enable(dev); + + return dcss; + +clks_err: + dcss_clks_release(dcss); + +err: + kfree(dcss); + + return ERR_PTR(ret); +} + +void dcss_dev_destroy(struct dcss_dev *dcss) +{ + if (!pm_runtime_suspended(dcss->dev)) { + dcss_ctxld_suspend(dcss->ctxld); + dcss_clocks_disable(dcss); + } + + pm_runtime_disable(dcss->dev); + + dcss_submodules_stop(dcss); + + dcss_clks_release(dcss); + + kfree(dcss); +} + +#ifdef CONFIG_PM_SLEEP +int dcss_dev_suspend(struct device *dev) +{ + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev); + struct drm_device *ddev = dcss_drv_dev_to_drm(dev); + struct dcss_kms_dev *kms = container_of(ddev, struct dcss_kms_dev, base); + int ret; + + drm_bridge_connector_disable_hpd(kms->connector); + + drm_mode_config_helper_suspend(ddev); + + if (pm_runtime_suspended(dev)) + return 0; + + ret = dcss_ctxld_suspend(dcss->ctxld); + if (ret) + return ret; + + dcss_clocks_disable(dcss); + + return 0; +} + +int dcss_dev_resume(struct device *dev) +{ + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev); + struct drm_device *ddev = dcss_drv_dev_to_drm(dev); + struct dcss_kms_dev *kms = container_of(ddev, struct dcss_kms_dev, base); + + if (pm_runtime_suspended(dev)) { + drm_mode_config_helper_resume(ddev); + return 0; + } + + dcss_clocks_enable(dcss); + + dcss_blkctl_cfg(dcss->blkctl); + + dcss_ctxld_resume(dcss->ctxld); + + drm_mode_config_helper_resume(ddev); + + drm_bridge_connector_enable_hpd(kms->connector); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +int dcss_dev_runtime_suspend(struct device *dev) +{ + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev); + int 
ret; + + ret = dcss_ctxld_suspend(dcss->ctxld); + if (ret) + return ret; + + dcss_clocks_disable(dcss); + + return 0; +} + +int dcss_dev_runtime_resume(struct device *dev) +{ + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev); + + dcss_clocks_enable(dcss); + + dcss_blkctl_cfg(dcss->blkctl); + + dcss_ctxld_resume(dcss->ctxld); + + return 0; +} +#endif /* CONFIG_PM */ diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.h b/drivers/gpu/drm/imx/dcss/dcss-dev.h new file mode 100644 index 000000000000..c642ae17837f --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-dev.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 NXP. + */ + +#ifndef __DCSS_PRV_H__ +#define __DCSS_PRV_H__ + +#include <drm/drm_fourcc.h> +#include <linux/io.h> +#include <video/videomode.h> + +#define SET 0x04 +#define CLR 0x08 +#define TGL 0x0C + +#define dcss_writel(v, c) writel((v), (c)) +#define dcss_readl(c) readl(c) +#define dcss_set(v, c) writel((v), (c) + SET) +#define dcss_clr(v, c) writel((v), (c) + CLR) +#define dcss_toggle(v, c) writel((v), (c) + TGL) + +static inline void dcss_update(u32 v, u32 m, void __iomem *c) +{ + writel((readl(c) & ~(m)) | (v), (c)); +} + +#define DCSS_DBG_REG(reg) {.name = #reg, .ofs = reg} + +enum { + DCSS_IMX8MQ = 0, +}; + +struct dcss_type_data { + const char *name; + u32 blkctl_ofs; + u32 ctxld_ofs; + u32 rdsrc_ofs; + u32 wrscl_ofs; + u32 dtg_ofs; + u32 scaler_ofs; + u32 ss_ofs; + u32 dpr_ofs; + u32 dtrc_ofs; + u32 dec400d_ofs; + u32 hdr10_ofs; +}; + +struct dcss_debug_reg { + char *name; + u32 ofs; +}; + +enum dcss_ctxld_ctx_type { + CTX_DB, + CTX_SB_HP, /* high-priority */ + CTX_SB_LP, /* low-priority */ +}; + +struct dcss_dev { + struct device *dev; + const struct dcss_type_data *devtype; + struct device_node *of_port; + + u32 start_addr; + + struct dcss_blkctl *blkctl; + struct dcss_ctxld *ctxld; + struct dcss_dpr *dpr; + struct dcss_dtg *dtg; + struct dcss_ss *ss; + struct dcss_hdr10 *hdr10; + struct dcss_scaler *scaler; + struct dcss_dtrc *dtrc; + struct dcss_dec400d *dec400d; + struct dcss_wrscl *wrscl; + struct dcss_rdsrc *rdsrc; + + struct clk *apb_clk; + struct clk *axi_clk; + struct clk *pix_clk; + struct clk *rtrm_clk; + struct clk *dtrc_clk; + struct clk *pll_src_clk; + struct clk *pll_phy_ref_clk; + + bool hdmi_output; + + void (*disable_callback)(void *data); + struct completion disable_completion; +}; + +struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev); +struct drm_device *dcss_drv_dev_to_drm(struct device *dev); +struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output); +void dcss_dev_destroy(struct dcss_dev *dcss); +int dcss_dev_runtime_suspend(struct device *dev); +int dcss_dev_runtime_resume(struct device *dev); +int dcss_dev_suspend(struct device *dev); +int dcss_dev_resume(struct device *dev); +void dcss_enable_dtg_and_ss(struct dcss_dev *dcss); +void dcss_disable_dtg_and_ss(struct dcss_dev *dcss); + +/* BLKCTL */ +int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base); +void dcss_blkctl_cfg(struct dcss_blkctl *blkctl); +void dcss_blkctl_exit(struct dcss_blkctl *blkctl); + +/* CTXLD */ +int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base); +void dcss_ctxld_exit(struct dcss_ctxld *ctxld); +void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id, + u32 val, u32 reg_idx); +int dcss_ctxld_resume(struct dcss_ctxld *dcss_ctxld); +int dcss_ctxld_suspend(struct dcss_ctxld *dcss_ctxld); +void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctlxd, u32 ctx_id, u32 val, + u32 reg_ofs); +void 
dcss_ctxld_kick(struct dcss_ctxld *ctxld); +bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld); +int dcss_ctxld_enable(struct dcss_ctxld *ctxld); +void dcss_ctxld_register_completion(struct dcss_ctxld *ctxld, + struct completion *dis_completion); +void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld); + +/* DPR */ +int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base); +void dcss_dpr_exit(struct dcss_dpr *dpr); +void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr); +void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres); +void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr, + u32 chroma_base_addr, u16 pitch); +void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en); +void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num, + const struct drm_format_info *format, u64 modifier); +void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation); + +/* DTG */ +int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base); +void dcss_dtg_exit(struct dcss_dtg *dtg); +bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg); +void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en); +void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg); +void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm); +void dcss_dtg_css_set(struct dcss_dtg *dtg); +void dcss_dtg_enable(struct dcss_dtg *dtg); +void dcss_dtg_shutoff(struct dcss_dtg *dtg); +bool dcss_dtg_is_enabled(struct dcss_dtg *dtg); +void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en); +bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha); +void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num, + const struct drm_format_info *format, int alpha); +void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num, + int px, int py, int pw, int ph); +void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en); + +/* SUBSAM */ +int dcss_ss_init(struct dcss_dev *dcss, unsigned long subsam_base); +void dcss_ss_exit(struct dcss_ss *ss); +void dcss_ss_enable(struct dcss_ss *ss); +void dcss_ss_shutoff(struct dcss_ss *ss); +void dcss_ss_subsam_set(struct dcss_ss *ss); +void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm, + bool phsync, bool pvsync); + +/* SCALER */ +int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base); +void dcss_scaler_exit(struct dcss_scaler *scl); +void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num, + const struct drm_format_info *format, + int src_xres, int src_yres, int dst_xres, int dst_yres, + u32 vrefresh_hz); +void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en); +int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num, + int *min, int *max); +void dcss_scaler_write_sclctrl(struct dcss_scaler *scl); + +#endif /* __DCSS_PRV_H__ */ diff --git a/drivers/gpu/drm/imx/dcss/dcss-dpr.c b/drivers/gpu/drm/imx/dcss/dcss-dpr.c new file mode 100644 index 000000000000..df9dab949bf2 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-dpr.c @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
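+ *
+ * DPR (display prefetch resolve): fetches plane pixels from memory.
+ * Base addresses, pitch, pixel format, tiling and rotation are
+ * programmed per channel, through the CTXLD context.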
+ */ + +#include <linux/device.h> +#include <linux/slab.h> + +#include "dcss-dev.h" + +#define DCSS_DPR_SYSTEM_CTRL0 0x000 +#define RUN_EN BIT(0) +#define SOFT_RESET BIT(1) +#define REPEAT_EN BIT(2) +#define SHADOW_LOAD_EN BIT(3) +#define SW_SHADOW_LOAD_SEL BIT(4) +#define BCMD2AXI_MSTR_ID_CTRL BIT(16) +#define DCSS_DPR_IRQ_MASK 0x020 +#define DCSS_DPR_IRQ_MASK_STATUS 0x030 +#define DCSS_DPR_IRQ_NONMASK_STATUS 0x040 +#define IRQ_DPR_CTRL_DONE BIT(0) +#define IRQ_DPR_RUN BIT(1) +#define IRQ_DPR_SHADOW_LOADED BIT(2) +#define IRQ_AXI_READ_ERR BIT(3) +#define DPR2RTR_YRGB_FIFO_OVFL BIT(4) +#define DPR2RTR_UV_FIFO_OVFL BIT(5) +#define DPR2RTR_FIFO_LD_BUF_RDY_YRGB_ERR BIT(6) +#define DPR2RTR_FIFO_LD_BUF_RDY_UV_ERR BIT(7) +#define DCSS_DPR_MODE_CTRL0 0x050 +#define RTR_3BUF_EN BIT(0) +#define RTR_4LINE_BUF_EN BIT(1) +#define TILE_TYPE_POS 2 +#define TILE_TYPE_MASK GENMASK(4, 2) +#define YUV_EN BIT(6) +#define COMP_2PLANE_EN BIT(7) +#define PIX_SIZE_POS 8 +#define PIX_SIZE_MASK GENMASK(9, 8) +#define PIX_LUMA_UV_SWAP BIT(10) +#define PIX_UV_SWAP BIT(11) +#define B_COMP_SEL_POS 12 +#define B_COMP_SEL_MASK GENMASK(13, 12) +#define G_COMP_SEL_POS 14 +#define G_COMP_SEL_MASK GENMASK(15, 14) +#define R_COMP_SEL_POS 16 +#define R_COMP_SEL_MASK GENMASK(17, 16) +#define A_COMP_SEL_POS 18 +#define A_COMP_SEL_MASK GENMASK(19, 18) +#define DCSS_DPR_FRAME_CTRL0 0x070 +#define HFLIP_EN BIT(0) +#define VFLIP_EN BIT(1) +#define ROT_ENC_POS 2 +#define ROT_ENC_MASK GENMASK(3, 2) +#define ROT_FLIP_ORDER_EN BIT(4) +#define PITCH_POS 16 +#define PITCH_MASK GENMASK(31, 16) +#define DCSS_DPR_FRAME_1P_CTRL0 0x090 +#define DCSS_DPR_FRAME_1P_PIX_X_CTRL 0x0A0 +#define DCSS_DPR_FRAME_1P_PIX_Y_CTRL 0x0B0 +#define DCSS_DPR_FRAME_1P_BASE_ADDR 0x0C0 +#define DCSS_DPR_FRAME_2P_CTRL0 0x0E0 +#define DCSS_DPR_FRAME_2P_PIX_X_CTRL 0x0F0 +#define DCSS_DPR_FRAME_2P_PIX_Y_CTRL 0x100 +#define DCSS_DPR_FRAME_2P_BASE_ADDR 0x110 +#define DCSS_DPR_STATUS_CTRL0 0x130 +#define STATUS_MUX_SEL_MASK GENMASK(2, 0) +#define STATUS_SRC_SEL_POS 16 +#define STATUS_SRC_SEL_MASK GENMASK(18, 16) +#define DCSS_DPR_STATUS_CTRL1 0x140 +#define DCSS_DPR_RTRAM_CTRL0 0x200 +#define NUM_ROWS_ACTIVE BIT(0) +#define THRES_HIGH_POS 1 +#define THRES_HIGH_MASK GENMASK(3, 1) +#define THRES_LOW_POS 4 +#define THRES_LOW_MASK GENMASK(6, 4) +#define ABORT_SEL BIT(7) + +enum dcss_tile_type { + TILE_LINEAR = 0, + TILE_GPU_STANDARD, + TILE_GPU_SUPER, + TILE_VPU_YUV420, + TILE_VPU_VP9, +}; + +enum dcss_pix_size { + PIX_SIZE_8, + PIX_SIZE_16, + PIX_SIZE_32, +}; + +struct dcss_dpr_ch { + struct dcss_dpr *dpr; + void __iomem *base_reg; + u32 base_ofs; + + struct drm_format_info format; + enum dcss_pix_size pix_size; + enum dcss_tile_type tile; + bool rtram_4line_en; + bool rtram_3buf_en; + + u32 frame_ctrl; + u32 mode_ctrl; + u32 sys_ctrl; + u32 rtram_ctrl; + + bool sys_ctrl_chgd; + + int ch_num; + int irq; +}; + +struct dcss_dpr { + struct device *dev; + struct dcss_ctxld *ctxld; + u32 ctx_id; + + struct dcss_dpr_ch ch[3]; +}; + +static void dcss_dpr_write(struct dcss_dpr_ch *ch, u32 val, u32 ofs) +{ + struct dcss_dpr *dpr = ch->dpr; + + dcss_ctxld_write(dpr->ctxld, dpr->ctx_id, val, ch->base_ofs + ofs); +} + +static int dcss_dpr_ch_init_all(struct dcss_dpr *dpr, unsigned long dpr_base) +{ + struct dcss_dpr_ch *ch; + int i; + + for (i = 0; i < 3; i++) { + ch = &dpr->ch[i]; + + ch->base_ofs = dpr_base + i * 0x1000; + + ch->base_reg = ioremap(ch->base_ofs, SZ_4K); + if (!ch->base_reg) { + dev_err(dpr->dev, "dpr: unable to remap ch %d base\n", + i); + return -ENOMEM; + } + + 
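/* channel register pages are 4K each, 0x1000 apart */
+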
ch->dpr = dpr; + ch->ch_num = i; + + dcss_writel(0xff, ch->base_reg + DCSS_DPR_IRQ_MASK); + } + + return 0; +} + +int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base) +{ + struct dcss_dpr *dpr; + + dpr = kzalloc(sizeof(*dpr), GFP_KERNEL); + if (!dpr) + return -ENOMEM; + + dcss->dpr = dpr; + dpr->dev = dcss->dev; + dpr->ctxld = dcss->ctxld; + dpr->ctx_id = CTX_SB_HP; + + if (dcss_dpr_ch_init_all(dpr, dpr_base)) { + int i; + + for (i = 0; i < 3; i++) { + if (dpr->ch[i].base_reg) + iounmap(dpr->ch[i].base_reg); + } + + kfree(dpr); + + return -ENOMEM; + } + + return 0; +} + +void dcss_dpr_exit(struct dcss_dpr *dpr) +{ + int ch_no; + + /* stop DPR on all channels */ + for (ch_no = 0; ch_no < 3; ch_no++) { + struct dcss_dpr_ch *ch = &dpr->ch[ch_no]; + + dcss_writel(0, ch->base_reg + DCSS_DPR_SYSTEM_CTRL0); + + if (ch->base_reg) + iounmap(ch->base_reg); + } + + kfree(dpr); +} + +static u32 dcss_dpr_x_pix_wide_adjust(struct dcss_dpr_ch *ch, u32 pix_wide, + u32 pix_format) +{ + u8 pix_in_64byte_map[3][5] = { + /* LIN, GPU_STD, GPU_SUP, VPU_YUV420, VPU_VP9 */ + { 64, 8, 8, 8, 16}, /* PIX_SIZE_8 */ + { 32, 8, 8, 8, 8}, /* PIX_SIZE_16 */ + { 16, 4, 4, 8, 8}, /* PIX_SIZE_32 */ + }; + u32 offset; + u32 div_64byte_mod, pix_in_64byte; + + pix_in_64byte = pix_in_64byte_map[ch->pix_size][ch->tile]; + + div_64byte_mod = pix_wide % pix_in_64byte; + offset = (div_64byte_mod == 0) ? 0 : (pix_in_64byte - div_64byte_mod); + + return pix_wide + offset; +} + +static u32 dcss_dpr_y_pix_high_adjust(struct dcss_dpr_ch *ch, u32 pix_high, + u32 pix_format) +{ + u8 num_rows_buf = ch->rtram_4line_en ? 4 : 8; + u32 offset, pix_y_mod; + + pix_y_mod = pix_high % num_rows_buf; + offset = pix_y_mod ? (num_rows_buf - pix_y_mod) : 0; + + return pix_high + offset; +} + +void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres) +{ + struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; + u32 pix_format = ch->format.format; + u32 gap = DCSS_DPR_FRAME_2P_BASE_ADDR - DCSS_DPR_FRAME_1P_BASE_ADDR; + int plane, max_planes = 1; + u32 pix_x_wide, pix_y_high; + + if (pix_format == DRM_FORMAT_NV12 || + pix_format == DRM_FORMAT_NV21) + max_planes = 2; + + for (plane = 0; plane < max_planes; plane++) { + yres = plane == 1 ? 
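/* the NV12/NV21 chroma plane is vertically subsampled by two */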
yres >> 1 : yres; + + pix_x_wide = dcss_dpr_x_pix_wide_adjust(ch, xres, pix_format); + pix_y_high = dcss_dpr_y_pix_high_adjust(ch, yres, pix_format); + + dcss_dpr_write(ch, pix_x_wide, + DCSS_DPR_FRAME_1P_PIX_X_CTRL + plane * gap); + dcss_dpr_write(ch, pix_y_high, + DCSS_DPR_FRAME_1P_PIX_Y_CTRL + plane * gap); + + dcss_dpr_write(ch, 2, DCSS_DPR_FRAME_1P_CTRL0 + plane * gap); + } +} + +void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr, + u32 chroma_base_addr, u16 pitch) +{ + struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; + + dcss_dpr_write(ch, luma_base_addr, DCSS_DPR_FRAME_1P_BASE_ADDR); + + dcss_dpr_write(ch, chroma_base_addr, DCSS_DPR_FRAME_2P_BASE_ADDR); + + ch->frame_ctrl &= ~PITCH_MASK; + ch->frame_ctrl |= (((u32)pitch << PITCH_POS) & PITCH_MASK); +} + +static void dcss_dpr_argb_comp_sel(struct dcss_dpr_ch *ch, int a_sel, int r_sel, + int g_sel, int b_sel) +{ + u32 sel; + + sel = ((a_sel << A_COMP_SEL_POS) & A_COMP_SEL_MASK) | + ((r_sel << R_COMP_SEL_POS) & R_COMP_SEL_MASK) | + ((g_sel << G_COMP_SEL_POS) & G_COMP_SEL_MASK) | + ((b_sel << B_COMP_SEL_POS) & B_COMP_SEL_MASK); + + ch->mode_ctrl &= ~(A_COMP_SEL_MASK | R_COMP_SEL_MASK | + G_COMP_SEL_MASK | B_COMP_SEL_MASK); + ch->mode_ctrl |= sel; +} + +static void dcss_dpr_pix_size_set(struct dcss_dpr_ch *ch, + const struct drm_format_info *format) +{ + u32 val; + + switch (format->format) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + val = PIX_SIZE_8; + break; + + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + val = PIX_SIZE_16; + break; + + default: + val = PIX_SIZE_32; + break; + } + + ch->pix_size = val; + + ch->mode_ctrl &= ~PIX_SIZE_MASK; + ch->mode_ctrl |= ((val << PIX_SIZE_POS) & PIX_SIZE_MASK); +} + +static void dcss_dpr_uv_swap(struct dcss_dpr_ch *ch, bool swap) +{ + ch->mode_ctrl &= ~PIX_UV_SWAP; + ch->mode_ctrl |= (swap ? PIX_UV_SWAP : 0); +} + +static void dcss_dpr_y_uv_swap(struct dcss_dpr_ch *ch, bool swap) +{ + ch->mode_ctrl &= ~PIX_LUMA_UV_SWAP; + ch->mode_ctrl |= (swap ? PIX_LUMA_UV_SWAP : 0); +} + +static void dcss_dpr_2plane_en(struct dcss_dpr_ch *ch, bool en) +{ + ch->mode_ctrl &= ~COMP_2PLANE_EN; + ch->mode_ctrl |= (en ? COMP_2PLANE_EN : 0); +} + +static void dcss_dpr_yuv_en(struct dcss_dpr_ch *ch, bool en) +{ + ch->mode_ctrl &= ~YUV_EN; + ch->mode_ctrl |= (en ? YUV_EN : 0); +} + +void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en) +{ + struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; + u32 sys_ctrl; + + sys_ctrl = (en ? 
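/* run the channel continuously */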
REPEAT_EN | RUN_EN : 0); + + if (en) { + dcss_dpr_write(ch, ch->mode_ctrl, DCSS_DPR_MODE_CTRL0); + dcss_dpr_write(ch, ch->frame_ctrl, DCSS_DPR_FRAME_CTRL0); + dcss_dpr_write(ch, ch->rtram_ctrl, DCSS_DPR_RTRAM_CTRL0); + } + + if (ch->sys_ctrl != sys_ctrl) + ch->sys_ctrl_chgd = true; + + ch->sys_ctrl = sys_ctrl; +} + +struct rgb_comp_sel { + u32 drm_format; + int a_sel; + int r_sel; + int g_sel; + int b_sel; +}; + +static struct rgb_comp_sel comp_sel_map[] = { + {DRM_FORMAT_ARGB8888, 3, 2, 1, 0}, + {DRM_FORMAT_XRGB8888, 3, 2, 1, 0}, + {DRM_FORMAT_ABGR8888, 3, 0, 1, 2}, + {DRM_FORMAT_XBGR8888, 3, 0, 1, 2}, + {DRM_FORMAT_RGBA8888, 0, 3, 2, 1}, + {DRM_FORMAT_RGBX8888, 0, 3, 2, 1}, + {DRM_FORMAT_BGRA8888, 0, 1, 2, 3}, + {DRM_FORMAT_BGRX8888, 0, 1, 2, 3}, +}; + +static int to_comp_sel(u32 pix_fmt, int *a_sel, int *r_sel, int *g_sel, + int *b_sel) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(comp_sel_map); i++) { + if (comp_sel_map[i].drm_format == pix_fmt) { + *a_sel = comp_sel_map[i].a_sel; + *r_sel = comp_sel_map[i].r_sel; + *g_sel = comp_sel_map[i].g_sel; + *b_sel = comp_sel_map[i].b_sel; + + return 0; + } + } + + return -1; +} + +static void dcss_dpr_rtram_set(struct dcss_dpr_ch *ch, u32 pix_format) +{ + u32 val, mask; + + switch (pix_format) { + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV12: + ch->rtram_3buf_en = true; + ch->rtram_4line_en = false; + break; + + default: + ch->rtram_3buf_en = true; + ch->rtram_4line_en = true; + break; + } + + val = (ch->rtram_4line_en ? RTR_4LINE_BUF_EN : 0); + val |= (ch->rtram_3buf_en ? RTR_3BUF_EN : 0); + mask = RTR_4LINE_BUF_EN | RTR_3BUF_EN; + + ch->mode_ctrl &= ~mask; + ch->mode_ctrl |= (val & mask); + + val = (ch->rtram_4line_en ? 0 : NUM_ROWS_ACTIVE); + val |= (3 << THRES_LOW_POS) & THRES_LOW_MASK; + val |= (4 << THRES_HIGH_POS) & THRES_HIGH_MASK; + mask = THRES_LOW_MASK | THRES_HIGH_MASK | NUM_ROWS_ACTIVE; + + ch->rtram_ctrl &= ~mask; + ch->rtram_ctrl |= (val & mask); +} + +static void dcss_dpr_setup_components(struct dcss_dpr_ch *ch, + const struct drm_format_info *format) +{ + int a_sel, r_sel, g_sel, b_sel; + bool uv_swap, y_uv_swap; + + switch (format->format) { + case DRM_FORMAT_YVYU: + uv_swap = true; + y_uv_swap = true; + break; + + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV21: + uv_swap = true; + y_uv_swap = false; + break; + + case DRM_FORMAT_YUYV: + uv_swap = false; + y_uv_swap = true; + break; + + default: + uv_swap = false; + y_uv_swap = false; + break; + } + + dcss_dpr_uv_swap(ch, uv_swap); + + dcss_dpr_y_uv_swap(ch, y_uv_swap); + + if (!format->is_yuv) { + if (!to_comp_sel(format->format, &a_sel, &r_sel, + &g_sel, &b_sel)) { + dcss_dpr_argb_comp_sel(ch, a_sel, r_sel, g_sel, b_sel); + } else { + dcss_dpr_argb_comp_sel(ch, 3, 2, 1, 0); + } + } else { + dcss_dpr_argb_comp_sel(ch, 0, 0, 0, 0); + } +} + +static void dcss_dpr_tile_set(struct dcss_dpr_ch *ch, uint64_t modifier) +{ + switch (ch->ch_num) { + case 0: + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + ch->tile = TILE_LINEAR; + break; + case DRM_FORMAT_MOD_VIVANTE_TILED: + ch->tile = TILE_GPU_STANDARD; + break; + case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED: + ch->tile = TILE_GPU_SUPER; + break; + default: + WARN_ON(1); + break; + } + break; + case 1: + case 2: + ch->tile = TILE_LINEAR; + break; + default: + WARN_ON(1); + return; + } + + ch->mode_ctrl &= ~TILE_TYPE_MASK; + ch->mode_ctrl |= ((ch->tile << TILE_TYPE_POS) & TILE_TYPE_MASK); +} + +void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num, + const struct drm_format_info *format, u64 modifier) +{ + struct dcss_dpr_ch *ch = 
&dpr->ch[ch_num]; + + ch->format = *format; + + dcss_dpr_yuv_en(ch, format->is_yuv); + + dcss_dpr_pix_size_set(ch, format); + + dcss_dpr_setup_components(ch, format); + + dcss_dpr_2plane_en(ch, format->num_planes == 2); + + dcss_dpr_rtram_set(ch, format->format); + + dcss_dpr_tile_set(ch, modifier); +} + +/* This function will be called from interrupt context. */ +void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr) +{ + int chnum; + + dcss_ctxld_assert_locked(dpr->ctxld); + + for (chnum = 0; chnum < 3; chnum++) { + struct dcss_dpr_ch *ch = &dpr->ch[chnum]; + + if (ch->sys_ctrl_chgd) { + dcss_ctxld_write_irqsafe(dpr->ctxld, dpr->ctx_id, + ch->sys_ctrl, + ch->base_ofs + + DCSS_DPR_SYSTEM_CTRL0); + ch->sys_ctrl_chgd = false; + } + } +} + +void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation) +{ + struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; + + ch->frame_ctrl &= ~(HFLIP_EN | VFLIP_EN | ROT_ENC_MASK); + + ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_X ? HFLIP_EN : 0; + ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_Y ? VFLIP_EN : 0; + + if (rotation & DRM_MODE_ROTATE_90) + ch->frame_ctrl |= 1 << ROT_ENC_POS; + else if (rotation & DRM_MODE_ROTATE_180) + ch->frame_ctrl |= 2 << ROT_ENC_POS; + else if (rotation & DRM_MODE_ROTATE_270) + ch->frame_ctrl |= 3 << ROT_ENC_POS; +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c new file mode 100644 index 000000000000..8dc2f85c514b --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <drm/drm_of.h> + +#include "dcss-dev.h" +#include "dcss-kms.h" + +struct dcss_drv { + struct dcss_dev *dcss; + struct dcss_kms_dev *kms; +}; + +struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev) +{ + struct dcss_drv *mdrv = dev_get_drvdata(dev); + + return mdrv ? mdrv->dcss : NULL; +} + +struct drm_device *dcss_drv_dev_to_drm(struct device *dev) +{ + struct dcss_drv *mdrv = dev_get_drvdata(dev); + + return mdrv ? 
&mdrv->kms->base : NULL; +} + +static int dcss_drv_platform_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *remote; + struct dcss_drv *mdrv; + int err = 0; + bool hdmi_output = true; + + if (!dev->of_node) + return -ENODEV; + + remote = of_graph_get_remote_node(dev->of_node, 0, 0); + if (!remote) + return -ENODEV; + + hdmi_output = !of_device_is_compatible(remote, "fsl,imx8mq-nwl-dsi"); + + of_node_put(remote); + + mdrv = kzalloc(sizeof(*mdrv), GFP_KERNEL); + if (!mdrv) + return -ENOMEM; + + mdrv->dcss = dcss_dev_create(dev, hdmi_output); + if (IS_ERR(mdrv->dcss)) { + err = PTR_ERR(mdrv->dcss); + goto err; + } + + dev_set_drvdata(dev, mdrv); + + mdrv->kms = dcss_kms_attach(mdrv->dcss); + if (IS_ERR(mdrv->kms)) { + err = PTR_ERR(mdrv->kms); + goto dcss_shutoff; + } + + return 0; + +dcss_shutoff: + dcss_dev_destroy(mdrv->dcss); + + dev_set_drvdata(dev, NULL); + +err: + kfree(mdrv); + return err; +} + +static int dcss_drv_platform_remove(struct platform_device *pdev) +{ + struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev); + + if (!mdrv) + return 0; + + dcss_kms_detach(mdrv->kms); + dcss_dev_destroy(mdrv->dcss); + + dev_set_drvdata(&pdev->dev, NULL); + + kfree(mdrv); + + return 0; +} + +static struct dcss_type_data dcss_types[] = { + [DCSS_IMX8MQ] = { + .name = "DCSS_IMX8MQ", + .blkctl_ofs = 0x2F000, + .ctxld_ofs = 0x23000, + .dtg_ofs = 0x20000, + .scaler_ofs = 0x1C000, + .ss_ofs = 0x1B000, + .dpr_ofs = 0x18000, + }, +}; + +static const struct of_device_id dcss_of_match[] = { + { .compatible = "nxp,imx8mq-dcss", .data = &dcss_types[DCSS_IMX8MQ], }, + {}, +}; + +MODULE_DEVICE_TABLE(of, dcss_of_match); + +static const struct dev_pm_ops dcss_dev_pm = { + SET_SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume) + SET_RUNTIME_PM_OPS(dcss_dev_runtime_suspend, + dcss_dev_runtime_resume, NULL) +}; + +static struct platform_driver dcss_platform_driver = { + .probe = dcss_drv_platform_probe, + .remove = dcss_drv_platform_remove, + .driver = { + .name = "imx-dcss", + .of_match_table = dcss_of_match, + .pm = &dcss_dev_pm, + }, +}; + +module_platform_driver(dcss_platform_driver); + +MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@nxp.com>"); +MODULE_DESCRIPTION("DCSS driver for i.MX8MQ"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/imx/dcss/dcss-dtg.c b/drivers/gpu/drm/imx/dcss/dcss-dtg.c new file mode 100644 index 000000000000..30de00540f63 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-dtg.c @@ -0,0 +1,409 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
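+ *
+ * DTG (display timing generator): generates the display sync timings,
+ * drives the LINE0/LINE1 interrupts used for CTXLD kicks and VBLANK,
+ * and blends the three channels over the background.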
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include "dcss-dev.h" + +#define DCSS_DTG_TC_CONTROL_STATUS 0x00 +#define CH3_EN BIT(0) +#define CH2_EN BIT(1) +#define CH1_EN BIT(2) +#define OVL_DATA_MODE BIT(3) +#define BLENDER_VIDEO_ALPHA_SEL BIT(7) +#define DTG_START BIT(8) +#define DBY_MODE_EN BIT(9) +#define CH1_ALPHA_SEL BIT(10) +#define CSS_PIX_COMP_SWAP_POS 12 +#define CSS_PIX_COMP_SWAP_MASK GENMASK(14, 12) +#define DEFAULT_FG_ALPHA_POS 24 +#define DEFAULT_FG_ALPHA_MASK GENMASK(31, 24) +#define DCSS_DTG_TC_DTG 0x04 +#define DCSS_DTG_TC_DISP_TOP 0x08 +#define DCSS_DTG_TC_DISP_BOT 0x0C +#define DCSS_DTG_TC_CH1_TOP 0x10 +#define DCSS_DTG_TC_CH1_BOT 0x14 +#define DCSS_DTG_TC_CH2_TOP 0x18 +#define DCSS_DTG_TC_CH2_BOT 0x1C +#define DCSS_DTG_TC_CH3_TOP 0x20 +#define DCSS_DTG_TC_CH3_BOT 0x24 +#define TC_X_POS 0 +#define TC_X_MASK GENMASK(12, 0) +#define TC_Y_POS 16 +#define TC_Y_MASK GENMASK(28, 16) +#define DCSS_DTG_TC_CTXLD 0x28 +#define TC_CTXLD_DB_Y_POS 0 +#define TC_CTXLD_DB_Y_MASK GENMASK(12, 0) +#define TC_CTXLD_SB_Y_POS 16 +#define TC_CTXLD_SB_Y_MASK GENMASK(28, 16) +#define DCSS_DTG_TC_CH1_BKRND 0x2C +#define DCSS_DTG_TC_CH2_BKRND 0x30 +#define BKRND_R_Y_COMP_POS 20 +#define BKRND_R_Y_COMP_MASK GENMASK(29, 20) +#define BKRND_G_U_COMP_POS 10 +#define BKRND_G_U_COMP_MASK GENMASK(19, 10) +#define BKRND_B_V_COMP_POS 0 +#define BKRND_B_V_COMP_MASK GENMASK(9, 0) +#define DCSS_DTG_BLENDER_DBY_RANGEINV 0x38 +#define DCSS_DTG_BLENDER_DBY_RANGEMIN 0x3C +#define DCSS_DTG_BLENDER_DBY_BDP 0x40 +#define DCSS_DTG_BLENDER_BKRND_I 0x44 +#define DCSS_DTG_BLENDER_BKRND_P 0x48 +#define DCSS_DTG_BLENDER_BKRND_T 0x4C +#define DCSS_DTG_LINE0_INT 0x50 +#define DCSS_DTG_LINE1_INT 0x54 +#define DCSS_DTG_BG_ALPHA_DEFAULT 0x58 +#define DCSS_DTG_INT_STATUS 0x5C +#define DCSS_DTG_INT_CONTROL 0x60 +#define DCSS_DTG_TC_CH3_BKRND 0x64 +#define DCSS_DTG_INT_MASK 0x68 +#define LINE0_IRQ BIT(0) +#define LINE1_IRQ BIT(1) +#define LINE2_IRQ BIT(2) +#define LINE3_IRQ BIT(3) +#define DCSS_DTG_LINE2_INT 0x6C +#define DCSS_DTG_LINE3_INT 0x70 +#define DCSS_DTG_DBY_OL 0x74 +#define DCSS_DTG_DBY_BL 0x78 +#define DCSS_DTG_DBY_EL 0x7C + +struct dcss_dtg { + struct device *dev; + struct dcss_ctxld *ctxld; + void __iomem *base_reg; + u32 base_ofs; + + u32 ctx_id; + + bool in_use; + + u32 dis_ulc_x; + u32 dis_ulc_y; + + u32 control_status; + u32 alpha; + u32 alpha_cfg; + + int ctxld_kick_irq; + bool ctxld_kick_irq_en; +}; + +static void dcss_dtg_write(struct dcss_dtg *dtg, u32 val, u32 ofs) +{ + if (!dtg->in_use) + dcss_writel(val, dtg->base_reg + ofs); + + dcss_ctxld_write(dtg->ctxld, dtg->ctx_id, + val, dtg->base_ofs + ofs); +} + +static irqreturn_t dcss_dtg_irq_handler(int irq, void *data) +{ + struct dcss_dtg *dtg = data; + u32 status; + + status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS); + + if (!(status & LINE0_IRQ)) + return IRQ_NONE; + + dcss_ctxld_kick(dtg->ctxld); + + dcss_writel(status & LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL); + + return IRQ_HANDLED; +} + +static int dcss_dtg_irq_config(struct dcss_dtg *dtg, + struct platform_device *pdev) +{ + int ret; + + dtg->ctxld_kick_irq = platform_get_irq_byname(pdev, "ctxld_kick"); + if (dtg->ctxld_kick_irq < 0) + return dtg->ctxld_kick_irq; + + dcss_update(0, LINE0_IRQ | LINE1_IRQ, + dtg->base_reg + DCSS_DTG_INT_MASK); + + ret = request_irq(dtg->ctxld_kick_irq, dcss_dtg_irq_handler, + 0, "dcss_ctxld_kick", dtg); + if (ret) { + 
dev_err(dtg->dev, "dtg: irq request failed.\n"); + return ret; + } + + disable_irq(dtg->ctxld_kick_irq); + + dtg->ctxld_kick_irq_en = false; + + return 0; +} + +int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base) +{ + int ret = 0; + struct dcss_dtg *dtg; + + dtg = kzalloc(sizeof(*dtg), GFP_KERNEL); + if (!dtg) + return -ENOMEM; + + dcss->dtg = dtg; + dtg->dev = dcss->dev; + dtg->ctxld = dcss->ctxld; + + dtg->base_reg = ioremap(dtg_base, SZ_4K); + if (!dtg->base_reg) { + dev_err(dcss->dev, "dtg: unable to remap dtg base\n"); + ret = -ENOMEM; + goto err_ioremap; + } + + dtg->base_ofs = dtg_base; + dtg->ctx_id = CTX_DB; + + dtg->alpha = 255; + + dtg->control_status |= OVL_DATA_MODE | BLENDER_VIDEO_ALPHA_SEL | + ((dtg->alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK); + + ret = dcss_dtg_irq_config(dtg, to_platform_device(dcss->dev)); + if (ret) + goto err_irq; + + return 0; + +err_irq: + iounmap(dtg->base_reg); + +err_ioremap: + kfree(dtg); + + return ret; +} + +void dcss_dtg_exit(struct dcss_dtg *dtg) +{ + free_irq(dtg->ctxld_kick_irq, dtg); + + if (dtg->base_reg) + iounmap(dtg->base_reg); + + kfree(dtg); +} + +void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm) +{ + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dtg->dev); + u16 dtg_lrc_x, dtg_lrc_y; + u16 dis_ulc_x, dis_ulc_y; + u16 dis_lrc_x, dis_lrc_y; + u32 sb_ctxld_trig, db_ctxld_trig; + u32 pixclock = vm->pixelclock; + u32 actual_clk; + + dtg_lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len + + vm->hactive - 1; + dtg_lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len + + vm->vactive - 1; + dis_ulc_x = vm->hsync_len + vm->hback_porch - 1; + dis_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch - 1; + dis_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1; + dis_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch + + vm->vactive - 1; + + clk_disable_unprepare(dcss->pix_clk); + clk_set_rate(dcss->pix_clk, vm->pixelclock); + clk_prepare_enable(dcss->pix_clk); + + actual_clk = clk_get_rate(dcss->pix_clk); + if (pixclock != actual_clk) { + dev_info(dtg->dev, + "Pixel clock set to %u kHz instead of %u kHz.\n", + (actual_clk / 1000), (pixclock / 1000)); + } + + dcss_dtg_write(dtg, ((dtg_lrc_y << TC_Y_POS) | dtg_lrc_x), + DCSS_DTG_TC_DTG); + dcss_dtg_write(dtg, ((dis_ulc_y << TC_Y_POS) | dis_ulc_x), + DCSS_DTG_TC_DISP_TOP); + dcss_dtg_write(dtg, ((dis_lrc_y << TC_Y_POS) | dis_lrc_x), + DCSS_DTG_TC_DISP_BOT); + + dtg->dis_ulc_x = dis_ulc_x; + dtg->dis_ulc_y = dis_ulc_y; + + sb_ctxld_trig = ((0 * dis_lrc_y / 100) << TC_CTXLD_SB_Y_POS) & + TC_CTXLD_SB_Y_MASK; + db_ctxld_trig = ((99 * dis_lrc_y / 100) << TC_CTXLD_DB_Y_POS) & + TC_CTXLD_DB_Y_MASK; + + dcss_dtg_write(dtg, sb_ctxld_trig | db_ctxld_trig, DCSS_DTG_TC_CTXLD); + + /* vblank trigger */ + dcss_dtg_write(dtg, 0, DCSS_DTG_LINE1_INT); + + /* CTXLD trigger */ + dcss_dtg_write(dtg, ((90 * dis_lrc_y) / 100) << 16, DCSS_DTG_LINE0_INT); +} + +void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num, + int px, int py, int pw, int ph) +{ + u16 p_ulc_x, p_ulc_y; + u16 p_lrc_x, p_lrc_y; + + p_ulc_x = dtg->dis_ulc_x + px; + p_ulc_y = dtg->dis_ulc_y + py; + p_lrc_x = p_ulc_x + pw; + p_lrc_y = p_ulc_y + ph; + + if (!px && !py && !pw && !ph) { + dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num); + dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num); + } else { + dcss_dtg_write(dtg, ((p_ulc_y << TC_Y_POS) | p_ulc_x), + DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num); + dcss_dtg_write(dtg, ((p_lrc_y << TC_Y_POS) | p_lrc_x), + 
DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num); + } +} + +bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha) +{ + if (ch_num) + return false; + + return alpha != dtg->alpha; +} + +void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num, + const struct drm_format_info *format, int alpha) +{ + /* we care about alpha only when channel 0 is concerned */ + if (ch_num) + return; + + /* + * Use global alpha if pixel format does not have alpha channel or the + * user explicitly chose to use global alpha (i.e. alpha is not OPAQUE). + */ + if (!format->has_alpha || alpha != 255) + dtg->alpha_cfg = (alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK; + else /* use per-pixel alpha otherwise */ + dtg->alpha_cfg = CH1_ALPHA_SEL; + + dtg->alpha = alpha; +} + +void dcss_dtg_css_set(struct dcss_dtg *dtg) +{ + dtg->control_status |= + (0x5 << CSS_PIX_COMP_SWAP_POS) & CSS_PIX_COMP_SWAP_MASK; +} + +void dcss_dtg_enable(struct dcss_dtg *dtg) +{ + dtg->control_status |= DTG_START; + + dtg->control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK); + dtg->control_status |= dtg->alpha_cfg; + + dcss_dtg_write(dtg, dtg->control_status, DCSS_DTG_TC_CONTROL_STATUS); + + dtg->in_use = true; +} + +void dcss_dtg_shutoff(struct dcss_dtg *dtg) +{ + dtg->control_status &= ~DTG_START; + + dcss_writel(dtg->control_status, + dtg->base_reg + DCSS_DTG_TC_CONTROL_STATUS); + + dtg->in_use = false; +} + +bool dcss_dtg_is_enabled(struct dcss_dtg *dtg) +{ + return dtg->in_use; +} + +void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en) +{ + u32 ch_en_map[] = {CH1_EN, CH2_EN, CH3_EN}; + u32 control_status; + + control_status = dtg->control_status & ~ch_en_map[ch_num]; + control_status |= en ? ch_en_map[ch_num] : 0; + + control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK); + control_status |= dtg->alpha_cfg; + + if (dtg->control_status != control_status) + dcss_dtg_write(dtg, control_status, DCSS_DTG_TC_CONTROL_STATUS); + + dtg->control_status = control_status; +} + +void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en) +{ + u32 status; + u32 mask = en ? LINE1_IRQ : 0; + + if (en) { + status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS); + dcss_writel(status & LINE1_IRQ, + dtg->base_reg + DCSS_DTG_INT_CONTROL); + } + + dcss_update(mask, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK); +} + +void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en) +{ + u32 status; + u32 mask = en ? LINE0_IRQ : 0; + + if (en) { + status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS); + + if (!dtg->ctxld_kick_irq_en) { + dcss_writel(status & LINE0_IRQ, + dtg->base_reg + DCSS_DTG_INT_CONTROL); + enable_irq(dtg->ctxld_kick_irq); + dtg->ctxld_kick_irq_en = true; + dcss_update(mask, LINE0_IRQ, + dtg->base_reg + DCSS_DTG_INT_MASK); + } + + return; + } + + if (!dtg->ctxld_kick_irq_en) + return; + + disable_irq_nosync(dtg->ctxld_kick_irq); + dtg->ctxld_kick_irq_en = false; + + dcss_update(mask, LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK); +} + +void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg) +{ + dcss_update(LINE1_IRQ, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL); +} + +bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg) +{ + return !!(dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS) & LINE1_IRQ); +} + diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c new file mode 100644 index 000000000000..135a62366ab8 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
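+ *
+ * KMS glue: sets up the DRM device, mode config, encoder and bridge
+ * connector around the single DCSS CRTC.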
+ */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "dcss-dev.h" +#include "dcss-kms.h" + +DEFINE_DRM_GEM_CMA_FOPS(dcss_cma_fops); + +static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = { + .fb_create = drm_gem_fb_create, + .output_poll_changed = drm_fb_helper_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static struct drm_driver dcss_kms_driver = { + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, + .gem_free_object_unlocked = drm_gem_cma_free_object, + .gem_vm_ops = &drm_gem_cma_vm_ops, + .dumb_create = drm_gem_cma_dumb_create, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, + .gem_prime_vmap = drm_gem_cma_prime_vmap, + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, + .gem_prime_mmap = drm_gem_cma_prime_mmap, + .fops = &dcss_cma_fops, + .name = "imx-dcss", + .desc = "i.MX8MQ Display Subsystem", + .date = "20190917", + .major = 1, + .minor = 0, + .patchlevel = 0, +}; + +static const struct drm_mode_config_helper_funcs dcss_mode_config_helpers = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; + +static void dcss_kms_mode_config_init(struct dcss_kms_dev *kms) +{ + struct drm_mode_config *config = &kms->base.mode_config; + + drm_mode_config_init(&kms->base); + + config->min_width = 1; + config->min_height = 1; + config->max_width = 4096; + config->max_height = 4096; + config->allow_fb_modifiers = true; + config->normalize_zpos = true; + + config->funcs = &dcss_drm_mode_config_funcs; + config->helper_private = &dcss_mode_config_helpers; +} + +static const struct drm_encoder_funcs dcss_kms_simple_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int dcss_kms_bridge_connector_init(struct dcss_kms_dev *kms) +{ + struct drm_device *ddev = &kms->base; + struct drm_encoder *encoder = &kms->encoder; + struct drm_crtc *crtc = (struct drm_crtc *)&kms->crtc; + struct drm_panel *panel; + struct drm_bridge *bridge; + int ret; + + ret = drm_of_find_panel_or_bridge(ddev->dev->of_node, 0, 0, + &panel, &bridge); + if (ret) + return ret; + + if (!bridge) { + dev_err(ddev->dev, "No bridge found %d.\n", ret); + return -ENODEV; + } + + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = drm_encoder_init(&kms->base, encoder, + &dcss_kms_simple_encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); + if (ret) { + dev_err(ddev->dev, "Failed initializing encoder %d.\n", ret); + return ret; + } + + ret = drm_bridge_attach(encoder, bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret < 0) { + dev_err(ddev->dev, "Unable to attach bridge %pOF\n", + bridge->of_node); + return ret; + } + + kms->connector = drm_bridge_connector_init(ddev, encoder); + if (IS_ERR(kms->connector)) { + dev_err(ddev->dev, "Unable to create bridge connector.\n"); + return PTR_ERR(kms->connector); + } + + drm_connector_attach_encoder(kms->connector, encoder); + + return 0; +} + +struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss) +{ + 
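/*
+ * Bring-up order: DRM device and mode config, VBLANK support, bridge
+ * connector, CRTC, then device registration.
+ */
+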
struct dcss_kms_dev *kms; + struct drm_device *drm; + struct dcss_crtc *crtc; + int ret; + + kms = devm_drm_dev_alloc(dcss->dev, &dcss_kms_driver, + struct dcss_kms_dev, base); + if (IS_ERR(kms)) + return kms; + + drm = &kms->base; + crtc = &kms->crtc; + + drm->dev_private = dcss; + + dcss_kms_mode_config_init(kms); + + ret = drm_vblank_init(drm, 1); + if (ret) + goto cleanup_mode_config; + + drm->irq_enabled = true; + + ret = dcss_kms_bridge_connector_init(kms); + if (ret) + goto cleanup_mode_config; + + ret = dcss_crtc_init(crtc, drm); + if (ret) + goto cleanup_mode_config; + + drm_mode_config_reset(drm); + + drm_kms_helper_poll_init(drm); + + drm_bridge_connector_enable_hpd(kms->connector); + + ret = drm_dev_register(drm, 0); + if (ret) + goto cleanup_crtc; + + drm_fbdev_generic_setup(drm, 32); + + return kms; + +cleanup_crtc: + drm_bridge_connector_disable_hpd(kms->connector); + drm_kms_helper_poll_fini(drm); + dcss_crtc_deinit(crtc, drm); + +cleanup_mode_config: + drm_mode_config_cleanup(drm); + drm->dev_private = NULL; + + return ERR_PTR(ret); +} + +void dcss_kms_detach(struct dcss_kms_dev *kms) +{ + struct drm_device *drm = &kms->base; + + drm_dev_unregister(drm); + drm_bridge_connector_disable_hpd(kms->connector); + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); + drm_crtc_vblank_off(&kms->crtc.base); + drm->irq_enabled = false; + drm_mode_config_cleanup(drm); + dcss_crtc_deinit(&kms->crtc, drm); + drm->dev_private = NULL; +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.h b/drivers/gpu/drm/imx/dcss/dcss-kms.h new file mode 100644 index 000000000000..dfe5dd99eea3 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-kms.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 NXP. + */ + +#ifndef _DCSS_KMS_H_ +#define _DCSS_KMS_H_ + +#include <drm/drm_encoder.h> + +struct dcss_plane { + struct drm_plane base; + + int ch_num; +}; + +struct dcss_crtc { + struct drm_crtc base; + struct drm_crtc_state *state; + + struct dcss_plane *plane[3]; + + int irq; + + bool disable_ctxld_kick_irq; +}; + +struct dcss_kms_dev { + struct drm_device base; + struct dcss_crtc crtc; + struct drm_encoder encoder; + struct drm_connector *connector; +}; + +struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss); +void dcss_kms_detach(struct dcss_kms_dev *kms); +int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm); +void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm); +struct dcss_plane *dcss_plane_init(struct drm_device *drm, + unsigned int possible_crtcs, + enum drm_plane_type type, + unsigned int zpos); + +#endif diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c new file mode 100644 index 000000000000..961d671f171b --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
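+ *
+ * DRM plane implementation: translates plane formats, modifiers and
+ * atomic state into DPR, scaler and DTG channel programming.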
+ */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_cma_helper.h> + +#include "dcss-dev.h" +#include "dcss-kms.h" + +static const u32 dcss_common_formats[] = { + /* RGB */ + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_RGBX1010102, + DRM_FORMAT_BGRX1010102, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, +}; + +static const u64 dcss_video_format_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + +static const u64 dcss_graphics_format_modifiers[] = { + DRM_FORMAT_MOD_VIVANTE_TILED, + DRM_FORMAT_MOD_VIVANTE_SUPER_TILED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + +static inline struct dcss_plane *to_dcss_plane(struct drm_plane *p) +{ + return container_of(p, struct dcss_plane, base); +} + +static inline bool dcss_plane_fb_is_linear(const struct drm_framebuffer *fb) +{ + return ((fb->flags & DRM_MODE_FB_MODIFIERS) == 0) || + ((fb->flags & DRM_MODE_FB_MODIFIERS) != 0 && + fb->modifier == DRM_FORMAT_MOD_LINEAR); +} + +static void dcss_plane_destroy(struct drm_plane *plane) +{ + struct dcss_plane *dcss_plane = container_of(plane, struct dcss_plane, + base); + + drm_plane_cleanup(plane); + kfree(dcss_plane); +} + +static bool dcss_plane_format_mod_supported(struct drm_plane *plane, + u32 format, + u64 modifier) +{ + switch (plane->type) { + case DRM_PLANE_TYPE_PRIMARY: + switch (format) { + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB2101010: + return modifier == DRM_FORMAT_MOD_LINEAR || + modifier == DRM_FORMAT_MOD_VIVANTE_TILED || + modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED; + default: + return modifier == DRM_FORMAT_MOD_LINEAR; + } + break; + case DRM_PLANE_TYPE_OVERLAY: + return modifier == DRM_FORMAT_MOD_LINEAR; + default: + return false; + } +} + +static const struct drm_plane_funcs dcss_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = dcss_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .format_mod_supported = dcss_plane_format_mod_supported, +}; + +static bool dcss_plane_can_rotate(const struct drm_format_info *format, + bool mod_present, u64 modifier, + unsigned int rotation) +{ + bool linear_format = !mod_present || + (mod_present && modifier == DRM_FORMAT_MOD_LINEAR); + u32 supported_rotation = DRM_MODE_ROTATE_0; + + if (!format->is_yuv && linear_format) + supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | + DRM_MODE_REFLECT_MASK; + else if (!format->is_yuv && + modifier == DRM_FORMAT_MOD_VIVANTE_TILED) + supported_rotation = DRM_MODE_ROTATE_MASK | + DRM_MODE_REFLECT_MASK; + else if (format->is_yuv && linear_format && + (format->format == DRM_FORMAT_NV12 || + format->format == DRM_FORMAT_NV21)) + supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | + DRM_MODE_REFLECT_MASK; + + return !!(rotation & supported_rotation); +} + +static bool dcss_plane_is_source_size_allowed(u16 src_w, u16 src_h, u32 pix_fmt) +{ + if (src_w < 64 && + (pix_fmt == DRM_FORMAT_NV12 || pix_fmt == DRM_FORMAT_NV21)) + return 
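+		/* 4:2:0 formats need a source width of at least 64 */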
false; + else if (src_w < 32 && + (pix_fmt == DRM_FORMAT_UYVY || pix_fmt == DRM_FORMAT_VYUY || + pix_fmt == DRM_FORMAT_YUYV || pix_fmt == DRM_FORMAT_YVYU)) + return false; + + return src_w >= 16 && src_h >= 8; +} + +static int dcss_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct dcss_plane *dcss_plane = to_dcss_plane(plane); + struct dcss_dev *dcss = plane->dev->dev_private; + struct drm_framebuffer *fb = state->fb; + bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY; + struct drm_gem_cma_object *cma_obj; + struct drm_crtc_state *crtc_state; + int hdisplay, vdisplay; + int min, max; + int ret; + + if (!fb || !state->crtc) + return 0; + + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + WARN_ON(!cma_obj); + + crtc_state = drm_atomic_get_existing_crtc_state(state->state, + state->crtc); + + hdisplay = crtc_state->adjusted_mode.hdisplay; + vdisplay = crtc_state->adjusted_mode.vdisplay; + + if (!dcss_plane_is_source_size_allowed(state->src_w >> 16, + state->src_h >> 16, + fb->format->format)) { + DRM_DEBUG_KMS("Source plane size is not allowed!\n"); + return -EINVAL; + } + + dcss_scaler_get_min_max_ratios(dcss->scaler, dcss_plane->ch_num, + &min, &max); + + ret = drm_atomic_helper_check_plane_state(state, crtc_state, + min, max, !is_primary_plane, + false); + if (ret) + return ret; + + if (!state->visible) + return 0; + + if (!dcss_plane_can_rotate(fb->format, + !!(fb->flags & DRM_MODE_FB_MODIFIERS), + fb->modifier, + state->rotation)) { + DRM_DEBUG_KMS("requested rotation is not allowed!\n"); + return -EINVAL; + } + + if ((state->crtc_x < 0 || state->crtc_y < 0 || + state->crtc_x + state->crtc_w > hdisplay || + state->crtc_y + state->crtc_h > vdisplay) && + !dcss_plane_fb_is_linear(fb)) { + DRM_DEBUG_KMS("requested cropping operation is not allowed!\n"); + return -EINVAL; + } + + if ((fb->flags & DRM_MODE_FB_MODIFIERS) && + !plane->funcs->format_mod_supported(plane, + fb->format->format, + fb->modifier)) { + DRM_DEBUG_KMS("Invalid modifier: %llx", fb->modifier); + return -EINVAL; + } + + return 0; +} + +static void dcss_plane_atomic_set_base(struct dcss_plane *dcss_plane) +{ + struct drm_plane *plane = &dcss_plane->base; + struct drm_plane_state *state = plane->state; + struct dcss_dev *dcss = plane->dev->dev_private; + struct drm_framebuffer *fb = state->fb; + const struct drm_format_info *format = fb->format; + struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + unsigned long p1_ba = 0, p2_ba = 0; + + if (!format->is_yuv || + format->format == DRM_FORMAT_NV12 || + format->format == DRM_FORMAT_NV21) + p1_ba = cma_obj->paddr + fb->offsets[0] + + fb->pitches[0] * (state->src.y1 >> 16) + + format->char_per_block[0] * (state->src.x1 >> 16); + else if (format->format == DRM_FORMAT_UYVY || + format->format == DRM_FORMAT_VYUY || + format->format == DRM_FORMAT_YUYV || + format->format == DRM_FORMAT_YVYU) + p1_ba = cma_obj->paddr + fb->offsets[0] + + fb->pitches[0] * (state->src.y1 >> 16) + + 2 * format->char_per_block[0] * (state->src.x1 >> 17); + + if (format->format == DRM_FORMAT_NV12 || + format->format == DRM_FORMAT_NV21) + p2_ba = cma_obj->paddr + fb->offsets[1] + + (((fb->pitches[1] >> 1) * (state->src.y1 >> 17) + + (state->src.x1 >> 17)) << 1); + + dcss_dpr_addr_set(dcss->dpr, dcss_plane->ch_num, p1_ba, p2_ba, + fb->pitches[0]); +} + +static bool dcss_plane_needs_setup(struct drm_plane_state *state, + struct drm_plane_state *old_state) +{ + struct drm_framebuffer *fb = state->fb; + struct drm_framebuffer *old_fb = old_state->fb; 
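+	/* any geometry, format, modifier or rotation change forces full setup */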
+ + return state->crtc_x != old_state->crtc_x || + state->crtc_y != old_state->crtc_y || + state->crtc_w != old_state->crtc_w || + state->crtc_h != old_state->crtc_h || + state->src_x != old_state->src_x || + state->src_y != old_state->src_y || + state->src_w != old_state->src_w || + state->src_h != old_state->src_h || + fb->format->format != old_fb->format->format || + fb->modifier != old_fb->modifier || + state->rotation != old_state->rotation; +} + +static void dcss_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_plane_state *state = plane->state; + struct dcss_plane *dcss_plane = to_dcss_plane(plane); + struct dcss_dev *dcss = plane->dev->dev_private; + struct drm_framebuffer *fb = state->fb; + u32 pixel_format; + struct drm_crtc_state *crtc_state; + bool modifiers_present; + u32 src_w, src_h, dst_w, dst_h; + struct drm_rect src, dst; + bool enable = true; + + if (!fb || !state->crtc || !state->visible) + return; + + pixel_format = state->fb->format->format; + crtc_state = state->crtc->state; + modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS); + + if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state) && + !dcss_plane_needs_setup(state, old_state)) { + dcss_plane_atomic_set_base(dcss_plane); + return; + } + + src = plane->state->src; + dst = plane->state->dst; + + /* + * The width and height after clipping. + */ + src_w = drm_rect_width(&src) >> 16; + src_h = drm_rect_height(&src) >> 16; + dst_w = drm_rect_width(&dst); + dst_h = drm_rect_height(&dst); + + if (plane->type == DRM_PLANE_TYPE_OVERLAY && + modifiers_present && fb->modifier == DRM_FORMAT_MOD_LINEAR) + modifiers_present = false; + + dcss_dpr_format_set(dcss->dpr, dcss_plane->ch_num, state->fb->format, + modifiers_present ? 
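+			    /* no modifier supplied: treat the FB as linear */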
fb->modifier : + DRM_FORMAT_MOD_LINEAR); + dcss_dpr_set_res(dcss->dpr, dcss_plane->ch_num, src_w, src_h); + dcss_dpr_set_rotation(dcss->dpr, dcss_plane->ch_num, + state->rotation); + + dcss_plane_atomic_set_base(dcss_plane); + + dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num, + state->fb->format, src_w, src_h, + dst_w, dst_h, + drm_mode_vrefresh(&crtc_state->mode)); + + dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, + dst.x1, dst.y1, dst_w, dst_h); + dcss_dtg_plane_alpha_set(dcss->dtg, dcss_plane->ch_num, + fb->format, state->alpha >> 8); + + if (!dcss_plane->ch_num && (state->alpha >> 8) == 0) + enable = false; + + dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, enable); + dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, enable); + + if (!enable) + dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, + 0, 0, 0, 0); + + dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, enable); +} + +static void dcss_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct dcss_plane *dcss_plane = to_dcss_plane(plane); + struct dcss_dev *dcss = plane->dev->dev_private; + + dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, false); + dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, false); + dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, 0, 0, 0, 0); + dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, false); +} + +static const struct drm_plane_helper_funcs dcss_plane_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, + .atomic_check = dcss_plane_atomic_check, + .atomic_update = dcss_plane_atomic_update, + .atomic_disable = dcss_plane_atomic_disable, +}; + +struct dcss_plane *dcss_plane_init(struct drm_device *drm, + unsigned int possible_crtcs, + enum drm_plane_type type, + unsigned int zpos) +{ + struct dcss_plane *dcss_plane; + const u64 *format_modifiers = dcss_video_format_modifiers; + int ret; + + if (zpos > 2) + return ERR_PTR(-EINVAL); + + dcss_plane = kzalloc(sizeof(*dcss_plane), GFP_KERNEL); + if (!dcss_plane) { + DRM_ERROR("failed to allocate plane\n"); + return ERR_PTR(-ENOMEM); + } + + if (type == DRM_PLANE_TYPE_PRIMARY) + format_modifiers = dcss_graphics_format_modifiers; + + ret = drm_universal_plane_init(drm, &dcss_plane->base, possible_crtcs, + &dcss_plane_funcs, dcss_common_formats, + ARRAY_SIZE(dcss_common_formats), + format_modifiers, type, NULL); + if (ret) { + DRM_ERROR("failed to initialize plane\n"); + kfree(dcss_plane); + return ERR_PTR(ret); + } + + drm_plane_helper_add(&dcss_plane->base, &dcss_plane_helper_funcs); + + ret = drm_plane_create_zpos_immutable_property(&dcss_plane->base, zpos); + if (ret) + return ERR_PTR(ret); + + drm_plane_create_rotation_property(&dcss_plane->base, + DRM_MODE_ROTATE_0, + DRM_MODE_ROTATE_0 | + DRM_MODE_ROTATE_90 | + DRM_MODE_ROTATE_180 | + DRM_MODE_ROTATE_270 | + DRM_MODE_REFLECT_X | + DRM_MODE_REFLECT_Y); + + dcss_plane->ch_num = zpos; + + return dcss_plane; +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c new file mode 100644 index 000000000000..cd21905de580 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c @@ -0,0 +1,826 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
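+ *
+ * Polyphase scaler: filter coefficients are generated at runtime from a
+ * Gaussian prototype filter (see dcss_scaler_filter_design()).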
+ * + * Scaling algorithms were contributed by Dzung Hoang <dzung.hoang@nxp.com> + */ + +#include <linux/device.h> +#include <linux/slab.h> + +#include "dcss-dev.h" + +#define DCSS_SCALER_CTRL 0x00 +#define SCALER_EN BIT(0) +#define REPEAT_EN BIT(4) +#define SCALE2MEM_EN BIT(8) +#define MEM2OFIFO_EN BIT(12) +#define DCSS_SCALER_OFIFO_CTRL 0x04 +#define OFIFO_LOW_THRES_POS 0 +#define OFIFO_LOW_THRES_MASK GENMASK(9, 0) +#define OFIFO_HIGH_THRES_POS 16 +#define OFIFO_HIGH_THRES_MASK GENMASK(25, 16) +#define UNDERRUN_DETECT_CLR BIT(26) +#define LOW_THRES_DETECT_CLR BIT(27) +#define HIGH_THRES_DETECT_CLR BIT(28) +#define UNDERRUN_DETECT_EN BIT(29) +#define LOW_THRES_DETECT_EN BIT(30) +#define HIGH_THRES_DETECT_EN BIT(31) +#define DCSS_SCALER_SDATA_CTRL 0x08 +#define YUV_EN BIT(0) +#define RTRAM_8LINES BIT(1) +#define Y_UV_BYTE_SWAP BIT(4) +#define A2R10G10B10_FORMAT_POS 8 +#define A2R10G10B10_FORMAT_MASK GENMASK(11, 8) +#define DCSS_SCALER_BIT_DEPTH 0x0C +#define LUM_BIT_DEPTH_POS 0 +#define LUM_BIT_DEPTH_MASK GENMASK(1, 0) +#define CHR_BIT_DEPTH_POS 4 +#define CHR_BIT_DEPTH_MASK GENMASK(5, 4) +#define DCSS_SCALER_SRC_FORMAT 0x10 +#define DCSS_SCALER_DST_FORMAT 0x14 +#define FORMAT_MASK GENMASK(1, 0) +#define DCSS_SCALER_SRC_LUM_RES 0x18 +#define DCSS_SCALER_SRC_CHR_RES 0x1C +#define DCSS_SCALER_DST_LUM_RES 0x20 +#define DCSS_SCALER_DST_CHR_RES 0x24 +#define WIDTH_POS 0 +#define WIDTH_MASK GENMASK(11, 0) +#define HEIGHT_POS 16 +#define HEIGHT_MASK GENMASK(27, 16) +#define DCSS_SCALER_V_LUM_START 0x48 +#define V_START_MASK GENMASK(15, 0) +#define DCSS_SCALER_V_LUM_INC 0x4C +#define V_INC_MASK GENMASK(15, 0) +#define DCSS_SCALER_H_LUM_START 0x50 +#define H_START_MASK GENMASK(18, 0) +#define DCSS_SCALER_H_LUM_INC 0x54 +#define H_INC_MASK GENMASK(15, 0) +#define DCSS_SCALER_V_CHR_START 0x58 +#define DCSS_SCALER_V_CHR_INC 0x5C +#define DCSS_SCALER_H_CHR_START 0x60 +#define DCSS_SCALER_H_CHR_INC 0x64 +#define DCSS_SCALER_COEF_VLUM 0x80 +#define DCSS_SCALER_COEF_HLUM 0x140 +#define DCSS_SCALER_COEF_VCHR 0x200 +#define DCSS_SCALER_COEF_HCHR 0x300 + +struct dcss_scaler_ch { + void __iomem *base_reg; + u32 base_ofs; + struct dcss_scaler *scl; + + u32 sdata_ctrl; + u32 scaler_ctrl; + + bool scaler_ctrl_chgd; + + u32 c_vstart; + u32 c_hstart; +}; + +struct dcss_scaler { + struct device *dev; + + struct dcss_ctxld *ctxld; + u32 ctx_id; + + struct dcss_scaler_ch ch[3]; +}; + +/* scaler coefficients generator */ +#define PSC_FRAC_BITS 30 +#define PSC_FRAC_SCALE BIT(PSC_FRAC_BITS) +#define PSC_BITS_FOR_PHASE 4 +#define PSC_NUM_PHASES 16 +#define PSC_STORED_PHASES (PSC_NUM_PHASES / 2 + 1) +#define PSC_NUM_TAPS 7 +#define PSC_NUM_TAPS_RGBA 5 +#define PSC_COEFF_PRECISION 10 +#define PSC_PHASE_FRACTION_BITS 13 +#define PSC_PHASE_MASK (PSC_NUM_PHASES - 1) +#define PSC_Q_FRACTION 19 +#define PSC_Q_ROUND_OFFSET (1 << (PSC_Q_FRACTION - 1)) + +/** + * mult_q() - Performs fixed-point multiplication. + * @A: multiplier + * @B: multiplicand + */ +static int mult_q(int A, int B) +{ + int result; + s64 temp; + + temp = (int64_t)A * (int64_t)B; + temp += PSC_Q_ROUND_OFFSET; + result = (int)(temp >> PSC_Q_FRACTION); + return result; +} + +/** + * div_q() - Performs fixed-point division. 
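+ *
+ * Rounds to nearest: half the divisor is added to (or subtracted from,
+ * depending on the operands' signs) the scaled dividend before dividing.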
+ * @A: dividend + * @B: divisor + */ +static int div_q(int A, int B) +{ + int result; + s64 temp; + + temp = (int64_t)A << PSC_Q_FRACTION; + if ((temp >= 0 && B >= 0) || (temp < 0 && B < 0)) + temp += B / 2; + else + temp -= B / 2; + + result = (int)(temp / B); + return result; +} + +/** + * exp_approx_q() - Compute approximation to exp(x) function using Taylor + * series. + * @x: fixed-point argument of exp function + */ +static int exp_approx_q(int x) +{ + int sum = 1 << PSC_Q_FRACTION; + int term = 1 << PSC_Q_FRACTION; + + term = mult_q(term, div_q(x, 1 << PSC_Q_FRACTION)); + sum += term; + term = mult_q(term, div_q(x, 2 << PSC_Q_FRACTION)); + sum += term; + term = mult_q(term, div_q(x, 3 << PSC_Q_FRACTION)); + sum += term; + term = mult_q(term, div_q(x, 4 << PSC_Q_FRACTION)); + sum += term; + + return sum; +} + +/** + * dcss_scaler_gaussian_filter() - Generate gaussian prototype filter. + * @fc_q: fixed-point cutoff frequency normalized to range [0, 1] + * @use_5_taps: indicates whether to use 5 taps or 7 taps + * @coef: output filter coefficients + */ +static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps, + bool phase0_identity, + int coef[][PSC_NUM_TAPS]) +{ + int sigma_q, g0_q, g1_q, g2_q; + int tap_cnt1, tap_cnt2, tap_idx, phase_cnt; + int mid; + int phase; + int i; + int taps; + + if (use_5_taps) + for (phase = 0; phase < PSC_STORED_PHASES; phase++) { + coef[phase][0] = 0; + coef[phase][PSC_NUM_TAPS - 1] = 0; + } + + /* seed coefficient scanner */ + taps = use_5_taps ? PSC_NUM_TAPS_RGBA : PSC_NUM_TAPS; + mid = (PSC_NUM_PHASES * taps) / 2 - 1; + phase_cnt = (PSC_NUM_PHASES * (PSC_NUM_TAPS + 1)) / 2; + tap_cnt1 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2; + tap_cnt2 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2; + + /* seed gaussian filter generator */ + sigma_q = div_q(PSC_Q_ROUND_OFFSET, fc_q); + g0_q = 1 << PSC_Q_FRACTION; + g1_q = exp_approx_q(div_q(-PSC_Q_ROUND_OFFSET, + mult_q(sigma_q, sigma_q))); + g2_q = mult_q(g1_q, g1_q); + coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = g0_q; + + for (i = 0; i < mid; i++) { + phase_cnt++; + tap_cnt1--; + tap_cnt2++; + + g0_q = mult_q(g0_q, g1_q); + g1_q = mult_q(g1_q, g2_q); + + if ((phase_cnt & PSC_PHASE_MASK) <= 8) { + tap_idx = tap_cnt1 >> PSC_BITS_FOR_PHASE; + coef[phase_cnt & PSC_PHASE_MASK][tap_idx] = g0_q; + } + if (((-phase_cnt) & PSC_PHASE_MASK) <= 8) { + tap_idx = tap_cnt2 >> PSC_BITS_FOR_PHASE; + coef[(-phase_cnt) & PSC_PHASE_MASK][tap_idx] = g0_q; + } + } + + phase_cnt++; + tap_cnt1--; + coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = 0; + + /* override phase 0 with identity filter if specified */ + if (phase0_identity) + for (i = 0; i < PSC_NUM_TAPS; i++) + coef[0][i] = i == (PSC_NUM_TAPS >> 1) ? + (1 << PSC_COEFF_PRECISION) : 0; + + /* normalize coef */ + for (phase = 0; phase < PSC_STORED_PHASES; phase++) { + int sum = 0; + s64 ll_temp; + + for (i = 0; i < PSC_NUM_TAPS; i++) + sum += coef[phase][i]; + for (i = 0; i < PSC_NUM_TAPS; i++) { + ll_temp = coef[phase][i]; + ll_temp <<= PSC_COEFF_PRECISION; + ll_temp += sum >> 1; + ll_temp /= sum; + coef[phase][i] = (int)ll_temp; + } + } +} + +/** + * dcss_scaler_filter_design() - Compute filter coefficients using + * Gaussian filter. 
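+ *
+ * The normalized cutoff is 1/PSC_NUM_PHASES when upscaling and
+ * dst_length / (src_length * PSC_NUM_PHASES) when downscaling. When
+ * @phase0_identity is set, phase 0 is replaced with an identity filter.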
+ * @src_length: length of input + * @dst_length: length of output + * @use_5_taps: 0 for 7 taps per phase, 1 for 5 taps + * @coef: output coefficients + */ +static void dcss_scaler_filter_design(int src_length, int dst_length, + bool use_5_taps, bool phase0_identity, + int coef[][PSC_NUM_TAPS]) +{ + int fc_q; + + /* compute cutoff frequency */ + if (dst_length >= src_length) + fc_q = div_q(1, PSC_NUM_PHASES); + else + fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES); + + /* compute gaussian filter coefficients */ + dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef); +} + +static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs) +{ + struct dcss_scaler *scl = ch->scl; + + dcss_ctxld_write(scl->ctxld, scl->ctx_id, val, ch->base_ofs + ofs); +} + +static int dcss_scaler_ch_init_all(struct dcss_scaler *scl, + unsigned long scaler_base) +{ + struct dcss_scaler_ch *ch; + int i; + + for (i = 0; i < 3; i++) { + ch = &scl->ch[i]; + + ch->base_ofs = scaler_base + i * 0x400; + + ch->base_reg = ioremap(ch->base_ofs, SZ_4K); + if (!ch->base_reg) { + dev_err(scl->dev, "scaler: unable to remap ch base\n"); + return -ENOMEM; + } + + ch->scl = scl; + } + + return 0; +} + +int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base) +{ + struct dcss_scaler *scaler; + + scaler = kzalloc(sizeof(*scaler), GFP_KERNEL); + if (!scaler) + return -ENOMEM; + + dcss->scaler = scaler; + scaler->dev = dcss->dev; + scaler->ctxld = dcss->ctxld; + scaler->ctx_id = CTX_SB_HP; + + if (dcss_scaler_ch_init_all(scaler, scaler_base)) { + int i; + + for (i = 0; i < 3; i++) { + if (scaler->ch[i].base_reg) + iounmap(scaler->ch[i].base_reg); + } + + kfree(scaler); + + return -ENOMEM; + } + + return 0; +} + +void dcss_scaler_exit(struct dcss_scaler *scl) +{ + int ch_no; + + for (ch_no = 0; ch_no < 3; ch_no++) { + struct dcss_scaler_ch *ch = &scl->ch[ch_no]; + + dcss_writel(0, ch->base_reg + DCSS_SCALER_CTRL); + + if (ch->base_reg) + iounmap(ch->base_reg); + } + + kfree(scl); +} + +void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en) +{ + struct dcss_scaler_ch *ch = &scl->ch[ch_num]; + u32 scaler_ctrl; + + scaler_ctrl = en ? SCALER_EN | REPEAT_EN : 0; + + if (en) + dcss_scaler_write(ch, ch->sdata_ctrl, DCSS_SCALER_SDATA_CTRL); + + if (ch->scaler_ctrl != scaler_ctrl) + ch->scaler_ctrl_chgd = true; + + ch->scaler_ctrl = scaler_ctrl; +} + +static void dcss_scaler_yuv_enable(struct dcss_scaler_ch *ch, bool en) +{ + ch->sdata_ctrl &= ~YUV_EN; + ch->sdata_ctrl |= en ? YUV_EN : 0; +} + +static void dcss_scaler_rtr_8lines_enable(struct dcss_scaler_ch *ch, bool en) +{ + ch->sdata_ctrl &= ~RTRAM_8LINES; + ch->sdata_ctrl |= en ? RTRAM_8LINES : 0; +} + +static void dcss_scaler_bit_depth_set(struct dcss_scaler_ch *ch, int depth) +{ + u32 val; + + val = depth == 30 ? 
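+		/* bit depth field: 2 for 30bpp (10bpc) formats, 0 for 8bpc */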
2 : 0; + + dcss_scaler_write(ch, + ((val << CHR_BIT_DEPTH_POS) & CHR_BIT_DEPTH_MASK) | + ((val << LUM_BIT_DEPTH_POS) & LUM_BIT_DEPTH_MASK), + DCSS_SCALER_BIT_DEPTH); +} + +enum buffer_format { + BUF_FMT_YUV420, + BUF_FMT_YUV422, + BUF_FMT_ARGB8888_YUV444, +}; + +enum chroma_location { + PSC_LOC_HORZ_0_VERT_1_OVER_4 = 0, + PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4 = 1, + PSC_LOC_HORZ_0_VERT_0 = 2, + PSC_LOC_HORZ_1_OVER_4_VERT_0 = 3, + PSC_LOC_HORZ_0_VERT_1_OVER_2 = 4, + PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2 = 5 +}; + +static void dcss_scaler_format_set(struct dcss_scaler_ch *ch, + enum buffer_format src_fmt, + enum buffer_format dst_fmt) +{ + dcss_scaler_write(ch, src_fmt, DCSS_SCALER_SRC_FORMAT); + dcss_scaler_write(ch, dst_fmt, DCSS_SCALER_DST_FORMAT); +} + +static void dcss_scaler_res_set(struct dcss_scaler_ch *ch, + int src_xres, int src_yres, + int dst_xres, int dst_yres, + u32 pix_format, enum buffer_format dst_format) +{ + u32 lsrc_xres, lsrc_yres, csrc_xres, csrc_yres; + u32 ldst_xres, ldst_yres, cdst_xres, cdst_yres; + bool src_is_444 = true; + + lsrc_xres = src_xres; + csrc_xres = src_xres; + lsrc_yres = src_yres; + csrc_yres = src_yres; + ldst_xres = dst_xres; + cdst_xres = dst_xres; + ldst_yres = dst_yres; + cdst_yres = dst_yres; + + if (pix_format == DRM_FORMAT_UYVY || pix_format == DRM_FORMAT_VYUY || + pix_format == DRM_FORMAT_YUYV || pix_format == DRM_FORMAT_YVYU) { + csrc_xres >>= 1; + src_is_444 = false; + } else if (pix_format == DRM_FORMAT_NV12 || + pix_format == DRM_FORMAT_NV21) { + csrc_xres >>= 1; + csrc_yres >>= 1; + src_is_444 = false; + } + + if (dst_format == BUF_FMT_YUV422) + cdst_xres >>= 1; + + /* for 4:4:4 to 4:2:2 conversion, source height should be 1 less */ + if (src_is_444 && dst_format == BUF_FMT_YUV422) { + lsrc_yres--; + csrc_yres--; + } + + dcss_scaler_write(ch, (((lsrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) | + (((lsrc_xres - 1) << WIDTH_POS) & WIDTH_MASK), + DCSS_SCALER_SRC_LUM_RES); + dcss_scaler_write(ch, (((csrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) | + (((csrc_xres - 1) << WIDTH_POS) & WIDTH_MASK), + DCSS_SCALER_SRC_CHR_RES); + dcss_scaler_write(ch, (((ldst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) | + (((ldst_xres - 1) << WIDTH_POS) & WIDTH_MASK), + DCSS_SCALER_DST_LUM_RES); + dcss_scaler_write(ch, (((cdst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) | + (((cdst_xres - 1) << WIDTH_POS) & WIDTH_MASK), + DCSS_SCALER_DST_CHR_RES); +} + +#define downscale_fp(factor, fp_pos) ((factor) << (fp_pos)) +#define upscale_fp(factor, fp_pos) ((1 << (fp_pos)) / (factor)) + +struct dcss_scaler_factors { + int downscale; + int upscale; +}; + +static const struct dcss_scaler_factors dcss_scaler_factors[] = { + {3, 8}, {5, 8}, {5, 8}, +}; + +static void dcss_scaler_fractions_set(struct dcss_scaler_ch *ch, + int src_xres, int src_yres, + int dst_xres, int dst_yres, + u32 src_format, u32 dst_format, + enum chroma_location src_chroma_loc) +{ + int src_c_xres, src_c_yres, dst_c_xres, dst_c_yres; + u32 l_vinc, l_hinc, c_vinc, c_hinc; + u32 c_vstart, c_hstart; + + src_c_xres = src_xres; + src_c_yres = src_yres; + dst_c_xres = dst_xres; + dst_c_yres = dst_yres; + + c_vstart = 0; + c_hstart = 0; + + /* adjustments for source chroma location */ + if (src_format == BUF_FMT_YUV420) { + /* vertical input chroma position adjustment */ + switch (src_chroma_loc) { + case PSC_LOC_HORZ_0_VERT_1_OVER_4: + case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4: + /* + * move chroma up to first luma line + * (1/4 chroma input line spacing) + */ + c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2)); + break; + 
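+		/* 1 << (PSC_PHASE_FRACTION_BITS - 1) is half a line in Q13 */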
case PSC_LOC_HORZ_0_VERT_1_OVER_2: + case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2: + /* + * move chroma up to first luma line + * (1/2 chroma input line spacing) + */ + c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 1)); + break; + default: + break; + } + /* horizontal input chroma position adjustment */ + switch (src_chroma_loc) { + case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4: + case PSC_LOC_HORZ_1_OVER_4_VERT_0: + case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2: + /* move chroma left 1/4 chroma input sample spacing */ + c_hstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2)); + break; + default: + break; + } + } + + /* adjustments to chroma resolution */ + if (src_format == BUF_FMT_YUV420) { + src_c_xres >>= 1; + src_c_yres >>= 1; + } else if (src_format == BUF_FMT_YUV422) { + src_c_xres >>= 1; + } + + if (dst_format == BUF_FMT_YUV422) + dst_c_xres >>= 1; + + l_vinc = ((src_yres << 13) + (dst_yres >> 1)) / dst_yres; + c_vinc = ((src_c_yres << 13) + (dst_c_yres >> 1)) / dst_c_yres; + l_hinc = ((src_xres << 13) + (dst_xres >> 1)) / dst_xres; + c_hinc = ((src_c_xres << 13) + (dst_c_xres >> 1)) / dst_c_xres; + + /* save chroma start phase */ + ch->c_vstart = c_vstart; + ch->c_hstart = c_hstart; + + dcss_scaler_write(ch, 0, DCSS_SCALER_V_LUM_START); + dcss_scaler_write(ch, l_vinc, DCSS_SCALER_V_LUM_INC); + + dcss_scaler_write(ch, 0, DCSS_SCALER_H_LUM_START); + dcss_scaler_write(ch, l_hinc, DCSS_SCALER_H_LUM_INC); + + dcss_scaler_write(ch, c_vstart, DCSS_SCALER_V_CHR_START); + dcss_scaler_write(ch, c_vinc, DCSS_SCALER_V_CHR_INC); + + dcss_scaler_write(ch, c_hstart, DCSS_SCALER_H_CHR_START); + dcss_scaler_write(ch, c_hinc, DCSS_SCALER_H_CHR_INC); +} + +int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num, + int *min, int *max) +{ + *min = upscale_fp(dcss_scaler_factors[ch_num].upscale, 16); + *max = downscale_fp(dcss_scaler_factors[ch_num].downscale, 16); + + return 0; +} + +static void dcss_scaler_program_5_coef_set(struct dcss_scaler_ch *ch, + int base_addr, + int coef[][PSC_NUM_TAPS]) +{ + int i, phase; + + for (i = 0; i < PSC_STORED_PHASES; i++) { + dcss_scaler_write(ch, ((coef[i][1] & 0xfff) << 16 | + (coef[i][2] & 0xfff) << 4 | + (coef[i][3] & 0xf00) >> 8), + base_addr + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[i][3] & 0x0ff) << 20 | + (coef[i][4] & 0xfff) << 8 | + (coef[i][5] & 0xff0) >> 4), + base_addr + 0x40 + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[i][5] & 0x00f) << 24), + base_addr + 0x80 + i * sizeof(u32)); + } + + /* reverse both phase and tap orderings */ + for (phase = (PSC_NUM_PHASES >> 1) - 1; + i < PSC_NUM_PHASES; i++, phase--) { + dcss_scaler_write(ch, ((coef[phase][5] & 0xfff) << 16 | + (coef[phase][4] & 0xfff) << 4 | + (coef[phase][3] & 0xf00) >> 8), + base_addr + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[phase][3] & 0x0ff) << 20 | + (coef[phase][2] & 0xfff) << 8 | + (coef[phase][1] & 0xff0) >> 4), + base_addr + 0x40 + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[phase][1] & 0x00f) << 24), + base_addr + 0x80 + i * sizeof(u32)); + } +} + +static void dcss_scaler_program_7_coef_set(struct dcss_scaler_ch *ch, + int base_addr, + int coef[][PSC_NUM_TAPS]) +{ + int i, phase; + + for (i = 0; i < PSC_STORED_PHASES; i++) { + dcss_scaler_write(ch, ((coef[i][0] & 0xfff) << 16 | + (coef[i][1] & 0xfff) << 4 | + (coef[i][2] & 0xf00) >> 8), + base_addr + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[i][2] & 0x0ff) << 20 | + (coef[i][3] & 0xfff) << 8 | + (coef[i][4] & 0xff0) >> 4), + base_addr + 0x40 + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[i][4] & 
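+				       /* low nibble of tap 4, then taps 5 and 6 */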
0x00f) << 24 | + (coef[i][5] & 0xfff) << 12 | + (coef[i][6] & 0xfff)), + base_addr + 0x80 + i * sizeof(u32)); + } + + /* reverse both phase and tap orderings */ + for (phase = (PSC_NUM_PHASES >> 1) - 1; + i < PSC_NUM_PHASES; i++, phase--) { + dcss_scaler_write(ch, ((coef[phase][6] & 0xfff) << 16 | + (coef[phase][5] & 0xfff) << 4 | + (coef[phase][4] & 0xf00) >> 8), + base_addr + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[phase][4] & 0x0ff) << 20 | + (coef[phase][3] & 0xfff) << 8 | + (coef[phase][2] & 0xff0) >> 4), + base_addr + 0x40 + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[phase][2] & 0x00f) << 24 | + (coef[phase][1] & 0xfff) << 12 | + (coef[phase][0] & 0xfff)), + base_addr + 0x80 + i * sizeof(u32)); + } +} + +static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch, + enum buffer_format src_format, + enum buffer_format dst_format, + bool use_5_taps, + int src_xres, int src_yres, int dst_xres, + int dst_yres) +{ + int coef[PSC_STORED_PHASES][PSC_NUM_TAPS]; + bool program_5_taps = use_5_taps || + (dst_format == BUF_FMT_YUV422 && + src_format == BUF_FMT_ARGB8888_YUV444); + + /* horizontal luma */ + dcss_scaler_filter_design(src_xres, dst_xres, false, + src_xres == dst_xres, coef); + dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef); + + /* vertical luma */ + dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps, + src_yres == dst_yres, coef); + + if (program_5_taps) + dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); + else + dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); + + /* adjust chroma resolution */ + if (src_format != BUF_FMT_ARGB8888_YUV444) + src_xres >>= 1; + if (src_format == BUF_FMT_YUV420) + src_yres >>= 1; + if (dst_format != BUF_FMT_ARGB8888_YUV444) + dst_xres >>= 1; + if (dst_format == BUF_FMT_YUV420) /* should not happen */ + dst_yres >>= 1; + + /* horizontal chroma */ + dcss_scaler_filter_design(src_xres, dst_xres, false, + (src_xres == dst_xres) && (ch->c_hstart == 0), + coef); + + dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef); + + /* vertical chroma */ + dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps, + (src_yres == dst_yres) && (ch->c_vstart == 0), + coef); + if (program_5_taps) + dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef); + else + dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef); +} + +static void dcss_scaler_rgb_coef_set(struct dcss_scaler_ch *ch, + int src_xres, int src_yres, int dst_xres, + int dst_yres) +{ + int coef[PSC_STORED_PHASES][PSC_NUM_TAPS]; + + /* horizontal RGB */ + dcss_scaler_filter_design(src_xres, dst_xres, false, + src_xres == dst_xres, coef); + dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef); + + /* vertical RGB */ + dcss_scaler_filter_design(src_yres, dst_yres, false, + src_yres == dst_yres, coef); + dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); +} + +static void dcss_scaler_set_rgb10_order(struct dcss_scaler_ch *ch, + const struct drm_format_info *format) +{ + u32 a2r10g10b10_format; + + if (format->is_yuv) + return; + + ch->sdata_ctrl &= ~A2R10G10B10_FORMAT_MASK; + + if (format->depth != 30) + return; + + switch (format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_XRGB2101010: + a2r10g10b10_format = 0; + break; + + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_XBGR2101010: + a2r10g10b10_format = 5; + break; + + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_RGBX1010102: + a2r10g10b10_format = 6; + break; + + case DRM_FORMAT_BGRA1010102: + case 
DRM_FORMAT_BGRX1010102: + a2r10g10b10_format = 11; + break; + + default: + a2r10g10b10_format = 0; + break; + } + + ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS; +} + +void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num, + const struct drm_format_info *format, + int src_xres, int src_yres, int dst_xres, int dst_yres, + u32 vrefresh_hz) +{ + struct dcss_scaler_ch *ch = &scl->ch[ch_num]; + unsigned int pixel_depth = 0; + bool rtr_8line_en = false; + bool use_5_taps = false; + enum buffer_format src_format = BUF_FMT_ARGB8888_YUV444; + enum buffer_format dst_format = BUF_FMT_ARGB8888_YUV444; + u32 pix_format = format->format; + + if (format->is_yuv) { + dcss_scaler_yuv_enable(ch, true); + + if (pix_format == DRM_FORMAT_NV12 || + pix_format == DRM_FORMAT_NV21) { + rtr_8line_en = true; + src_format = BUF_FMT_YUV420; + } else if (pix_format == DRM_FORMAT_UYVY || + pix_format == DRM_FORMAT_VYUY || + pix_format == DRM_FORMAT_YUYV || + pix_format == DRM_FORMAT_YVYU) { + src_format = BUF_FMT_YUV422; + } + + use_5_taps = !rtr_8line_en; + } else { + dcss_scaler_yuv_enable(ch, false); + + pixel_depth = format->depth; + } + + dcss_scaler_fractions_set(ch, src_xres, src_yres, dst_xres, + dst_yres, src_format, dst_format, + PSC_LOC_HORZ_0_VERT_1_OVER_4); + + if (format->is_yuv) + dcss_scaler_yuv_coef_set(ch, src_format, dst_format, + use_5_taps, src_xres, src_yres, + dst_xres, dst_yres); + else + dcss_scaler_rgb_coef_set(ch, src_xres, src_yres, + dst_xres, dst_yres); + + dcss_scaler_rtr_8lines_enable(ch, rtr_8line_en); + dcss_scaler_bit_depth_set(ch, pixel_depth); + dcss_scaler_set_rgb10_order(ch, format); + dcss_scaler_format_set(ch, src_format, dst_format); + dcss_scaler_res_set(ch, src_xres, src_yres, dst_xres, dst_yres, + pix_format, dst_format); +} + +/* This function will be called from interrupt context. */ +void dcss_scaler_write_sclctrl(struct dcss_scaler *scl) +{ + int chnum; + + dcss_ctxld_assert_locked(scl->ctxld); + + for (chnum = 0; chnum < 3; chnum++) { + struct dcss_scaler_ch *ch = &scl->ch[chnum]; + + if (ch->scaler_ctrl_chgd) { + dcss_ctxld_write_irqsafe(scl->ctxld, scl->ctx_id, + ch->scaler_ctrl, + ch->base_ofs + + DCSS_SCALER_CTRL); + ch->scaler_ctrl_chgd = false; + } + } +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-ss.c b/drivers/gpu/drm/imx/dcss/dcss-ss.c new file mode 100644 index 000000000000..8ddf08da911b --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-ss.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
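+ *
+ * SS: output sync/DE timing generation plus chroma subsampling and
+ * clipping (see dcss_ss_sync_set() and dcss_ss_subsam_set()).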
+ */ + +#include <linux/device.h> +#include <linux/slab.h> + +#include "dcss-dev.h" + +#define DCSS_SS_SYS_CTRL 0x00 +#define RUN_EN BIT(0) +#define DCSS_SS_DISPLAY 0x10 +#define LRC_X_POS 0 +#define LRC_X_MASK GENMASK(12, 0) +#define LRC_Y_POS 16 +#define LRC_Y_MASK GENMASK(28, 16) +#define DCSS_SS_HSYNC 0x20 +#define DCSS_SS_VSYNC 0x30 +#define SYNC_START_POS 0 +#define SYNC_START_MASK GENMASK(12, 0) +#define SYNC_END_POS 16 +#define SYNC_END_MASK GENMASK(28, 16) +#define SYNC_POL BIT(31) +#define DCSS_SS_DE_ULC 0x40 +#define ULC_X_POS 0 +#define ULC_X_MASK GENMASK(12, 0) +#define ULC_Y_POS 16 +#define ULC_Y_MASK GENMASK(28, 16) +#define ULC_POL BIT(31) +#define DCSS_SS_DE_LRC 0x50 +#define DCSS_SS_MODE 0x60 +#define PIPE_MODE_POS 0 +#define PIPE_MODE_MASK GENMASK(1, 0) +#define DCSS_SS_COEFF 0x70 +#define HORIZ_A_POS 0 +#define HORIZ_A_MASK GENMASK(3, 0) +#define HORIZ_B_POS 4 +#define HORIZ_B_MASK GENMASK(7, 4) +#define HORIZ_C_POS 8 +#define HORIZ_C_MASK GENMASK(11, 8) +#define HORIZ_H_NORM_POS 12 +#define HORIZ_H_NORM_MASK GENMASK(14, 12) +#define VERT_A_POS 16 +#define VERT_A_MASK GENMASK(19, 16) +#define VERT_B_POS 20 +#define VERT_B_MASK GENMASK(23, 20) +#define VERT_C_POS 24 +#define VERT_C_MASK GENMASK(27, 24) +#define VERT_H_NORM_POS 28 +#define VERT_H_NORM_MASK GENMASK(30, 28) +#define DCSS_SS_CLIP_CB 0x80 +#define DCSS_SS_CLIP_CR 0x90 +#define CLIP_MIN_POS 0 +#define CLIP_MIN_MASK GENMASK(9, 0) +#define CLIP_MAX_POS 0 +#define CLIP_MAX_MASK GENMASK(23, 16) +#define DCSS_SS_INTER_MODE 0xA0 +#define INT_EN BIT(0) +#define VSYNC_SHIFT BIT(1) + +struct dcss_ss { + struct device *dev; + void __iomem *base_reg; + u32 base_ofs; + + struct dcss_ctxld *ctxld; + u32 ctx_id; + + bool in_use; +}; + +static void dcss_ss_write(struct dcss_ss *ss, u32 val, u32 ofs) +{ + if (!ss->in_use) + dcss_writel(val, ss->base_reg + ofs); + + dcss_ctxld_write(ss->ctxld, ss->ctx_id, val, + ss->base_ofs + ofs); +} + +int dcss_ss_init(struct dcss_dev *dcss, unsigned long ss_base) +{ + struct dcss_ss *ss; + + ss = kzalloc(sizeof(*ss), GFP_KERNEL); + if (!ss) + return -ENOMEM; + + dcss->ss = ss; + ss->dev = dcss->dev; + ss->ctxld = dcss->ctxld; + + ss->base_reg = ioremap(ss_base, SZ_4K); + if (!ss->base_reg) { + dev_err(dcss->dev, "ss: unable to remap ss base\n"); + kfree(ss); + return -ENOMEM; + } + + ss->base_ofs = ss_base; + ss->ctx_id = CTX_SB_HP; + + return 0; +} + +void dcss_ss_exit(struct dcss_ss *ss) +{ + /* stop SS */ + dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL); + + if (ss->base_reg) + iounmap(ss->base_reg); + + kfree(ss); +} + +void dcss_ss_subsam_set(struct dcss_ss *ss) +{ + dcss_ss_write(ss, 0x41614161, DCSS_SS_COEFF); + dcss_ss_write(ss, 0, DCSS_SS_MODE); + dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CB); + dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CR); +} + +void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm, + bool phsync, bool pvsync) +{ + u16 lrc_x, lrc_y; + u16 hsync_start, hsync_end; + u16 vsync_start, vsync_end; + u16 de_ulc_x, de_ulc_y; + u16 de_lrc_x, de_lrc_y; + + lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len + + vm->hactive - 1; + lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len + + vm->vactive - 1; + + dcss_ss_write(ss, (lrc_y << LRC_Y_POS) | lrc_x, DCSS_SS_DISPLAY); + + hsync_start = vm->hfront_porch + vm->hback_porch + vm->hsync_len + + vm->hactive - 1; + hsync_end = vm->hsync_len - 1; + + dcss_ss_write(ss, (phsync ? 
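+			/* phsync selects the SYNC_POL (positive polarity) bit */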
SYNC_POL : 0) | + ((u32)hsync_end << SYNC_END_POS) | hsync_start, + DCSS_SS_HSYNC); + + vsync_start = vm->vfront_porch - 1; + vsync_end = vm->vfront_porch + vm->vsync_len - 1; + + dcss_ss_write(ss, (pvsync ? SYNC_POL : 0) | + ((u32)vsync_end << SYNC_END_POS) | vsync_start, + DCSS_SS_VSYNC); + + de_ulc_x = vm->hsync_len + vm->hback_porch - 1; + de_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch; + + dcss_ss_write(ss, SYNC_POL | ((u32)de_ulc_y << ULC_Y_POS) | de_ulc_x, + DCSS_SS_DE_ULC); + + de_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1; + de_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch + + vm->vactive - 1; + + dcss_ss_write(ss, (de_lrc_y << LRC_Y_POS) | de_lrc_x, DCSS_SS_DE_LRC); +} + +void dcss_ss_enable(struct dcss_ss *ss) +{ + dcss_ss_write(ss, RUN_EN, DCSS_SS_SYS_CTRL); + ss->in_use = true; +} + +void dcss_ss_shutoff(struct dcss_ss *ss) +{ + dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL); + ss->in_use = false; +} diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index a3d1617d7c67..937d080f5d06 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -9,6 +9,8 @@ #include <linux/component.h> #include <linux/clk.h> #include <linux/dma-mapping.h> +#include <linux/dma-noncoherent.h> +#include <linux/io.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> @@ -19,6 +21,7 @@ #include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> @@ -76,6 +79,11 @@ static const u32 ingenic_drm_primary_formats[] = { DRM_FORMAT_XRGB8888, }; +static bool ingenic_drm_cached_gem_buf; +module_param_named(cached_gem_buffers, ingenic_drm_cached_gem_buf, bool, 0400); +MODULE_PARM_DESC(cached_gem_buffers, + "Enable fully cached GEM buffers [default=false]"); + static bool ingenic_drm_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { @@ -338,6 +346,8 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane, plane->state->fb->format->format != state->fb->format->format)) crtc_state->mode_changed = true; + drm_atomic_helper_check_plane_damage(state->state, state); + return 0; } @@ -440,6 +450,38 @@ void ingenic_drm_plane_config(struct device *dev, } } +void ingenic_drm_sync_data(struct device *dev, + struct drm_plane_state *old_state, + struct drm_plane_state *state) +{ + const struct drm_format_info *finfo = state->fb->format; + struct ingenic_drm *priv = dev_get_drvdata(dev); + struct drm_atomic_helper_damage_iter iter; + unsigned int offset, i; + struct drm_rect clip; + dma_addr_t paddr; + void *addr; + + if (!ingenic_drm_cached_gem_buf) + return; + + drm_atomic_helper_damage_iter_init(&iter, old_state, state); + + drm_atomic_for_each_plane_damage(&iter, &clip) { + for (i = 0; i < finfo->num_planes; i++) { + paddr = drm_fb_cma_get_gem_addr(state->fb, state, i); + addr = phys_to_virt(paddr); + + /* Ignore x1/x2 values, invalidate complete lines */ + offset = clip.y1 * state->fb->pitches[i]; + + dma_cache_sync(priv->dev, addr + offset, + (clip.y2 - clip.y1) * state->fb->pitches[i], + DMA_TO_DEVICE); + } + } +} + static void ingenic_drm_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *oldstate) { @@ -450,6 +492,8 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane, dma_addr_t addr; if (state && state->fb) { + 
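+		/* flush CPU caches over the damage clips (cached GEM buffers only) */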
ingenic_drm_sync_data(priv->dev, oldstate, state); + addr = drm_fb_cma_get_gem_addr(state->fb, state, 0); width = state->src_w >> 16; height = state->src_h >> 16; @@ -605,7 +649,69 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc) regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, JZ_LCD_CTRL_EOF_IRQ, 0); } -DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops); +static struct drm_framebuffer * +ingenic_drm_gem_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + if (ingenic_drm_cached_gem_buf) + return drm_gem_fb_create_with_dirty(dev, file, mode_cmd); + + return drm_gem_fb_create(dev, file, mode_cmd); +} + +static int ingenic_drm_gem_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + struct device *dev = cma_obj->base.dev->dev; + unsigned long attrs; + int ret; + + if (ingenic_drm_cached_gem_buf) + attrs = DMA_ATTR_NON_CONSISTENT; + else + attrs = DMA_ATTR_WRITE_COMBINE; + + /* + * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the + * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map + * the whole buffer. + */ + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_pgoff = 0; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + + ret = dma_mmap_attrs(dev, vma, cma_obj->vaddr, cma_obj->paddr, + vma->vm_end - vma->vm_start, attrs); + if (ret) + drm_gem_vm_close(vma); + + return ret; +} + +static int ingenic_drm_gem_cma_mmap(struct file *filp, + struct vm_area_struct *vma) +{ + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; + + return ingenic_drm_gem_mmap(vma->vm_private_data, vma); +} + +static const struct file_operations ingenic_drm_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .poll = drm_poll, + .read = drm_read, + .llseek = noop_llseek, + .mmap = ingenic_drm_gem_cma_mmap, +}; static struct drm_driver ingenic_drm_driver_data = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, @@ -669,7 +775,7 @@ static const struct drm_encoder_helper_funcs ingenic_drm_encoder_helper_funcs = }; static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = { - .fb_create = drm_gem_fb_create, + .fb_create = ingenic_drm_gem_fb_create, .output_poll_changed = drm_fb_helper_output_poll_changed, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, @@ -796,6 +902,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) return ret; } + drm_plane_enable_fb_damage_clips(&priv->f1); + drm_crtc_helper_add(&priv->crtc, &ingenic_drm_crtc_helper_funcs); ret = drm_crtc_init_with_planes(drm, &priv->crtc, &priv->f1, @@ -821,6 +929,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) return ret; } + drm_plane_enable_fb_damage_clips(&priv->f0); + if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && has_components) { ret = component_bind_all(dev, drm); if (ret) { diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.h b/drivers/gpu/drm/ingenic/ingenic-drm.h index 43f7d959cff7..df99f0f75d39 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.h +++ b/drivers/gpu/drm/ingenic/ingenic-drm.h @@ -168,6 +168,10 @@ void ingenic_drm_plane_config(struct device *dev, struct drm_plane *plane, u32 fourcc); void ingenic_drm_plane_disable(struct device *dev, struct drm_plane *plane); +void ingenic_drm_sync_data(struct device *dev, + struct drm_plane_state *old_state, + struct 
drm_plane_state *state); + extern struct platform_driver *ingenic_ipu_driver_ptr; #endif /* DRIVERS_GPU_DRM_INGENIC_INGENIC_DRM_H */ diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c index fc8c6e970ee3..38c83e8cc6a5 100644 --- a/drivers/gpu/drm/ingenic/ingenic-ipu.c +++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c @@ -20,6 +20,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> @@ -316,6 +317,8 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, JZ_IPU_CTRL_CHIP_EN | JZ_IPU_CTRL_LCDC_SEL); } + ingenic_drm_sync_data(ipu->master, oldstate, state); + /* New addresses will be committed in vblank handler... */ ipu->addr_y = drm_fb_cma_get_gem_addr(state->fb, state, 0); if (finfo->num_planes > 1) @@ -534,7 +537,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, if (!state->crtc || !crtc_state->mode.hdisplay || !crtc_state->mode.vdisplay) - return 0; + goto out_check_damage; /* Plane must be fully visible */ if (state->crtc_x < 0 || state->crtc_y < 0 || @@ -551,7 +554,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, return -EINVAL; if (!osd_changed(state, plane->state)) - return 0; + goto out_check_damage; crtc_state->mode_changed = true; @@ -578,6 +581,9 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, ipu->denom_w = denom_w; ipu->denom_h = denom_h; +out_check_damage: + drm_atomic_helper_check_plane_damage(state->state, state); + return 0; } @@ -759,6 +765,8 @@ static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d) return err; } + drm_plane_enable_fb_damage_clips(plane); + /* * Sharpness settings range is [0,32] * 0 : nearest-neighbor diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 155f2b4b4030..11223fe348df 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -69,8 +69,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) return ret; if (bo->base.sgt) { - dma_unmap_sg(dev, bo->base.sgt->sgl, - bo->base.sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(bo->base.sgt); } else { bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL); @@ -80,7 +79,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) } } - dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL); + ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0); + if (ret) { + sg_free_table(&sgt); + kfree(bo->base.sgt); + bo->base.sgt = NULL; + return ret; + } *bo->base.sgt = sgt; diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c index 5b92fb82674a..2b2739adc7f5 100644 --- a/drivers/gpu/drm/lima/lima_vm.c +++ b/drivers/gpu/drm/lima/lima_vm.c @@ -124,7 +124,7 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create) if (err) goto err_out1; - for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) { + for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) { err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter), bo_va->node.start + offset); if (err) @@ -298,8 +298,7 @@ int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff) mutex_lock(&vm->lock); base = bo_va->node.start + (pageoff << PAGE_SHIFT); - for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, - bo->base.sgt->nents, pageoff) { + for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 
pageoff) { err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter), base + offset); if (err) diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index aa74aac3cbcc..65cd03a4be29 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -24,6 +24,6 @@ config DRM_MEDIATEK_HDMI tristate "DRM HDMI Support for Mediatek SoCs" depends on DRM_MEDIATEK select SND_SOC_HDMI_CODEC if SND_SOC - select GENERIC_PHY + select PHY_MTK_HDMI help DRM/KMS HDMI driver for Mediatek SoCs diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index b7a82ed5788f..77b0fd86063d 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -19,9 +19,6 @@ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o mediatek-drm-hdmi-objs := mtk_cec.o \ mtk_hdmi.o \ - mtk_hdmi_ddc.o \ - mtk_mt2701_hdmi_phy.o \ - mtk_mt8173_hdmi_phy.o \ - mtk_hdmi_phy.o + mtk_hdmi_ddc.o obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index d4f0fb7ad312..cf11c4850b40 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -64,7 +64,8 @@ enum mtk_dpi_out_color_format { struct mtk_dpi { struct mtk_ddp_comp ddp_comp; struct drm_encoder encoder; - struct drm_bridge *bridge; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; void __iomem *regs; struct device *dev; struct clk *engine_clk; @@ -83,9 +84,9 @@ struct mtk_dpi { int refcount; }; -static inline struct mtk_dpi *mtk_dpi_from_encoder(struct drm_encoder *e) +static inline struct mtk_dpi *bridge_to_dpi(struct drm_bridge *b) { - return container_of(e, struct mtk_dpi, encoder); + return container_of(b, struct mtk_dpi, bridge); } enum mtk_dpi_polarity { @@ -521,50 +522,53 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, return 0; } -static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder) { - return true; + drm_encoder_cleanup(encoder); } -static void mtk_dpi_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = { + .destroy = mtk_dpi_encoder_destroy, +}; + +static int mtk_dpi_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) { - struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder); + struct mtk_dpi *dpi = bridge_to_dpi(bridge); + + return drm_bridge_attach(bridge->encoder, dpi->next_bridge, + &dpi->bridge, flags); +} + +static void mtk_dpi_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct mtk_dpi *dpi = bridge_to_dpi(bridge); drm_mode_copy(&dpi->mode, adjusted_mode); } -static void mtk_dpi_encoder_disable(struct drm_encoder *encoder) +static void mtk_dpi_bridge_disable(struct drm_bridge *bridge) { - struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder); + struct mtk_dpi *dpi = bridge_to_dpi(bridge); mtk_dpi_power_off(dpi); } -static void mtk_dpi_encoder_enable(struct drm_encoder *encoder) +static void mtk_dpi_bridge_enable(struct drm_bridge *bridge) { - struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder); + struct mtk_dpi *dpi = bridge_to_dpi(bridge); mtk_dpi_power_on(dpi); mtk_dpi_set_display_mode(dpi, &dpi->mode); } -static int 
mtk_dpi_atomic_check(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - return 0; -} - -static const struct drm_encoder_helper_funcs mtk_dpi_encoder_helper_funcs = { - .mode_fixup = mtk_dpi_encoder_mode_fixup, - .mode_set = mtk_dpi_encoder_mode_set, - .disable = mtk_dpi_encoder_disable, - .enable = mtk_dpi_encoder_enable, - .atomic_check = mtk_dpi_atomic_check, +static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = { + .attach = mtk_dpi_bridge_attach, + .mode_set = mtk_dpi_bridge_mode_set, + .disable = mtk_dpi_bridge_disable, + .enable = mtk_dpi_bridge_enable, }; static void mtk_dpi_start(struct mtk_ddp_comp *comp) @@ -605,12 +609,10 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data) dev_err(dev, "Failed to initialize decoder: %d\n", ret); goto err_unregister; } - drm_encoder_helper_add(&dpi->encoder, &mtk_dpi_encoder_helper_funcs); - /* Currently DPI0 is fixed to be driven by OVL1 */ - dpi->encoder.possible_crtcs = BIT(1); + dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->ddp_comp); - ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL, 0); + ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, 0); if (ret) { dev_err(dev, "Failed to attach bridge: %d\n", ret); goto err_cleanup; @@ -770,11 +772,11 @@ static int mtk_dpi_probe(struct platform_device *pdev) } ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, - NULL, &dpi->bridge); + NULL, &dpi->next_bridge); if (ret) return ret; - dev_info(dev, "Found bridge node: %pOF\n", dpi->bridge->of_node); + dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node); comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI); if (comp_id < 0) { @@ -791,8 +793,15 @@ static int mtk_dpi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dpi); + dpi->bridge.funcs = &mtk_dpi_bridge_funcs; + dpi->bridge.of_node = dev->of_node; + dpi->bridge.type = DRM_MODE_CONNECTOR_DPI; + + drm_bridge_add(&dpi->bridge); + ret = component_add(dev, &mtk_dpi_component_ops); if (ret) { + drm_bridge_remove(&dpi->bridge); dev_err(dev, "Failed to add component: %d\n", ret); return ret; } @@ -802,7 +811,10 @@ static int mtk_dpi_probe(struct platform_device *pdev) static int mtk_dpi_remove(struct platform_device *pdev) { + struct mtk_dpi *dpi = platform_get_drvdata(pdev); + component_del(&pdev->dev, &mtk_dpi_component_ops); + drm_bridge_remove(&dpi->bridge); return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 57c88de9a329..bfd42ae1f64c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -13,6 +13,8 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/soc/mediatek/mtk-cmdq.h> +#include <drm/drm_print.h> + #include "mtk_drm_drv.h" #include "mtk_drm_plane.h" #include "mtk_drm_ddp_comp.h" @@ -412,6 +414,22 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, }; +static bool mtk_drm_find_comp_in_ddp(struct mtk_ddp_comp ddp_comp, + const enum mtk_ddp_comp_id *path, + unsigned int path_len) +{ + unsigned int i; + + if (path == NULL) + return false; + + for (i = 0U; i < path_len; i++) + if (ddp_comp.id == path[i]) + return true; + + return false; +} + int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type) { @@ -427,6 +445,26 @@ int mtk_ddp_comp_get_id(struct 
device_node *node, return -EINVAL; } +unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm, + struct mtk_ddp_comp ddp_comp) +{ + struct mtk_drm_private *private = drm->dev_private; + unsigned int ret = 0; + + if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->main_path, private->data->main_len)) + ret = BIT(0); + else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->ext_path, + private->data->ext_len)) + ret = BIT(1); + else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->third_path, + private->data->third_len)) + ret = BIT(2); + else + DRM_INFO("Failed to find comp in ddp table\n"); + + return ret; +} + int mtk_ddp_comp_init(struct device *dev, struct device_node *node, struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id, const struct mtk_ddp_comp_funcs *funcs) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index debe36395fe7..1d9e00b69462 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -202,6 +202,8 @@ static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp, int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type); +unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm, + struct mtk_ddp_comp ddp_comp); int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id, const struct mtk_ddp_comp_funcs *funcs); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 040a8f393fe2..2350e3200b59 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -74,6 +74,19 @@ static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = { DDP_COMPONENT_DPI0, }; +static const enum mtk_ddp_comp_id mt7623_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_BLS, + DDP_COMPONENT_DPI0, +}; + +static const enum mtk_ddp_comp_id mt7623_mtk_ddp_ext[] = { + DDP_COMPONENT_RDMA1, + DDP_COMPONENT_DSI0, +}; + static const enum mtk_ddp_comp_id mt2712_mtk_ddp_main[] = { DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0, @@ -127,6 +140,14 @@ static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = { .shadow_register = true, }; +static const struct mtk_mmsys_driver_data mt7623_mmsys_driver_data = { + .main_path = mt7623_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt7623_mtk_ddp_main), + .ext_path = mt7623_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt7623_mtk_ddp_ext), + .shadow_register = true, +}; + static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = { .main_path = mt2712_mtk_ddp_main, .main_len = ARRAY_SIZE(mt2712_mtk_ddp_main), @@ -422,6 +443,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = { static const struct of_device_id mtk_drm_of_ids[] = { { .compatible = "mediatek,mt2701-mmsys", .data = &mt2701_mmsys_driver_data}, + { .compatible = "mediatek,mt7623-mmsys", + .data = &mt7623_mmsys_driver_data}, { .compatible = "mediatek,mt2712-mmsys", .data = &mt2712_mmsys_driver_data}, { .compatible = "mediatek,mt8173-mmsys", diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 6190cc3b7b0d..0583e557ad37 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -212,46 +212,28 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) { struct 
mtk_drm_gem_obj *mtk_gem; - int ret; - struct scatterlist *s; - unsigned int i; - dma_addr_t expected; - mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size); + /* check if the entries in the sg_table are contiguous */ + if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) { + DRM_ERROR("sg_table is not contiguous"); + return ERR_PTR(-EINVAL); + } + mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size); if (IS_ERR(mtk_gem)) return ERR_CAST(mtk_gem); - expected = sg_dma_address(sg->sgl); - for_each_sg(sg->sgl, s, sg->nents, i) { - if (!sg_dma_len(s)) - break; - - if (sg_dma_address(s) != expected) { - DRM_ERROR("sg_table is not contiguous"); - ret = -EINVAL; - goto err_gem_free; - } - expected = sg_dma_address(s) + sg_dma_len(s); - } - mtk_gem->dma_addr = sg_dma_address(sg->sgl); mtk_gem->sg = sg; return &mtk_gem->base; - -err_gem_free: - kfree(mtk_gem); - return ERR_PTR(ret); } void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) { struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); struct sg_table *sgt; - struct sg_page_iter iter; unsigned int npages; - unsigned int i = 0; if (mtk_gem->kvaddr) return mtk_gem->kvaddr; @@ -265,11 +247,8 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) if (!mtk_gem->pages) goto out; - for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) { - mtk_gem->pages[i++] = sg_page_iter_page(&iter); - if (i > npages) - break; - } + drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages); + mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 16fd99dcdacf..20f348992086 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -970,11 +970,7 @@ static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi) return ret; } - /* - * Currently display data paths are statically assigned to a crtc each. 
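The mtk_drm_gem import rework above swaps a hand-rolled scatterlist walk for drm_prime_get_contiguous_size(), and replaces the open-coded page iteration in the vmap path with drm_prime_sg_to_page_addr_arrays(). A hedged sketch of the contiguity test as an import-time filter; check_contig_import is an illustrative name, not from the patch:

    #include <linux/dma-buf.h>
    #include <drm/drm_prime.h>

    /* Sketch: reject imported buffers that are not DMA-contiguous. */
    static int check_contig_import(struct dma_buf_attachment *attach,
                                   struct sg_table *sg)
    {
        /*
         * drm_prime_get_contiguous_size() returns the length of the
         * leading DMA-contiguous run; anything short of the full
         * dma-buf size means there is a hole the hardware can't scan.
         */
        if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size)
            return -EINVAL;

        return 0;
    }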
- * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0 - */ - dsi->encoder.possible_crtcs = 1; + dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->ddp_comp); ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index f2e9b429960b..0ed7b0b1a022 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -12,6 +12,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/of_platform.h> #include <linux/of.h> @@ -145,11 +146,16 @@ struct hdmi_audio_param { struct hdmi_codec_params codec_params; }; +struct mtk_hdmi_conf { + bool tz_disabled; +}; + struct mtk_hdmi { struct drm_bridge bridge; struct drm_bridge *next_bridge; struct drm_connector conn; struct device *dev; + const struct mtk_hdmi_conf *conf; struct phy *phy; struct device *cec_dev; struct i2c_adapter *ddc_adpt; @@ -234,7 +240,6 @@ static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black) static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable) { struct arm_smccc_res res; - struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(hdmi->phy); /* * MT8173 HDMI hardware has an output control bit to enable/disable HDMI @@ -242,7 +247,7 @@ static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable) * The ARM trusted firmware provides an API for the HDMI driver to set * this control bit to enable HDMI output in supervisor mode. */ - if (hdmi_phy->conf && hdmi_phy->conf->tz_disabled) + if (hdmi->conf && hdmi->conf->tz_disabled) regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20, 0x80008005, enable ? 
0x80000005 : 0x8000); @@ -1723,6 +1728,7 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev) return -ENOMEM; hdmi->dev = dev; + hdmi->conf = of_device_get_match_data(dev); ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev); if (ret) @@ -1803,8 +1809,16 @@ static int mtk_hdmi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops, mtk_hdmi_suspend, mtk_hdmi_resume); +static const struct mtk_hdmi_conf mtk_hdmi_conf_mt2701 = { + .tz_disabled = true, +}; + static const struct of_device_id mtk_drm_hdmi_of_ids[] = { - { .compatible = "mediatek,mt8173-hdmi", }, + { .compatible = "mediatek,mt2701-hdmi", + .data = &mtk_hdmi_conf_mt2701, + }, + { .compatible = "mediatek,mt8173-hdmi", + }, {} }; @@ -1819,7 +1833,6 @@ static struct platform_driver mtk_hdmi_driver = { }; static struct platform_driver * const mtk_hdmi_drivers[] = { - &mtk_hdmi_phy_driver, &mtk_hdmi_ddc_driver, &mtk_cec_driver, &mtk_hdmi_driver, diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h index bb3653de6bd1..472bf141c92b 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.h +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.h @@ -5,7 +5,6 @@ */ #ifndef _MTK_HDMI_CTRL_H #define _MTK_HDMI_CTRL_H -#include "mtk_hdmi_phy.h" struct platform_driver; diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 6deaa7d01654..e5816b498494 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -6,8 +6,8 @@ config DRM_MSM depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST) depends on OF && COMMON_CLK depends on MMU - depends on INTERCONNECT || !INTERCONNECT depends on QCOM_OCMEM || QCOM_OCMEM=n + select IOMMU_IO_PGTABLE select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR select DRM_KMS_HELPER @@ -57,6 +57,15 @@ config DRM_MSM_HDMI_HDCP help Choose this option to enable HDCP state machine +config DRM_MSM_DP + bool "Enable DisplayPort support in MSM DRM driver" + depends on DRM_MSM + default y + help + Compile in support for DP driver in MSM DRM driver. DP external + display support is enabled through this config option. It can + be primary or secondary display on device. + config DRM_MSM_DSI bool "Enable DSI support in MSM DRM driver" depends on DRM_MSM @@ -110,3 +119,11 @@ config DRM_MSM_DSI_10NM_PHY default y help Choose this option if DSI PHY on SDM845 is used on the platform. + +config DRM_MSM_DSI_7NM_PHY + bool "Enable DSI 7nm PHY driver in MSM DRM (used by SM8150/SM8250)" + depends on DRM_MSM_DSI + default y + help + Choose this option if DSI PHY on SM8150/SM8250 is used on the + platform. 
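The mtk_hdmi hunks above move the TrustZone quirk from the PHY into per-SoC match data: the of_device_id table carries a config struct and of_device_get_match_data() hands back whichever entry matched the probing compatible. A minimal sketch of the pattern; my_conf and the vendor,soc-* compatibles are invented:

    #include <linux/of.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct my_conf {
        bool tz_disabled;
    };

    static const struct my_conf my_conf_soc_a = {
        .tz_disabled = true,
    };

    static const struct of_device_id my_of_ids[] = {
        { .compatible = "vendor,soc-a", .data = &my_conf_soc_a },
        { .compatible = "vendor,soc-b" },   /* no quirks: data stays NULL */
        { }
    };

    static int my_probe(struct platform_device *pdev)
    {
        const struct my_conf *conf = of_device_get_match_data(&pdev->dev);

        /* conf may be NULL, so every user must check it first */
        if (conf && conf->tz_disabled)
            dev_info(&pdev->dev, "taking the non-TrustZone path\n");
        return 0;
    }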
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 42f8aae28b31..340682cd0f32 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -2,6 +2,7 @@ ccflags-y := -I $(srctree)/$(src) ccflags-y += -I $(srctree)/$(src)/disp/dpu1 ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi +ccflags-$(CONFIG_DRM_MSM_DP) += -I $(srctree)/$(src)/dp msm-y := \ adreno/adreno_device.o \ @@ -95,10 +96,23 @@ msm-y := \ msm_gpu_tracepoints.o \ msm_gpummu.o -msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o +msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \ + dp/dp_debug.o msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o +msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \ + dp/dp_catalog.o \ + dp/dp_ctrl.o \ + dp/dp_display.o \ + dp/dp_drm.o \ + dp/dp_hpd.o \ + dp/dp_link.o \ + dp/dp_panel.o \ + dp/dp_parser.o \ + dp/dp_power.o \ + dp/dp_audio.o + msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o @@ -119,6 +133,7 @@ msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o +msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) msm-y += dsi/pll/dsi_pll.o @@ -126,6 +141,7 @@ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o +msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/pll/dsi_pll_7nm.o endif obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 48fa49f69d6d..7e82c41a85f1 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -10,6 +10,48 @@ extern bool hang_debug; static void a2xx_dump(struct msm_gpu *gpu); static bool a2xx_idle(struct msm_gpu *gpu); +static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) +{ + struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_ringbuffer *ring = submit->ring; + unsigned int i; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + /* ignore IB-targets */ + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + /* ignore if there has not been a ctx switch: */ + if (priv->lastctx == submit->queue->ctx) + break; + fallthrough; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, submit->cmd[i].size); + OUT_PKT2(ring); + break; + } + } + + OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); + OUT_RING(ring, submit->seqno); + + /* wait for idle before cache flush/interrupt */ + OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); + OUT_RING(ring, 0x00000000); + + OUT_PKT3(ring, CP_EVENT_WRITE, 3); + OUT_RING(ring, CACHE_FLUSH_TS); + OUT_RING(ring, rbmemptr(ring, fence)); + OUT_RING(ring, submit->seqno); + OUT_PKT3(ring, CP_INTERRUPT, 1); + OUT_RING(ring, 0x80000000); + + adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); +} + static bool a2xx_me_init(struct msm_gpu *gpu) { struct msm_ringbuffer *ring = gpu->rb[0]; @@ -53,7 +95,7 @@ static bool a2xx_me_init(struct msm_gpu *gpu) OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1); OUT_RING(ring, 1); - 
gpu->funcs->flush(gpu, ring); + adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); return a2xx_idle(gpu); } @@ -421,16 +463,11 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) return aspace; } -/* Register offset defines for A2XX - copy of A3XX */ -static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE), - REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR), - REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL), -}; +static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR); + return ring->memptrs->rptr; +} static const struct adreno_gpu_funcs funcs = { .base = { @@ -439,8 +476,7 @@ static const struct adreno_gpu_funcs funcs = { .pm_suspend = msm_gpu_pm_suspend, .pm_resume = msm_gpu_pm_resume, .recover = a2xx_recover, - .submit = adreno_submit, - .flush = adreno_flush, + .submit = a2xx_submit, .active_ring = adreno_active_ring, .irq = a2xx_irq, .destroy = a2xx_destroy, @@ -450,6 +486,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_get = a2xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, .create_address_space = a2xx_create_address_space, + .get_rptr = a2xx_get_rptr, }, }; @@ -491,8 +528,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev) else adreno_gpu->registers = a220_registers; - adreno_gpu->reg_offsets = a2xx_register_offsets; - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index f6471145a7a6..f29c77d9cd42 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -28,6 +28,61 @@ extern bool hang_debug; static void a3xx_dump(struct msm_gpu *gpu); static bool a3xx_idle(struct msm_gpu *gpu); +static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) +{ + struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_ringbuffer *ring = submit->ring; + unsigned int i; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + /* ignore IB-targets */ + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + /* ignore if there has not been a ctx switch: */ + if (priv->lastctx == submit->queue->ctx) + break; + fallthrough; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, submit->cmd[i].size); + OUT_PKT2(ring); + break; + } + } + + OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); + OUT_RING(ring, submit->seqno); + + /* Flush HLSQ lazy updates to make sure there is nothing + * pending for indirect loads after the timestamp has + * passed: + */ + OUT_PKT3(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, HLSQ_FLUSH); + + /* wait for idle before cache flush/interrupt */ + OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); + OUT_RING(ring, 0x00000000); + + /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */ + OUT_PKT3(ring, CP_EVENT_WRITE, 3); + OUT_RING(ring, CACHE_FLUSH_TS | BIT(31)); + OUT_RING(ring, rbmemptr(ring, fence)); + OUT_RING(ring, submit->seqno); + +#if 0 + /* Dummy set-constant to trigger context rollover 
*/ + OUT_PKT3(ring, CP_SET_CONSTANT, 2); + OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG)); + OUT_RING(ring, 0x00000000); +#endif + + adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); +} + static bool a3xx_me_init(struct msm_gpu *gpu) { struct msm_ringbuffer *ring = gpu->rb[0]; @@ -51,7 +106,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu) OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000); - gpu->funcs->flush(gpu, ring); + adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); return a3xx_idle(gpu); } @@ -423,16 +478,11 @@ static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu) return state; } -/* Register offset defines for A3XX */ -static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE), - REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR), - REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL), -}; +static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR); + return ring->memptrs->rptr; +} static const struct adreno_gpu_funcs funcs = { .base = { @@ -441,8 +491,7 @@ static const struct adreno_gpu_funcs funcs = { .pm_suspend = msm_gpu_pm_suspend, .pm_resume = msm_gpu_pm_resume, .recover = a3xx_recover, - .submit = adreno_submit, - .flush = adreno_flush, + .submit = a3xx_submit, .active_ring = adreno_active_ring, .irq = a3xx_irq, .destroy = a3xx_destroy, @@ -452,6 +501,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_get = a3xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, .create_address_space = adreno_iommu_create_address_space, + .get_rptr = a3xx_get_rptr, }, }; @@ -490,7 +540,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs); adreno_gpu->registers = a3xx_registers; - adreno_gpu->reg_offsets = a3xx_register_offsets; ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 954753600625..2b93b33b05e4 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -22,6 +22,54 @@ extern bool hang_debug; static void a4xx_dump(struct msm_gpu *gpu); static bool a4xx_idle(struct msm_gpu *gpu); +static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) +{ + struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_ringbuffer *ring = submit->ring; + unsigned int i; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + /* ignore IB-targets */ + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + /* ignore if there has not been a ctx switch: */ + if (priv->lastctx == submit->queue->ctx) + break; + fallthrough; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFE, 2); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, submit->cmd[i].size); + OUT_PKT2(ring); + break; + } + } + + OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); + OUT_RING(ring, submit->seqno); + + /* Flush HLSQ lazy updates to make sure there is nothing + * pending for indirect loads after the timestamp has + * passed: + */ + OUT_PKT3(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, 
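The a2xx/a3xx (and below, a4xx) hunks duplicate what used to be the shared adreno_submit(): emit command dwords into the ring, write the fence seqno, then publish the write pointer. A standalone toy of the OUT_RING()/get_wptr() mechanics, assuming nothing beyond what the diff shows; the packet header value is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model: commands are written dword-by-dword into a ring, then
     * the write pointer (in dwords, modulo ring size) is published to
     * the GPU, as get_wptr() does in adreno_gpu.h. */
    #define RING_DWORDS 64

    struct ring {
        uint32_t buf[RING_DWORDS];
        unsigned int cur;   /* next free dword */
    };

    static void out_ring(struct ring *r, uint32_t data)
    {
        r->buf[r->cur++ % RING_DWORDS] = data;
    }

    static unsigned int get_wptr(const struct ring *r)
    {
        return r->cur % RING_DWORDS;
    }

    int main(void)
    {
        struct ring r = { .cur = 0 };
        uint32_t seqno = 42;

        out_ring(&r, 0x80000000u);  /* stand-in for a packet header */
        out_ring(&r, seqno);        /* fence value the CPU will poll */
        printf("publish wptr=%u\n", get_wptr(&r));
        return 0;
    }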
HLSQ_FLUSH); + + /* wait for idle before cache flush/interrupt */ + OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); + OUT_RING(ring, 0x00000000); + + /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */ + OUT_PKT3(ring, CP_EVENT_WRITE, 3); + OUT_RING(ring, CACHE_FLUSH_TS | BIT(31)); + OUT_RING(ring, rbmemptr(ring, fence)); + OUT_RING(ring, submit->seqno); + + adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR); +} + /* * a4xx_enable_hwcg() - Program the clock control registers * @device: The adreno device pointer @@ -129,7 +177,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu) OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000); - gpu->funcs->flush(gpu, ring); + adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR); return a4xx_idle(gpu); } @@ -515,17 +563,6 @@ static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu) return state; } -/* Register offset defines for A4XX, in order of enum adreno_regs */ -static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE), - REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR), - REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL), -}; - static void a4xx_dump(struct msm_gpu *gpu) { printk("status: %08x\n", @@ -576,6 +613,12 @@ static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) return 0; } +static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR); + return ring->memptrs->rptr; +} + static const struct adreno_gpu_funcs funcs = { .base = { .get_param = adreno_get_param, @@ -583,8 +626,7 @@ static const struct adreno_gpu_funcs funcs = { .pm_suspend = a4xx_pm_suspend, .pm_resume = a4xx_pm_resume, .recover = a4xx_recover, - .submit = adreno_submit, - .flush = adreno_flush, + .submit = a4xx_submit, .active_ring = adreno_active_ring, .irq = a4xx_irq, .destroy = a4xx_destroy, @@ -594,6 +636,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_get = a4xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, .create_address_space = adreno_iommu_create_address_space, + .get_rptr = a4xx_get_rptr, }, .get_timestamp = a4xx_get_timestamp, }; @@ -631,15 +674,12 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? 
a405_registers : a4xx_registers; - adreno_gpu->reg_offsets = a4xx_register_offsets; /* if needed, allocate gmem: */ - if (adreno_is_a4xx(adreno_gpu)) { - ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu, - &a4xx_gpu->ocmem); - if (ret) - goto fail; - } + ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu, + &a4xx_gpu->ocmem); + if (ret) + goto fail; if (!gpu->aspace) { /* TODO we think it is possible to configure the GPU to diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c index 68eddac7771c..fc2c905b6c9e 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -11,7 +11,7 @@ #include "a5xx_gpu.h" -static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p) +static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p) { int i; @@ -22,11 +22,9 @@ static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p) drm_printf(p, " %02x: %08x\n", i, gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA)); } - - return 0; } -static int me_print(struct msm_gpu *gpu, struct drm_printer *p) +static void me_print(struct msm_gpu *gpu, struct drm_printer *p) { int i; @@ -37,11 +35,9 @@ static int me_print(struct msm_gpu *gpu, struct drm_printer *p) drm_printf(p, " %02x: %08x\n", i, gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA)); } - - return 0; } -static int meq_print(struct msm_gpu *gpu, struct drm_printer *p) +static void meq_print(struct msm_gpu *gpu, struct drm_printer *p) { int i; @@ -52,11 +48,9 @@ static int meq_print(struct msm_gpu *gpu, struct drm_printer *p) drm_printf(p, " %02x: %08x\n", i, gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA)); } - - return 0; } -static int roq_print(struct msm_gpu *gpu, struct drm_printer *p) +static void roq_print(struct msm_gpu *gpu, struct drm_printer *p) { int i; @@ -71,8 +65,6 @@ static int roq_print(struct msm_gpu *gpu, struct drm_printer *p) drm_printf(p, " %02x: %08x %08x %08x %08x\n", i, val[0], val[1], val[2], val[3]); } - - return 0; } static int show(struct seq_file *m, void *arg) @@ -81,10 +73,11 @@ static int show(struct seq_file *m, void *arg) struct drm_device *dev = node->minor->dev; struct msm_drm_private *priv = dev->dev_private; struct drm_printer p = drm_seq_file_printer(m); - int (*show)(struct msm_gpu *gpu, struct drm_printer *p) = + void (*show)(struct msm_gpu *gpu, struct drm_printer *p) = node->info_ent->data; - return show(priv->gpu, &p); + show(priv->gpu, &p); + return 0; } #define ENT(n) { .name = #n, .show = show, .data = n ##_print } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 91726da82ed6..d6804a802355 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -18,13 +18,24 @@ static void a5xx_dump(struct msm_gpu *gpu); #define GPU_PAS_ID 13 -static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, + bool sync) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); uint32_t wptr; unsigned long flags; + /* + * Most flush operations need to issue a WHERE_AM_I opcode to sync up + * the rptr shadow + */ + if (a5xx_gpu->has_whereami && sync) { + OUT_PKT7(ring, CP_WHERE_AM_I, 2); + OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring))); + OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring))); + } + spin_lock_irqsave(&ring->lock, flags); /* Copy the shadow to the actual register */ @@ -43,8 +54,7 @@ static void a5xx_flush(struct 
msm_gpu *gpu, struct msm_ringbuffer *ring) gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); } -static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx) +static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit) { struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_ringbuffer *ring = submit->ring; @@ -57,7 +67,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (priv->lastctx == ctx) + if (priv->lastctx == submit->queue->ctx) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -91,7 +101,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit } } - a5xx_flush(gpu, ring); + a5xx_flush(gpu, ring, true); a5xx_preempt_trigger(gpu); /* we might not necessarily have a cmd from userspace to @@ -103,8 +113,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit msm_gpu_retire(gpu); } -static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx) +static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); @@ -114,7 +123,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) { priv->lastctx = NULL; - a5xx_submit_in_rb(gpu, submit, ctx); + a5xx_submit_in_rb(gpu, submit); return; } @@ -148,7 +157,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (priv->lastctx == ctx) + if (priv->lastctx == submit->queue->ctx) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -206,7 +215,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, /* Set bit 0 to trigger an interrupt on preempt complete */ OUT_RING(ring, 0x01); - a5xx_flush(gpu, ring); + /* A WHERE_AM_I packet is not needed after a YIELD */ + a5xx_flush(gpu, ring, false); /* Check to see if we need to start preemption */ a5xx_preempt_trigger(gpu); @@ -365,7 +375,7 @@ static int a5xx_me_init(struct msm_gpu *gpu) OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000); - gpu->funcs->flush(gpu, ring); + a5xx_flush(gpu, ring, true); return a5xx_idle(gpu, ring) ? 0 : -EINVAL; } @@ -407,11 +417,31 @@ static int a5xx_preempt_start(struct msm_gpu *gpu) OUT_RING(ring, 0x01); OUT_RING(ring, 0x01); - gpu->funcs->flush(gpu, ring); + /* The WHERE_AM_I packet is not needed after a YIELD is issued */ + a5xx_flush(gpu, ring, false); return a5xx_idle(gpu, ring) ? 0 : -EINVAL; } +static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu, + struct drm_gem_object *obj) +{ + u32 *buf = msm_gem_get_vaddr_active(obj); + + if (IS_ERR(buf)) + return; + + /* + * If the lowest nibble is 0xa that is an indication that this microcode + * has been patched. 
The actual version is in dword [3] but we only care + * about the patchlevel which is the lowest nibble of dword [3] + */ + if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) + a5xx_gpu->has_whereami = true; + + msm_gem_put_vaddr(obj); +} + static int a5xx_ucode_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -447,6 +477,7 @@ static int a5xx_ucode_init(struct msm_gpu *gpu) } msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw"); + a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo); } gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, @@ -506,6 +537,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu) static int a5xx_hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); int ret; gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); @@ -714,9 +746,36 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI, gpu->rb[0]->iova); + /* + * If the microcode supports the WHERE_AM_I opcode then we can use that + * in lieu of the RPTR shadow and enable preemption. Otherwise, we + * can't safely use the RPTR shadow or preemption. In either case, the + * RPTR shadow should be disabled in hardware. + */ gpu_write(gpu, REG_A5XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + /* Disable preemption if WHERE_AM_I isn't available */ + if (!a5xx_gpu->has_whereami && gpu->nr_rings > 1) { + a5xx_preempt_fini(gpu); + gpu->nr_rings = 1; + } else { + /* Create a privileged buffer for the RPTR shadow */ + if (!a5xx_gpu->shadow_bo) { + a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, + sizeof(u32) * gpu->nr_rings, + MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, + gpu->aspace, &a5xx_gpu->shadow_bo, + &a5xx_gpu->shadow_iova); + + if (IS_ERR(a5xx_gpu->shadow)) + return PTR_ERR(a5xx_gpu->shadow); + } + + gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR, + REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0])); + } + a5xx_preempt_hw_init(gpu); /* Disable the interrupts through the initial bringup stage */ @@ -740,7 +799,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1); OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT)); - gpu->funcs->flush(gpu, gpu->rb[0]); + a5xx_flush(gpu, gpu->rb[0], true); if (!a5xx_idle(gpu, gpu->rb[0])) return -EINVAL; } @@ -758,7 +817,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); OUT_RING(gpu->rb[0], 0x00000000); - gpu->funcs->flush(gpu, gpu->rb[0]); + a5xx_flush(gpu, gpu->rb[0], true); if (!a5xx_idle(gpu, gpu->rb[0])) return -EINVAL; } else if (ret == -ENODEV) { @@ -825,6 +884,11 @@ static void a5xx_destroy(struct msm_gpu *gpu) drm_gem_object_put(a5xx_gpu->gpmu_bo); } + if (a5xx_gpu->shadow_bo) { + msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace); + drm_gem_object_put(a5xx_gpu->shadow_bo); + } + adreno_gpu_cleanup(adreno_gpu); kfree(a5xx_gpu); } @@ -1057,17 +1121,6 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu) return IRQ_HANDLED; } -static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI, - REG_A5XX_CP_RB_RPTR_ADDR_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, 
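The has_whereami probes above key off the firmware header: a low nibble of 0xa in dword 0 marks patched microcode, with the patchlevel in the low nibble of the version dword; the a6xx variant additionally accepts stock firmware whose version field exceeds 0x190. A standalone toy of the a5xx check, with invented header values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the driver's indexing: buf[0] carries the "patched" marker
     * nibble, buf[2] carries the version dword whose low nibble is the
     * patchlevel. */
    static bool fw_has_whereami(const uint32_t *buf)
    {
        if ((buf[0] & 0xf) == 0xa && (buf[2] & 0xf) >= 1)
            return true;    /* patched ucode with patchlevel >= 1 */
        return false;
    }

    int main(void)
    {
        uint32_t patched[3] = { 0x016dd00a, 0, 0x00000191 };
        uint32_t stock[3]   = { 0x016dd000, 0, 0x00000190 };

        printf("patched: %d, stock: %d\n",
               fw_has_whereami(patched), fw_has_whereami(stock));
        return 0;
    }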
REG_A5XX_CP_RB_WPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL), -}; - static const u32 a5xx_registers[] = { 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, @@ -1432,6 +1485,17 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu) return (unsigned long)busy_time; } +static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + if (a5xx_gpu->has_whereami) + return a5xx_gpu->shadow[ring->id]; + + return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR); +} + static const struct adreno_gpu_funcs funcs = { .base = { .get_param = adreno_get_param, @@ -1440,7 +1504,6 @@ static const struct adreno_gpu_funcs funcs = { .pm_resume = a5xx_pm_resume, .recover = a5xx_recover, .submit = a5xx_submit, - .flush = a5xx_flush, .active_ring = a5xx_active_ring, .irq = a5xx_irq, .destroy = a5xx_destroy, @@ -1454,6 +1517,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_get = a5xx_gpu_state_get, .gpu_state_put = a5xx_gpu_state_put, .create_address_space = adreno_iommu_create_address_space, + .get_rptr = a5xx_get_rptr, }, .get_timestamp = a5xx_get_timestamp, }; @@ -1512,14 +1576,12 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) gpu = &adreno_gpu->base; adreno_gpu->registers = a5xx_registers; - adreno_gpu->reg_offsets = a5xx_register_offsets; a5xx_gpu->lm_leakage = 0x4E001A; check_speed_bin(&pdev->dev); - /* Restricting nr_rings to 1 to temporarily disable preemption */ - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4); if (ret) { a5xx_destroy(&(a5xx_gpu->base.base)); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 1e5b1a15a70f..c7187bcc5e90 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -37,6 +37,13 @@ struct a5xx_gpu { atomic_t preempt_state; struct timer_list preempt_timer; + + struct drm_gem_object *shadow_bo; + uint64_t shadow_iova; + uint32_t *shadow; + + /* True if the microcode supports the WHERE_AM_I opcode */ + bool has_whereami; }; #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) @@ -141,6 +148,9 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, return -ETIMEDOUT; } +#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \ + ((ring)->id * sizeof(uint32_t))) + bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring); void a5xx_set_hwcg(struct msm_gpu *gpu, bool state); @@ -150,6 +160,8 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu); void a5xx_preempt_irq(struct msm_gpu *gpu); void a5xx_preempt_fini(struct msm_gpu *gpu); +void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync); + /* Return true if we are in a preempt state */ static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu) { diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 321a8061fd32..f176a6f3eff6 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -240,7 +240,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu) OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); OUT_RING(ring, 1); - gpu->funcs->flush(gpu, ring); + a5xx_flush(gpu, ring, true); if (!a5xx_idle(gpu, ring)) { DRM_ERROR("%s: Unable to load GPMU firmware. 
GPMU will not be active\n", diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index 9f3fe177b00e..7e04509c4e1f 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -259,8 +259,9 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, ptr->magic = A5XX_PREEMPT_RECORD_MAGIC; ptr->info = 0; ptr->data = 0; - ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT; - ptr->rptr_addr = rbmemptr(ring, rptr); + ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE; + + ptr->rptr_addr = shadowptr(a5xx_gpu, ring); ptr->counter = counters_iova; return 0; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index e1c7bcd1b1eb..491fee410daf 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -11,6 +11,7 @@ #include "a6xx_gpu.h" #include "a6xx_gmu.xml.h" #include "msm_gem.h" +#include "msm_gpu_trace.h" #include "msm_mmu.h" static void a6xx_gmu_fault(struct a6xx_gmu *gmu) @@ -124,6 +125,8 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) gmu->current_perf_index = perf_index; gmu->freq = gmu->gpu_freqs[perf_index]; + trace_msm_gmu_freq_change(gmu->freq, perf_index); + /* * This can get called from devfreq while the hardware is idle. Don't * bring up the power if it isn't already active diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 66a95e22b7b3..948f3656c20c 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -51,9 +51,20 @@ bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); uint32_t wptr; unsigned long flags; + /* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */ + if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) { + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + OUT_PKT7(ring, CP_WHERE_AM_I, 2); + OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring))); + OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring))); + } + spin_lock_irqsave(&ring->lock, flags); /* Copy the shadow to the actual register */ @@ -81,8 +92,50 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter, OUT_RING(ring, upper_32_bits(iova)); } -static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx) +static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, + struct msm_ringbuffer *ring, struct msm_file_private *ctx) +{ + phys_addr_t ttbr; + u32 asid; + u64 memptr = rbmemptr(ring, ttbr0); + + if (ctx == a6xx_gpu->cur_ctx) + return; + + if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) + return; + + /* Execute the table update */ + OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4); + OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr))); + + OUT_RING(ring, + CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) | + CP_SMMU_TABLE_UPDATE_1_ASID(asid)); + OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0)); + OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0)); + + /* + * Write the new TTBR0 to the memstore. This is good for debugging. 
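Both GPU generations now keep one u32 RPTR slot per ring inside a single privileged buffer, addressed by the shadowptr() macros as iova + ring_id * sizeof(u32), and get_rptr() prefers that shadow over an MMIO read. A standalone toy of the addressing and the fallback choice; all values are invented:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_gpu {
        uint64_t shadow_iova;   /* GPU address of the shadow buffer */
        uint32_t shadow[4];     /* CPU view, one slot per ring */
        int has_whereami;
    };

    static uint64_t shadowptr(const struct toy_gpu *gpu, int ring_id)
    {
        return gpu->shadow_iova + ring_id * sizeof(uint32_t);
    }

    static uint32_t get_rptr(const struct toy_gpu *gpu, int ring_id)
    {
        if (gpu->has_whereami)
            return gpu->shadow[ring_id];  /* written via CP_WHERE_AM_I */
        return 0;  /* fallback: would be an MMIO read of CP_RB_RPTR */
    }

    int main(void)
    {
        struct toy_gpu gpu = { .shadow_iova = 0x100000, .has_whereami = 1 };

        gpu.shadow[2] = 0x40;   /* pretend the CP reported ring 2's rptr */
        printf("ring2 slot at %#llx, rptr=%u\n",
               (unsigned long long)shadowptr(&gpu, 2), get_rptr(&gpu, 2));
        return 0;
    }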
+ */ + OUT_PKT7(ring, CP_MEM_WRITE, 4); + OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr))); + OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr))); + OUT_RING(ring, lower_32_bits(ttbr)); + OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr)); + + /* + * And finally, trigger a uche flush to be sure there isn't anything + * lingering in that part of the GPU + */ + + OUT_PKT7(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, 0x31); + + a6xx_gpu->cur_ctx = ctx; +} + +static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; struct msm_drm_private *priv = gpu->dev->dev_private; @@ -91,6 +144,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_ringbuffer *ring = submit->ring; unsigned int i; + a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); + get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO, rbmemptr_stats(ring, index, cpcycles_start)); @@ -115,7 +170,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (priv->lastctx == ctx) + if (priv->lastctx == submit->queue->ctx) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -464,6 +519,30 @@ static int a6xx_cp_init(struct msm_gpu *gpu) return a6xx_idle(gpu, ring) ? 0 : -EINVAL; } +static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu, + struct drm_gem_object *obj) +{ + u32 *buf = msm_gem_get_vaddr_active(obj); + + if (IS_ERR(buf)) + return; + + /* + * If the lowest nibble is 0xa that is an indication that this microcode + * has been patched. The actual version is in dword [3] but we only care + * about the patchlevel which is the lowest nibble of dword [3] + * + * Otherwise check that the firmware is greater than or equal to 1.90 + * which was the first version that had this fix built in + */ + if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) + a6xx_gpu->has_whereami = true; + else if ((buf[0] & 0xfff) > 0x190) + a6xx_gpu->has_whereami = true; + + msm_gem_put_vaddr(obj); +} + static int a6xx_ucode_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -484,6 +563,7 @@ static int a6xx_ucode_init(struct msm_gpu *gpu) } msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw"); + a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo); } gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO, @@ -699,12 +779,43 @@ static int a6xx_hw_init(struct msm_gpu *gpu) gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI, gpu->rb[0]->iova); - gpu_write(gpu, REG_A6XX_CP_RB_CNTL, - MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + /* Targets that support extended APRIV can use the RPTR shadow from + * hardware but all the other ones need to disable the feature. 
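a6xx_set_pagetable() is deliberately memoized on cur_ctx: back-to-back submits from the same context skip the SMMU_TABLE_UPDATE sequence entirely, and hw_init() resets cur_ctx to NULL because the ring contents are discarded. A standalone toy of just that skip logic:

    #include <stdio.h>

    struct ctx { int asid; };

    static const struct ctx *cur_ctx;

    static void set_pagetable(const struct ctx *ctx)
    {
        if (ctx == cur_ctx)
            return;   /* back-to-back submits from one ctx: skip */

        printf("emit SMMU_TABLE_UPDATE for asid %d\n", ctx->asid);
        cur_ctx = ctx;
    }

    int main(void)
    {
        struct ctx a = { 1 }, b = { 2 };

        set_pagetable(&a);  /* switches */
        set_pagetable(&a);  /* skipped */
        set_pagetable(&b);  /* switches */
        return 0;
    }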
Targets + * that support the WHERE_AM_I opcode can use that instead + */ + if (adreno_gpu->base.hw_apriv) + gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT); + else + gpu_write(gpu, REG_A6XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + /* + * Expanded APRIV and targets that support WHERE_AM_I both need a + * privileged buffer to store the RPTR shadow + */ + + if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) { + if (!a6xx_gpu->shadow_bo) { + a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev, + sizeof(u32) * gpu->nr_rings, + MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, + gpu->aspace, &a6xx_gpu->shadow_bo, + &a6xx_gpu->shadow_iova); + + if (IS_ERR(a6xx_gpu->shadow)) + return PTR_ERR(a6xx_gpu->shadow); + } + + gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO, + REG_A6XX_CP_RB_RPTR_ADDR_HI, + shadowptr(a6xx_gpu, gpu->rb[0])); + } /* Always come up on rb 0 */ a6xx_gpu->cur_ring = gpu->rb[0]; + a6xx_gpu->cur_ctx = NULL; + /* Enable the SQE to start the CP engine */ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); @@ -911,18 +1022,6 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu) return IRQ_HANDLED; } -static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, - REG_A6XX_CP_RB_RPTR_ADDR_LO), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI, - REG_A6XX_CP_RB_RPTR_ADDR_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL), -}; - static int a6xx_pm_resume(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -931,6 +1030,8 @@ static int a6xx_pm_resume(struct msm_gpu *gpu) gpu->needs_hw_init = true; + trace_msm_gpu_resume(0); + ret = a6xx_gmu_resume(a6xx_gpu); if (ret) return ret; @@ -945,6 +1046,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + trace_msm_gpu_suspend(0); + devfreq_suspend_device(gpu->devfreq.devfreq); return a6xx_gmu_stop(a6xx_gpu); @@ -983,6 +1086,11 @@ static void a6xx_destroy(struct msm_gpu *gpu) drm_gem_object_put(a6xx_gpu->sqe_bo); } + if (a6xx_gpu->shadow_bo) { + msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace); + drm_gem_object_put(a6xx_gpu->shadow_bo); + } + a6xx_gmu_remove(a6xx_gpu); adreno_gpu_cleanup(adreno_gpu); @@ -1017,6 +1125,31 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) return (unsigned long)busy_time; } +static struct msm_gem_address_space * +a6xx_create_private_address_space(struct msm_gpu *gpu) +{ + struct msm_mmu *mmu; + + mmu = msm_iommu_pagetable_create(gpu->aspace->mmu); + + if (IS_ERR(mmu)) + return ERR_CAST(mmu); + + return msm_gem_address_space_create(mmu, + "gpu", 0x100000000ULL, 0x1ffffffffULL); +} + +static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) + return a6xx_gpu->shadow[ring->id]; + + return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); +} + static const struct adreno_gpu_funcs funcs = { .base = { .get_param = adreno_get_param, @@ -1025,7 +1158,6 @@ static const struct adreno_gpu_funcs funcs = { .pm_resume = a6xx_pm_resume, 
.recover = a6xx_recover, .submit = a6xx_submit, - .flush = a6xx_flush, .active_ring = a6xx_active_ring, .irq = a6xx_irq, .destroy = a6xx_destroy, @@ -1040,6 +1172,8 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_put = a6xx_gpu_state_put, #endif .create_address_space = adreno_iommu_create_address_space, + .create_private_address_space = a6xx_create_private_address_space, + .get_rptr = a6xx_get_rptr, }, .get_timestamp = a6xx_get_timestamp, }; @@ -1048,6 +1182,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = priv->gpu_pdev; + struct adreno_platform_config *config = pdev->dev.platform_data; + const struct adreno_info *info; struct device_node *node; struct a6xx_gpu *a6xx_gpu; struct adreno_gpu *adreno_gpu; @@ -1062,9 +1198,15 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) gpu = &adreno_gpu->base; adreno_gpu->registers = NULL; - adreno_gpu->reg_offsets = a6xx_register_offsets; - if (adreno_is_a650(adreno_gpu)) + /* + * We need to know the platform type before calling into adreno_gpu_init + * so that the hw_apriv flag can be correctly set. Snoop into the info + * and grab the revision number + */ + info = adreno_info(config->rev); + + if (info && info->revn == 650) adreno_gpu->base.hw_apriv = true; ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 03ba60d5b07f..3eeebf6a754b 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -19,8 +19,15 @@ struct a6xx_gpu { uint64_t sqe_iova; struct msm_ringbuffer *cur_ring; + struct msm_file_private *cur_ctx; struct a6xx_gmu gmu; + + struct drm_gem_object *shadow_bo; + uint64_t shadow_iova; + uint32_t *shadow; + + bool has_whereami; }; #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base) @@ -50,6 +57,9 @@ static inline bool a6xx_has_gbif(struct adreno_gpu *gpu) return true; } +#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \ + ((_ring)->id * sizeof(uint32_t))) + int a6xx_gmu_resume(struct a6xx_gpu *gpu); int a6xx_gmu_stop(struct a6xx_gpu *gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index b12f5b4a1bea..e9ede19193b0 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -875,7 +875,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu, int i; a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count, - sizeof(a6xx_state->indexed_regs)); + sizeof(*a6xx_state->indexed_regs)); if (!a6xx_state->indexed_regs) return; diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 9eeb46bf2a5d..58e03b20e1c7 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -282,7 +282,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) int ret; if (pdev) - gpu = platform_get_drvdata(pdev); + gpu = dev_to_gpu(&pdev->dev); if (!gpu) { dev_err_once(dev->dev, "no GPU device was found\n"); @@ -417,15 +417,13 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(gpu); } - dev_set_drvdata(dev, gpu); - return 0; } static void adreno_unbind(struct device *dev, struct device *master, void *data) { - struct msm_gpu *gpu = dev_get_drvdata(dev); + struct msm_gpu *gpu = dev_to_gpu(dev); pm_runtime_force_suspend(dev); 
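The adreno_device.c hunks replace drvdata lookups with a dev_to_gpu() helper. Its definition is not in the hunks shown here, but the usual shape of such a helper is container_of() over an embedded structure; the toy below models that shape only, and should not be read as the driver's actual definition:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct device { const char *name; };

    struct msm_gpu {
        int id;
        struct device dev;   /* embedded, not a pointer */
    };

    /* Recover the containing GPU from any callback that only gets the
     * device, without a drvdata round-trip. */
    static struct msm_gpu *dev_to_gpu(struct device *dev)
    {
        return container_of(dev, struct msm_gpu, dev);
    }

    int main(void)
    {
        struct msm_gpu gpu = { .id = 630, .dev = { "adreno" } };

        printf("gpu id from device: %d\n", dev_to_gpu(&gpu.dev)->id);
        return 0;
    }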
gpu->funcs->destroy(gpu); @@ -490,16 +488,14 @@ static const struct of_device_id dt_match[] = { #ifdef CONFIG_PM static int adreno_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct msm_gpu *gpu = platform_get_drvdata(pdev); + struct msm_gpu *gpu = dev_to_gpu(dev); return gpu->funcs->pm_resume(gpu); } static int adreno_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct msm_gpu *gpu = platform_get_drvdata(pdev); + struct msm_gpu *gpu = dev_to_gpu(dev); return gpu->funcs->pm_suspend(gpu); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 862dd35b27d3..458b5b26d3c2 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -189,12 +189,27 @@ struct msm_gem_address_space * adreno_iommu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) { - struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type); - struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu); + struct iommu_domain *iommu; + struct msm_mmu *mmu; struct msm_gem_address_space *aspace; + u64 start, size; - aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, - 0xffffffff - SZ_16M); + iommu = iommu_domain_alloc(&platform_bus_type); + if (!iommu) + return NULL; + + mmu = msm_iommu_new(&pdev->dev, iommu); + + /* + * Use the aperture start or SZ_16M, whichever is greater. This will + * ensure that we align with the allocated pagetable range while still + * allowing room in the lower 32 bits for GMEM and whatnot + */ + start = max_t(u64, SZ_16M, iommu->geometry.aperture_start); + size = iommu->geometry.aperture_end - start + 1; + + aspace = msm_gem_address_space_create(mmu, "gpu", + start & GENMASK_ULL(48, 0), size); if (IS_ERR(aspace) && !IS_ERR(mmu)) mmu->funcs->destroy(mmu); @@ -407,8 +422,9 @@ int adreno_hw_init(struct msm_gpu *gpu) static uint32_t get_rptr(struct adreno_gpu *adreno_gpu, struct msm_ringbuffer *ring) { - return ring->memptrs->rptr = adreno_gpu_read( - adreno_gpu, REG_ADRENO_CP_RB_RPTR); + struct msm_gpu *gpu = &adreno_gpu->base; + + return gpu->funcs->get_rptr(gpu, ring); } struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) @@ -434,81 +450,8 @@ void adreno_recover(struct msm_gpu *gpu) } } -void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx) -{ - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - struct msm_drm_private *priv = gpu->dev->dev_private; - struct msm_ringbuffer *ring = submit->ring; - unsigned i; - - for (i = 0; i < submit->nr_cmds; i++) { - switch (submit->cmd[i].type) { - case MSM_SUBMIT_CMD_IB_TARGET_BUF: - /* ignore IB-targets */ - break; - case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - /* ignore if there has not been a ctx switch: */ - if (priv->lastctx == ctx) - break; - fallthrough; - case MSM_SUBMIT_CMD_BUF: - OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ? 
- CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); - OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); - OUT_RING(ring, submit->cmd[i].size); - OUT_PKT2(ring); - break; - } - } - - OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); - OUT_RING(ring, submit->seqno); - - if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) { - /* Flush HLSQ lazy updates to make sure there is nothing - * pending for indirect loads after the timestamp has - * passed: - */ - OUT_PKT3(ring, CP_EVENT_WRITE, 1); - OUT_RING(ring, HLSQ_FLUSH); - } - - /* wait for idle before cache flush/interrupt */ - OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); - OUT_RING(ring, 0x00000000); - - if (!adreno_is_a2xx(adreno_gpu)) { - /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */ - OUT_PKT3(ring, CP_EVENT_WRITE, 3); - OUT_RING(ring, CACHE_FLUSH_TS | BIT(31)); - OUT_RING(ring, rbmemptr(ring, fence)); - OUT_RING(ring, submit->seqno); - } else { - /* BIT(31) means something else on a2xx */ - OUT_PKT3(ring, CP_EVENT_WRITE, 3); - OUT_RING(ring, CACHE_FLUSH_TS); - OUT_RING(ring, rbmemptr(ring, fence)); - OUT_RING(ring, submit->seqno); - OUT_PKT3(ring, CP_INTERRUPT, 1); - OUT_RING(ring, 0x80000000); - } - -#if 0 - if (adreno_is_a3xx(adreno_gpu)) { - /* Dummy set-constant to trigger context rollover */ - OUT_PKT3(ring, CP_SET_CONSTANT, 2); - OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG)); - OUT_RING(ring, 0x00000000); - } -#endif - - gpu->funcs->flush(gpu, ring); -} - -void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); uint32_t wptr; /* Copy the shadow to the actual register */ @@ -524,7 +467,7 @@ void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* ensure writes to ringbuffer have hit system memory: */ mb(); - adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr); + gpu_write(gpu, reg, wptr); } bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index e55abae365b5..c3775f79525a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -17,29 +17,8 @@ #include "adreno_common.xml.h" #include "adreno_pm4.xml.h" -#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1 -#define REG_SKIP ~0 -#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP - extern bool snapshot_debugbus; -/** - * adreno_regs: List of registers that are used in across all - * 3D devices. 
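With the offset table gone, adreno_flush() now takes the WPTR register from the caller, and it still orders the ring writes with mb() before ringing the doorbell. A standalone toy of that publish sequence, with a C11 release fence standing in for mb() and a made-up register offset:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_regs[256];

    static void reg_write(uint32_t reg, uint32_t val)
    {
        fake_regs[reg] = val;   /* stand-in for an MMIO write */
    }

    /* Ring contents must be globally visible before the write pointer is
     * published; the register to poke is passed in by the caller, as in
     * the new adreno_flush(gpu, ring, reg) signature. */
    static void flush(uint32_t *ring, unsigned int wptr, uint32_t wptr_reg)
    {
        ring[wptr - 1] = 0xdeadbeef;                    /* last command dword */
        atomic_thread_fence(memory_order_release);      /* plays the mb() role */
        reg_write(wptr_reg, wptr);
    }

    int main(void)
    {
        uint32_t ring[64] = { 0 };

        flush(ring, 2, 0x23);   /* 0x23: made-up WPTR register offset */
        printf("wptr reg now %u\n", (unsigned)fake_regs[0x23]);
        return 0;
    }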
Each device type has different offset value for the same - * register, so an array of register offsets are declared for every device - * and are indexed by the enumeration values defined in this enum - */ -enum adreno_regs { - REG_ADRENO_CP_RB_BASE, - REG_ADRENO_CP_RB_BASE_HI, - REG_ADRENO_CP_RB_RPTR_ADDR, - REG_ADRENO_CP_RB_RPTR_ADDR_HI, - REG_ADRENO_CP_RB_RPTR, - REG_ADRENO_CP_RB_WPTR, - REG_ADRENO_CP_RB_CNTL, - REG_ADRENO_REGISTER_MAX, -}; - enum { ADRENO_FW_PM4 = 0, ADRENO_FW_SQE = 0, /* a6xx */ @@ -176,11 +155,6 @@ static inline bool adreno_is_a225(struct adreno_gpu *gpu) return gpu->revn == 225; } -static inline bool adreno_is_a3xx(struct adreno_gpu *gpu) -{ - return (gpu->revn >= 300) && (gpu->revn < 400); -} - static inline bool adreno_is_a305(struct adreno_gpu *gpu) { return gpu->revn == 305; @@ -207,11 +181,6 @@ static inline bool adreno_is_a330v2(struct adreno_gpu *gpu) return adreno_is_a330(gpu) && (gpu->rev.patchid > 0); } -static inline bool adreno_is_a4xx(struct adreno_gpu *gpu) -{ - return (gpu->revn >= 400) && (gpu->revn < 500); -} - static inline int adreno_is_a405(struct adreno_gpu *gpu) { return gpu->revn == 405; @@ -269,9 +238,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, const struct firmware *fw, u64 *iova); int adreno_hw_init(struct msm_gpu *gpu); void adreno_recover(struct msm_gpu *gpu); -void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx); -void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring); +void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg); bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring); #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state, @@ -365,59 +332,12 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23)); } -/* - * adreno_reg_check() - Checks the validity of a register enum - * @gpu: Pointer to struct adreno_gpu - * @offset_name: The register enum that is checked - */ -static inline bool adreno_reg_check(struct adreno_gpu *gpu, - enum adreno_regs offset_name) -{ - BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]); - - /* - * REG_SKIP is a special value that tell us that the register in - * question isn't implemented on target but don't trigger a BUG(). 
This - * is used to cleanly implement adreno_gpu_write64() and - * adreno_gpu_read64() in a generic fashion - */ - if (gpu->reg_offsets[offset_name] == REG_SKIP) - return false; - - return true; -} - -static inline u32 adreno_gpu_read(struct adreno_gpu *gpu, - enum adreno_regs offset_name) -{ - u32 reg = gpu->reg_offsets[offset_name]; - u32 val = 0; - if(adreno_reg_check(gpu,offset_name)) - val = gpu_read(&gpu->base, reg - 1); - return val; -} - -static inline void adreno_gpu_write(struct adreno_gpu *gpu, - enum adreno_regs offset_name, u32 data) -{ - u32 reg = gpu->reg_offsets[offset_name]; - if(adreno_reg_check(gpu, offset_name)) - gpu_write(&gpu->base, reg - 1, data); -} - struct msm_gpu *a2xx_gpu_init(struct drm_device *dev); struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); struct msm_gpu *a4xx_gpu_init(struct drm_device *dev); struct msm_gpu *a5xx_gpu_init(struct drm_device *dev); struct msm_gpu *a6xx_gpu_init(struct drm_device *dev); -static inline void adreno_gpu_write64(struct adreno_gpu *gpu, - enum adreno_regs lo, enum adreno_regs hi, u64 data) -{ - adreno_gpu_write(gpu, lo, lower_32_bits(data)); - adreno_gpu_write(gpu, hi, upper_32_bits(data)); -} - static inline uint32_t get_wptr(struct msm_ringbuffer *ring) { return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2); diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h index 3931eecadaff..59bb8c1ffce6 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h @@ -298,6 +298,7 @@ enum adreno_pm4_type3_packets { CP_SET_BIN_DATA5_OFFSET = 46, CP_SET_CTXSWITCH_IB = 85, CP_REG_WRITE = 109, + CP_WHERE_AM_I = 98, }; enum adreno_state_block { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c index f1bc6a1af7a7..84ea09d9692f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c @@ -288,19 +288,6 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) } #ifdef CONFIG_DEBUG_FS -#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \ -static int __prefix ## _open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, __prefix ## _show, inode->i_private); \ -} \ -static const struct file_operations __prefix ## _fops = { \ - .owner = THIS_MODULE, \ - .open = __prefix ## _open, \ - .release = single_release, \ - .read = seq_read, \ - .llseek = seq_lseek, \ -} - static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) { struct dpu_irq *irq_obj = s->private; @@ -328,7 +315,7 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) return 0; } -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq); +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq); void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, struct dentry *parent) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c index b36919d95362..393858ef8a83 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c @@ -30,6 +30,74 @@ enum dpu_perf_mode { DPU_PERF_MODE_MAX }; +/** + * @_dpu_core_perf_calc_bw() - to calculate BW per crtc + * @kms - pointer to the dpu_kms + * @crtc - pointer to a crtc + * Return: returns aggregated BW for all planes in crtc. 
+ */ +static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms, + struct drm_crtc *crtc) +{ + struct drm_plane *plane; + struct dpu_plane_state *pstate; + u64 crtc_plane_bw = 0; + u32 bw_factor; + + drm_atomic_crtc_for_each_plane(plane, crtc) { + pstate = to_dpu_plane_state(plane->state); + if (!pstate) + continue; + + crtc_plane_bw += pstate->plane_fetch_bw; + } + + bw_factor = kms->catalog->perf.bw_inefficiency_factor; + if (bw_factor) { + crtc_plane_bw *= bw_factor; + do_div(crtc_plane_bw, 100); + } + + return crtc_plane_bw; +} + +/** + * _dpu_core_perf_calc_clk() - to calculate clock per crtc + * @kms - pointer to the dpu_kms + * @crtc - pointer to a crtc + * @state - pointer to a crtc state + * Return: returns max clk for all planes in crtc. + */ +static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms, + struct drm_crtc *crtc, struct drm_crtc_state *state) +{ + struct drm_plane *plane; + struct dpu_plane_state *pstate; + struct drm_display_mode *mode; + u64 crtc_clk; + u32 clk_factor; + + mode = &state->adjusted_mode; + + crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode); + + drm_atomic_crtc_for_each_plane(plane, crtc) { + pstate = to_dpu_plane_state(plane->state); + if (!pstate) + continue; + + crtc_clk = max(pstate->plane_clk, crtc_clk); + } + + clk_factor = kms->catalog->perf.clk_inefficiency_factor; + if (clk_factor) { + crtc_clk *= clk_factor; + do_div(crtc_clk, 100); + } + + return crtc_clk; +} + static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) { struct msm_drm_private *priv; @@ -52,12 +120,7 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms, dpu_cstate = to_dpu_crtc_state(state); memset(perf, 0, sizeof(struct dpu_core_perf_params)); - if (!dpu_cstate->bw_control) { - perf->bw_ctl = kms->catalog->perf.max_bw_high * - 1000ULL; - perf->max_per_pipe_ib = perf->bw_ctl; - perf->core_clk_rate = kms->perf.max_core_clk_rate; - } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) { + if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) { perf->bw_ctl = 0; perf->max_per_pipe_ib = 0; perf->core_clk_rate = 0; @@ -65,6 +128,10 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms, perf->bw_ctl = kms->perf.fix_core_ab_vote; perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote; perf->core_clk_rate = kms->perf.fix_core_clk_rate; + } else { + perf->bw_ctl = _dpu_core_perf_calc_bw(kms, crtc); + perf->max_per_pipe_ib = kms->catalog->perf.min_dram_ib; + perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state); } DPU_DEBUG( @@ -116,11 +183,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n", tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl, tmp_cstate->bw_control); - /* - * For bw check only use the bw if the - * atomic property has been already set - */ - if (tmp_cstate->bw_control) + bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl; } @@ -132,9 +195,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, DPU_DEBUG("final threshold bw limit = %d\n", threshold); - if (!dpu_cstate->bw_control) { - DPU_DEBUG("bypass bandwidth check\n"); - } else if (!threshold) { + if (!threshold) { DPU_ERROR("no bandwidth limits specified\n"); return -E2BIG; } else if (bw > threshold) { @@ -155,7 +216,11 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, = dpu_crtc_get_client_type(crtc); struct drm_crtc *tmp_crtc; struct dpu_crtc_state *dpu_cstate; - int ret = 0; + int i, ret = 0; + u64 avg_bw; + + if (!kms->num_paths) + return -EINVAL; drm_for_each_crtc(tmp_crtc, crtc->dev) { if (tmp_crtc->enabled && 
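The update-bus path in the hunk that follows sums bw_ctl over all enabled CRTCs and then splits the vote evenly across the interconnect paths; the divide by 1000 converts bytes per second into the kBps units that icc_set_bw() expects. A self-contained sketch of that arithmetic (the function name is illustrative, not the driver's):

	#include <stdint.h>

	/* Split an aggregate Bps vote evenly across n interconnect paths, in kBps. */
	static uint64_t dpu_avg_icc_vote_kbps(uint64_t total_bw_bps, unsigned int num_paths)
	{
		if (num_paths == 0)
			return 0;
		return total_bw_bps / (num_paths * 1000ULL); /* Bps -> kBps */
	}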
@@ -166,10 +231,20 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, perf.max_per_pipe_ib = max(perf.max_per_pipe_ib, dpu_cstate->new_perf.max_per_pipe_ib); - DPU_DEBUG("crtc=%d bw=%llu\n", tmp_crtc->base.id, - dpu_cstate->new_perf.bw_ctl); + perf.bw_ctl += dpu_cstate->new_perf.bw_ctl; + + DPU_DEBUG("crtc=%d bw=%llu paths:%d\n", + tmp_crtc->base.id, + dpu_cstate->new_perf.bw_ctl, kms->num_paths); } } + + avg_bw = perf.bw_ctl; + do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/ + + for (i = 0; i < kms->num_paths; i++) + icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib); + return ret; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index c2729f71e2fa..f56414a06ec4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -265,11 +265,6 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) { struct drm_encoder *encoder; - if (!crtc) { - DPU_ERROR("invalid crtc\n"); - return INTF_MODE_NONE; - } - /* * TODO: This function is called from dpu debugfs and as part of atomic * check. When called from debugfs, the crtc->mutex must be held to @@ -297,7 +292,6 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc) dpu_crtc->vblank_cb_time = ktime_get(); else dpu_crtc->vblank_cb_count++; - _dpu_crtc_complete_flip(crtc); drm_crtc_handle_vblank(crtc); trace_dpu_crtc_vblank_cb(DRMID(crtc)); } @@ -402,6 +396,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event) void dpu_crtc_complete_commit(struct drm_crtc *crtc) { trace_dpu_crtc_complete_commit(DRMID(crtc)); + _dpu_crtc_complete_flip(crtc); } static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, @@ -421,8 +416,6 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r); } - - drm_mode_debug_printmodeline(adj_mode); } static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state, @@ -457,7 +450,6 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc) struct dpu_crtc_mixer *mixer = cstate->mixers; struct dpu_hw_pcc_cfg cfg; struct dpu_hw_ctl *ctl; - struct dpu_hw_mixer *lm; struct dpu_hw_dspp *dspp; int i; @@ -467,7 +459,6 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc) for (i = 0; i < cstate->num_mixers; i++) { ctl = mixer[i].lm_ctl; - lm = mixer[i].hw_lm; dspp = mixer[i].hw_dspp; if (!dspp || !dspp->ops.setup_pcc) @@ -496,16 +487,8 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc) static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { - struct dpu_crtc *dpu_crtc; - struct dpu_crtc_state *cstate; + struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); struct drm_encoder *encoder; - struct drm_device *dev; - unsigned long flags; - - if (!crtc) { - DPU_ERROR("invalid crtc\n"); - return; - } if (!crtc->state->enable) { DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n", @@ -515,21 +498,8 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, DPU_DEBUG("crtc%d\n", crtc->base.id); - dpu_crtc = to_dpu_crtc(crtc); - cstate = to_dpu_crtc_state(crtc->state); - dev = crtc->dev; - _dpu_crtc_setup_lm_bounds(crtc, crtc->state); - if (dpu_crtc->event) { - WARN_ON(dpu_crtc->event); - } else { - spin_lock_irqsave(&dev->event_lock, flags); - dpu_crtc->event = crtc->state->event; - crtc->state->event = NULL; - spin_unlock_irqrestore(&dev->event_lock, flags); - } - /* encoder will trigger pending mask now */ drm_for_each_encoder_mask(encoder, crtc->dev, 
crtc->state->encoder_mask) dpu_encoder_trigger_kickoff_pending(encoder); @@ -583,14 +553,11 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, return; } - if (dpu_crtc->event) { - DPU_DEBUG("already received dpu_crtc->event\n"); - } else { - spin_lock_irqsave(&dev->event_lock, flags); - dpu_crtc->event = crtc->state->event; - crtc->state->event = NULL; - spin_unlock_irqrestore(&dev->event_lock, flags); - } + WARN_ON(dpu_crtc->event); + spin_lock_irqsave(&dev->event_lock, flags); + dpu_crtc->event = crtc->state->event; + crtc->state->event = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); /* * If no mixers has been allocated in dpu_crtc_atomic_check(), @@ -635,14 +602,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, static void dpu_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct dpu_crtc_state *cstate; - - if (!crtc || !state) { - DPU_ERROR("invalid argument(s)\n"); - return; - } - - cstate = to_dpu_crtc_state(state); + struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); DPU_DEBUG("crtc%d\n", crtc->base.id); @@ -731,14 +691,8 @@ static void dpu_crtc_reset(struct drm_crtc *crtc) */ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc) { - struct dpu_crtc_state *cstate, *old_cstate; + struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state); - if (!crtc || !crtc->state) { - DPU_ERROR("invalid argument(s)\n"); - return NULL; - } - - old_cstate = to_dpu_crtc_state(crtc->state); cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL); if (!cstate) { DPU_ERROR("failed to allocate state\n"); @@ -754,19 +708,12 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc) static void dpu_crtc_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { - struct dpu_crtc *dpu_crtc; - struct dpu_crtc_state *cstate; + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); struct drm_encoder *encoder; unsigned long flags; bool release_bandwidth = false; - if (!crtc || !crtc->state) { - DPU_ERROR("invalid crtc\n"); - return; - } - dpu_crtc = to_dpu_crtc(crtc); - cstate = to_dpu_crtc_state(crtc->state); - DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); /* Disable/save vblank irq handling */ @@ -825,19 +772,13 @@ static void dpu_crtc_disable(struct drm_crtc *crtc, static void dpu_crtc_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { - struct dpu_crtc *dpu_crtc; + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); struct drm_encoder *encoder; bool request_bandwidth = false; - if (!crtc) { - DPU_ERROR("invalid crtc\n"); - return; - } - pm_runtime_get_sync(crtc->dev->dev); DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); - dpu_crtc = to_dpu_crtc(crtc); drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) { /* in video mode, we hold an extra bandwidth reference @@ -873,15 +814,15 @@ struct plane_state { static int dpu_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct dpu_crtc *dpu_crtc; + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); struct plane_state *pstates; - struct dpu_crtc_state *cstate; const struct drm_plane_state *pstate; struct drm_plane *plane; struct drm_display_mode *mode; - int cnt = 0, rc = 0, mixer_width, i, z_pos; + int cnt = 0, rc = 0, mixer_width = 0, i, z_pos; struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2]; int multirect_count = 0; @@ -889,16 
+830,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, int left_zpos_cnt = 0, right_zpos_cnt = 0; struct drm_rect crtc_rect = { 0 }; - if (!crtc) { - DPU_ERROR("invalid crtc\n"); - return -EINVAL; - } - pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL); - dpu_crtc = to_dpu_crtc(crtc); - cstate = to_dpu_crtc_state(state); - if (!state->enable || !state->active) { DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n", crtc->base.id, state->enable, state->active); @@ -914,9 +847,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, memset(pipe_staged, 0, sizeof(pipe_staged)); - mixer_width = mode->hdisplay / cstate->num_mixers; + if (cstate->num_mixers) { + mixer_width = mode->hdisplay / cstate->num_mixers; - _dpu_crtc_setup_lm_bounds(crtc, state); + _dpu_crtc_setup_lm_bounds(crtc, state); + } crtc_rect.x2 = mode->hdisplay; crtc_rect.y2 = mode->vdisplay; @@ -1242,23 +1177,7 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data) return 0; } -static int _dpu_debugfs_status_open(struct inode *inode, struct file *file) -{ - return single_open(file, _dpu_debugfs_status_show, inode->i_private); -} - -#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \ -static int __prefix ## _open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, __prefix ## _show, inode->i_private); \ -} \ -static const struct file_operations __prefix ## _fops = { \ - .owner = THIS_MODULE, \ - .open = __prefix ## _open, \ - .release = single_release, \ - .read = seq_read, \ - .llseek = seq_lseek, \ -} +DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status); static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v) { @@ -1275,25 +1194,18 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v) return 0; } -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state); +DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state); static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) { struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); - static const struct file_operations debugfs_status_fops = { - .open = _dpu_debugfs_status_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - }; - dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name, crtc->dev->primary->debugfs_root); debugfs_create_file("status", 0400, dpu_crtc->debugfs_root, - dpu_crtc, &debugfs_status_fops); + dpu_crtc, &_dpu_debugfs_status_fops); debugfs_create_file("state", 0600, dpu_crtc->debugfs_root, &dpu_crtc->base, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index bd6def436c65..f7f5c258b553 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -1001,6 +1001,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, trace_dpu_enc_mode_set(DRMID(drm_enc)); + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) + msm_dp_display_mode_set(priv->dp, drm_enc, mode, adj_mode); + list_for_each_entry(conn_iter, connector_list, head) if (conn_iter->encoder == drm_enc) conn = conn_iter; @@ -1109,6 +1112,13 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) return; } + + if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort && + dpu_enc->cur_master->hw_mdptop && + dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select) + dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select( + dpu_enc->cur_master->hw_mdptop); + _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info); if (dpu_enc->disp_info.intf_type == 
DRM_MODE_ENCODER_DSI && @@ -1146,6 +1156,7 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = NULL; int ret = 0; + struct msm_drm_private *priv; struct drm_display_mode *cur_mode = NULL; if (!drm_enc) { @@ -1156,6 +1167,7 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) mutex_lock(&dpu_enc->enc_lock); cur_mode = &dpu_enc->base.crtc->state->adjusted_mode; + priv = drm_enc->dev->dev_private; trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay, cur_mode->vdisplay); @@ -1176,6 +1188,15 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) _dpu_encoder_virt_enable_helper(drm_enc); + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) { + ret = msm_dp_display_enable(priv->dp, + drm_enc); + if (ret) { + DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n", + ret); + goto out; + } + } dpu_enc->enabled = true; out: @@ -1211,6 +1232,11 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) /* wait for idle */ dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE); + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) { + if (msm_dp_display_pre_disable(priv->dp, drm_enc)) + DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n"); + } + dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP); for (i = 0; i < dpu_enc->num_phys_encs; i++) { @@ -1220,6 +1246,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) phys->ops.disable(phys); } + /* after phys waits for frame-done, should be no more frames pending */ if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id); @@ -1234,6 +1261,11 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) { + if (msm_dp_display_disable(priv->dp, drm_enc)) + DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n"); + } + mutex_unlock(&dpu_enc->enc_lock); } @@ -1880,24 +1912,13 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data) return 0; } -static int _dpu_encoder_debugfs_status_open(struct inode *inode, - struct file *file) -{ - return single_open(file, _dpu_encoder_status_show, inode->i_private); -} +DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status); static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); int i; - static const struct file_operations debugfs_status_fops = { - .open = _dpu_encoder_debugfs_status_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - }; - char name[DPU_NAME_SIZE]; if (!drm_enc->dev) { @@ -1913,7 +1934,7 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) /* don't error check these */ debugfs_create_file("status", 0600, - dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops); + dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops); for (i = 0; i < dpu_enc->num_phys_encs; i++) if (dpu_enc->phys_encs[i]->ops.late_register) @@ -2008,7 +2029,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, { int ret = 0; int i = 0; - enum dpu_intf_type intf_type; + enum dpu_intf_type intf_type = INTF_NONE; struct dpu_enc_phys_init_params phys_params; if (!dpu_enc) { @@ -2030,9 +2051,9 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, case DRM_MODE_ENCODER_DSI: intf_type = INTF_DSI; break; - default: - DPU_ERROR_ENC(dpu_enc, "unsupported display 
interface type\n"); - return -EINVAL; + case DRM_MODE_ENCODER_TMDS: + intf_type = INTF_DP; + break; } WARN_ON(disp_info->num_of_h_tiles < 1); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index b5a49050d131..805e059b50b7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -100,6 +100,14 @@ static void drm_mode_to_intf_timing_params( * display_v_end -= mode->hsync_start - mode->hdisplay; * } */ + /* for DP/EDP, Shift timings to align it to bottom right */ + if ((phys_enc->hw_intf->cap->type == INTF_DP) || + (phys_enc->hw_intf->cap->type == INTF_EDP)) { + timing->h_back_porch += timing->h_front_porch; + timing->h_front_porch = 0; + timing->v_back_porch += timing->v_front_porch; + timing->v_front_porch = 0; + } } static u32 get_horizontal_total(const struct intf_timing_params *timing) @@ -298,7 +306,6 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) struct dpu_hw_ctl *hw_ctl; unsigned long lock_flags; u32 flush_register = 0; - int new_cnt = -1, old_cnt = -1; hw_ctl = phys_enc->hw_ctl; @@ -308,7 +315,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent, phys_enc); - old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt); + atomic_read(&phys_enc->pending_kickoff_cnt); /* * only decrement the pending flush count if we've actually flushed @@ -320,8 +327,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) flush_register = hw_ctl->ops.get_flush_register(hw_ctl); if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl))) - new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, - -1, 0); + atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0); spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); /* Signal any waiting atomic commit thread */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 97d122eee96d..60b304b72b7c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -684,7 +684,8 @@ static const struct dpu_perf_cfg sc7180_perf_data = { .max_bw_high = 6800000, .min_core_ib = 2400000, .min_llcc_ib = 800000, - .min_dram_ib = 800000, + .min_dram_ib = 1600000, + .min_prefill_lines = 24, .danger_lut_tbl = {0xff, 0xffff, 0x0}, .qos_lut_tbl = { {.nentry = ARRAY_SIZE(sc7180_qos_linear), @@ -701,6 +702,8 @@ static const struct dpu_perf_cfg sc7180_perf_data = { {.rd_enable = 1, .wr_enable = 1}, {.rd_enable = 1, .wr_enable = 0} }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, }; static const struct dpu_perf_cfg sm8150_perf_data = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 1b7a9213a756..3544af1a45c5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -659,6 +659,8 @@ struct dpu_perf_cdp_cfg { * @downscaling_prefill_lines downscaling latency in lines * @amortizable_theshold minimum y position for traffic shaping prefill * @min_prefill_lines minimum pipeline latency in lines + * @clk_inefficiency_factor DPU src clock inefficiency factor + * @bw_inefficiency_factor DPU axi bus bw inefficiency factor * @safe_lut_tbl: LUT tables for safe signals * @danger_lut_tbl: LUT tables for danger signals * @qos_lut_tbl: LUT tables for QoS signals @@ -683,6 
+685,8 @@ struct dpu_perf_cfg { u32 downscaling_prefill_lines; u32 amortizable_threshold; u32 min_prefill_lines; + u32 clk_inefficiency_factor; + u32 bw_inefficiency_factor; u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX]; u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX]; struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX]; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index c0a4d4e16d82..d93c44f6996d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -85,30 +85,17 @@ static int _dpu_danger_signal_status(struct seq_file *s, return 0; } -#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \ -static int __prefix ## _open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, __prefix ## _show, inode->i_private); \ -} \ -static const struct file_operations __prefix ## _fops = { \ - .owner = THIS_MODULE, \ - .open = __prefix ## _open, \ - .release = single_release, \ - .read = seq_read, \ - .llseek = seq_lseek, \ -} - static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v) { return _dpu_danger_signal_status(s, true); } -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats); +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats); static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v) { return _dpu_danger_signal_status(s, false); } -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats); +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats); static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms, struct dentry *parent) @@ -195,10 +182,15 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) struct dpu_kms *dpu_kms = to_dpu_kms(kms); void *p = dpu_hw_util_get_log_mask_ptr(); struct dentry *entry; + struct drm_device *dev; + struct msm_drm_private *priv; if (!p) return -EINVAL; + dev = dpu_kms->dev; + priv = dev->dev_private; + entry = debugfs_create_dir("debug", minor->debugfs_root); debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p); @@ -207,6 +199,9 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) dpu_debugfs_vbif_init(dpu_kms, entry); dpu_debugfs_core_irq_init(dpu_kms, entry); + if (priv->dp) + msm_dp_debugfs_init(priv->dp, minor); + return dpu_core_perf_debugfs_init(dpu_kms, entry); } #endif @@ -290,6 +285,28 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms) return 0; } +static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms) +{ + struct icc_path *path0; + struct icc_path *path1; + struct drm_device *dev = dpu_kms->dev; + + path0 = of_icc_get(dev->dev, "mdp0-mem"); + path1 = of_icc_get(dev->dev, "mdp1-mem"); + + if (IS_ERR_OR_NULL(path0)) + return PTR_ERR_OR_ZERO(path0); + + dpu_kms->path[0] = path0; + dpu_kms->num_paths = 1; + + if (!IS_ERR_OR_NULL(path1)) { + dpu_kms->path[1] = path1; + dpu_kms->num_paths++; + } + return 0; +} + static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { return dpu_crtc_vblank(crtc, true); @@ -479,6 +496,33 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev, return rc; } +static int _dpu_kms_initialize_displayport(struct drm_device *dev, + struct msm_drm_private *priv, + struct dpu_kms *dpu_kms) +{ + struct drm_encoder *encoder = NULL; + int rc = 0; + + if (!priv->dp) + return rc; + + encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS); + if (IS_ERR(encoder)) { + DPU_ERROR("encoder init failed for dsi display\n"); + return PTR_ERR(encoder); + } + + rc = msm_dp_modeset_init(priv->dp, dev, encoder); + if (rc) { + 
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); + drm_encoder_cleanup(encoder); + return rc; + } + + priv->encoders[priv->num_encoders++] = encoder; + return rc; +} + /** * _dpu_kms_setup_displays - create encoders, bridges and connectors * for underlying displays @@ -491,12 +535,21 @@ static int _dpu_kms_setup_displays(struct drm_device *dev, struct msm_drm_private *priv, struct dpu_kms *dpu_kms) { - /** - * Extend this function to initialize other - * types of displays - */ + int rc = 0; - return _dpu_kms_initialize_dsi(dev, priv, dpu_kms); + rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms); + if (rc) { + DPU_ERROR("initialize_dsi failed, rc = %d\n", rc); + return rc; + } + + rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms); + if (rc) { + DPU_ERROR("initialize_DP failed, rc = %d\n", rc); + return rc; + } + + return rc; } static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms) @@ -681,13 +734,20 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms, info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE : MSM_DISPLAY_CAP_VID_MODE; - /* TODO: No support for DSI swap */ - for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { - if (priv->dsi[i]) { - info.h_tile_instance[info.num_of_h_tiles] = i; - info.num_of_h_tiles++; + switch (info.intf_type) { + case DRM_MODE_ENCODER_DSI: + /* TODO: No support for DSI swap */ + for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { + if (priv->dsi[i]) { + info.h_tile_instance[info.num_of_h_tiles] = i; + info.num_of_h_tiles++; + } } - } + break; + case DRM_MODE_ENCODER_TMDS: + info.num_of_h_tiles = 1; + break; + }; rc = dpu_encoder_setup(encoder->dev, encoder, &info); if (rc) @@ -709,6 +769,23 @@ static void dpu_irq_preinstall(struct msm_kms *kms) dpu_core_irq_preinstall(dpu_kms); } +static int dpu_irq_postinstall(struct msm_kms *kms) +{ + struct msm_drm_private *priv; + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + + if (!dpu_kms || !dpu_kms->dev) + return -EINVAL; + + priv = dpu_kms->dev->dev_private; + if (!priv) + return -EINVAL; + + msm_dp_irq_postinstall(priv->dp); + + return 0; +} + static void dpu_irq_uninstall(struct msm_kms *kms) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); @@ -719,6 +796,7 @@ static void dpu_irq_uninstall(struct msm_kms *kms) static const struct msm_kms_funcs kms_funcs = { .hw_init = dpu_kms_hw_init, .irq_preinstall = dpu_irq_preinstall, + .irq_postinstall = dpu_irq_postinstall, .irq_uninstall = dpu_irq_uninstall, .irq = dpu_irq, .enable_commit = dpu_kms_enable_commit, @@ -952,6 +1030,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dpu_vbif_init_memtypes(dpu_kms); + if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) + dpu_kms_parse_data_bus_icc_path(dpu_kms); + pm_runtime_put_sync(&dpu_kms->pdev->dev); return 0; @@ -1079,7 +1160,7 @@ static int dpu_dev_remove(struct platform_device *pdev) static int __maybe_unused dpu_runtime_suspend(struct device *dev) { - int rc = -1; + int i, rc = -1; struct platform_device *pdev = to_platform_device(dev); struct dpu_kms *dpu_kms = platform_get_drvdata(pdev); struct dss_module_power *mp = &dpu_kms->mp; @@ -1090,6 +1171,9 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev) if (rc) DPU_ERROR("clock disable failed rc:%d\n", rc); + for (i = 0; i < dpu_kms->num_paths; i++) + icc_set_bw(dpu_kms->path[i], 0, 0); + return rc; } @@ -1101,8 +1185,15 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev) struct drm_encoder *encoder; struct drm_device *ddev; struct dss_module_power *mp = &dpu_kms->mp; + int i; ddev = dpu_kms->dev; + + /* 
Min vote of BW is required before turning on AXI clk */ + for (i = 0; i < dpu_kms->num_paths; i++) + icc_set_bw(dpu_kms->path[i], 0, + dpu_kms->catalog->perf.min_dram_ib); + rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); if (rc) { DPU_ERROR("clock enable failed rc:%d\n", rc); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index e140cd633071..1c0e4c0c9ffb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -8,6 +8,8 @@ #ifndef __DPU_KMS_H__ #define __DPU_KMS_H__ +#include <linux/interconnect.h> + #include <drm/drm_drv.h> #include "msm_drv.h" @@ -140,6 +142,8 @@ struct dpu_kms { * when disabled. */ atomic_t bandwidth_ref; + struct icc_path *path[2]; + u32 num_paths; }; struct vsync_info { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c index 7d3fdbb00e7e..cd4078807db1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c @@ -8,7 +8,6 @@ #include <linux/irqdesc.h> #include <linux/irqchip/chained_irq.h> #include "dpu_kms.h" -#include <linux/interconnect.h> #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base) @@ -277,9 +276,11 @@ int dpu_mdss_init(struct drm_device *dev) DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio); - ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss); - if (ret) - return ret; + if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) { + ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss); + if (ret) + return ret; + } mp = &dpu_mdss->mp; ret = msm_dss_parse_clock(pdev, mp); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 29e373d2e7b5..7ea90d25a3b6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -132,6 +132,86 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane) } /** + * _dpu_plane_calc_bw - calculate bandwidth required for a plane + * @Plane: Pointer to drm plane. + * Result: Updates calculated bandwidth in the plane state. + * BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest) + * Prefill BW Equation: line src bytes * line_time + */ +static void _dpu_plane_calc_bw(struct drm_plane *plane, + struct drm_framebuffer *fb) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *pstate; + struct drm_display_mode *mode; + const struct dpu_format *fmt = NULL; + struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + int src_width, src_height, dst_height, fps; + u64 plane_prefill_bw; + u64 plane_bw; + u32 hw_latency_lines; + u64 scale_factor; + int vbp, vpw; + + pstate = to_dpu_plane_state(plane->state); + mode = &plane->state->crtc->mode; + + fmt = dpu_get_dpu_format_ext(fb->format->format, fb->modifier); + + src_width = drm_rect_width(&pdpu->pipe_cfg.src_rect); + src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect); + dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect); + fps = drm_mode_vrefresh(mode); + vbp = mode->vtotal - mode->vsync_end; + vpw = mode->vsync_end - mode->vsync_start; + hw_latency_lines = dpu_kms->catalog->perf.min_prefill_lines; + scale_factor = src_height > dst_height ? 
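/*
 * Downscaling inflates fetch bandwidth: when src_height exceeds
 * dst_height the pipe reads src_height/dst_height input lines per
 * output line, so the integer ratio computed below multiplies both
 * the steady-state and the prefill bandwidth terms.
 */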
+ mult_frac(src_height, 1, dst_height) : 1; + + plane_bw = + src_width * mode->vtotal * fps * fmt->bpp * + scale_factor; + + plane_prefill_bw = + src_width * hw_latency_lines * fps * fmt->bpp * + scale_factor * mode->vtotal; + + do_div(plane_prefill_bw, (vbp+vpw)); + + pstate->plane_fetch_bw = max(plane_bw, plane_prefill_bw); +} + +/** + * _dpu_plane_calc_clk - calculate clock required for a plane + * @Plane: Pointer to drm plane. + * Result: Updates calculated clock in the plane state. + * Clock equation: dst_w * v_total * fps * (src_h / dst_h) + */ +static void _dpu_plane_calc_clk(struct drm_plane *plane) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *pstate; + struct drm_display_mode *mode; + int dst_width, src_height, dst_height, fps; + + pstate = to_dpu_plane_state(plane->state); + mode = &plane->state->crtc->mode; + + src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect); + dst_width = drm_rect_width(&pdpu->pipe_cfg.dst_rect); + dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect); + fps = drm_mode_vrefresh(mode); + + pstate->plane_clk = + dst_width * mode->vtotal * fps; + + if (src_height > dst_height) { + pstate->plane_clk *= src_height; + do_div(pstate->plane_clk, dst_height); + } +} + +/** * _dpu_plane_calc_fill_level - calculate fill level of the given source format * @plane: Pointer to drm plane * @fmt: Pointer to source buffer format @@ -1102,6 +1182,10 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) } _dpu_plane_set_qos_remap(plane); + + _dpu_plane_calc_bw(plane, fb); + + _dpu_plane_calc_clk(plane); } static void _dpu_plane_atomic_disable(struct drm_plane *plane) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index 456949713e90..ca83b8753d59 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -25,6 +25,8 @@ * @scaler3_cfg: configuration data for scaler3 * @pixel_ext: configuration data for pixel extensions * @cdp_cfg: CDP configuration + * @plane_fetch_bw: calculated BW per plane + * @plane_clk: calculated clk per plane */ struct dpu_plane_state { struct drm_plane_state base; @@ -39,6 +41,8 @@ struct dpu_plane_state { struct dpu_hw_pixel_ext pixel_ext; struct dpu_hw_pipe_cdp_cfg cdp_cfg; + u64 plane_fetch_bw; + u64 plane_clk; }; /** diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c index 5d8956055286..88645dbc3785 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c @@ -25,54 +25,9 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder) return to_mdp4_kms(to_mdp_kms(priv->kms)); } -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include <mach/board.h> -/* not ironically named at all.. no, really.. 
*/ -static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) -{ - struct drm_device *dev = mdp4_dtv_encoder->base.dev; - struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0"); - - if (!dtv_pdata) { - DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n"); - return; - } - - if (dtv_pdata->bus_scale_table) { - mdp4_dtv_encoder->bsc = msm_bus_scale_register_client( - dtv_pdata->bus_scale_table); - DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc); - DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save); - if (dtv_pdata->lcdc_power_save) - dtv_pdata->lcdc_power_save(1); - } -} - -static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) -{ - if (mdp4_dtv_encoder->bsc) { - msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc); - mdp4_dtv_encoder->bsc = 0; - } -} - -static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) -{ - if (mdp4_dtv_encoder->bsc) { - DBG("set bus scaling: %d", idx); - msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx); - } -} -#else -static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {} -static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {} -static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {} -#endif - static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder) { struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - bs_fini(mdp4_dtv_encoder); drm_encoder_cleanup(encoder); kfree(mdp4_dtv_encoder); } @@ -162,8 +117,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder) clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); - bs_set(mdp4_dtv_encoder, 0); - mdp4_dtv_encoder->enabled = false; } @@ -185,8 +138,6 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder) MDP4_DMA_CONFIG_PACK(0x21)); mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1); - bs_set(mdp4_dtv_encoder, 1); - DBG("setting mdp_clk=%lu", pc); ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc); @@ -252,8 +203,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) goto fail; } - bs_init(mdp4_dtv_encoder); - return encoder; fail: diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h index 18933bd81c77..e8ee92ab7956 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h @@ -222,17 +222,4 @@ static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev) } #endif -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -/* bus scaling data is associated with extra pointless platform devices, - * "dtv", etc.. this is a bit of a hack, but we need a way for encoders - * to find their pdata to make the bus-scaling stuff work. - */ -static inline void *mdp4_find_pdata(const char *devname) -{ - struct device *dev; - dev = bus_find_device_by_name(&platform_bus_type, NULL, devname); - return dev ? 
dev->platform_data : NULL; -} -#endif - #endif /* __MDP4_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c index 871f3514ef69..10eb3e5b218e 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c @@ -30,51 +30,10 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder) return to_mdp4_kms(to_mdp_kms(priv->kms)); } -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include <mach/board.h> -static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) -{ - struct drm_device *dev = mdp4_lcdc_encoder->base.dev; - struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0"); - - if (!lcdc_pdata) { - DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n"); - return; - } - - if (lcdc_pdata->bus_scale_table) { - mdp4_lcdc_encoder->bsc = msm_bus_scale_register_client( - lcdc_pdata->bus_scale_table); - DBG("lvds : bus scale client: %08x", mdp4_lcdc_encoder->bsc); - } -} - -static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) -{ - if (mdp4_lcdc_encoder->bsc) { - msm_bus_scale_unregister_client(mdp4_lcdc_encoder->bsc); - mdp4_lcdc_encoder->bsc = 0; - } -} - -static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) -{ - if (mdp4_lcdc_encoder->bsc) { - DBG("set bus scaling: %d", idx); - msm_bus_scale_client_update_request(mdp4_lcdc_encoder->bsc, idx); - } -} -#else -static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {} -static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {} -static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) {} -#endif - static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder) { struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = to_mdp4_lcdc_encoder(encoder); - bs_fini(mdp4_lcdc_encoder); drm_encoder_cleanup(encoder); kfree(mdp4_lcdc_encoder); } @@ -348,8 +307,6 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret); } - bs_set(mdp4_lcdc_encoder, 0); - mdp4_lcdc_encoder->enabled = false; } @@ -382,8 +339,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) mdp4_crtc_set_config(encoder->crtc, config); mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0); - bs_set(mdp4_lcdc_encoder, 1); - for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { ret = regulator_enable(mdp4_lcdc_encoder->regs[i]); if (ret) @@ -480,8 +435,6 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, } mdp4_lcdc_encoder->regs[2] = reg; - bs_init(mdp4_lcdc_encoder); - return encoder; fail: diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c index eeef41fcd4e1..ff2c1d583c79 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c @@ -14,27 +14,6 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder) return to_mdp5_kms(to_mdp_kms(priv->kms)); } -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include <mach/board.h> -#include <linux/msm-bus.h> -#include <linux/msm-bus-board.h> - -static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) -{ - if (mdp5_cmd_enc->bsc) { - DBG("set bus scaling: %d", idx); - /* HACK: scaling down, and then immediately back up - * seems to leave things broken (underflow).. 
so - * never disable: - */ - idx = 1; - msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx); - } -} -#else -static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {} -#endif - #define VSYNC_CLK_RATE 19200000 static int pingpong_tearcheck_setup(struct drm_encoder *encoder, struct drm_display_mode *mode) @@ -146,8 +125,6 @@ void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) mdp5_ctl_set_encoder_state(ctl, pipeline, false); mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); - bs_set(mdp5_cmd_enc, 0); - mdp5_cmd_enc->enabled = false; } @@ -161,7 +138,6 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) if (WARN_ON(mdp5_cmd_enc->enabled)) return; - bs_set(mdp5_cmd_enc, 1); if (pingpong_tearcheck_enable(encoder)) return; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c index f48827283c2b..79d67c495780 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c @@ -16,72 +16,9 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder) return to_mdp5_kms(to_mdp_kms(priv->kms)); } -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include <mach/board.h> -#include <mach/msm_bus.h> -#include <mach/msm_bus_board.h> -#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \ - { \ - .src = MSM_BUS_MASTER_MDP_PORT0, \ - .dst = MSM_BUS_SLAVE_EBI_CH0, \ - .ab = (ab_val), \ - .ib = (ib_val), \ - } - -static struct msm_bus_vectors mdp_bus_vectors[] = { - MDP_BUS_VECTOR_ENTRY(0, 0), - MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000), -}; -static struct msm_bus_paths mdp_bus_usecases[] = { { - .num_paths = 1, - .vectors = &mdp_bus_vectors[0], -}, { - .num_paths = 1, - .vectors = &mdp_bus_vectors[1], -} }; -static struct msm_bus_scale_pdata mdp_bus_scale_table = { - .usecase = mdp_bus_usecases, - .num_usecases = ARRAY_SIZE(mdp_bus_usecases), - .name = "mdss_mdp", -}; - -static void bs_init(struct mdp5_encoder *mdp5_encoder) -{ - mdp5_encoder->bsc = msm_bus_scale_register_client( - &mdp_bus_scale_table); - DBG("bus scale client: %08x", mdp5_encoder->bsc); -} - -static void bs_fini(struct mdp5_encoder *mdp5_encoder) -{ - if (mdp5_encoder->bsc) { - msm_bus_scale_unregister_client(mdp5_encoder->bsc); - mdp5_encoder->bsc = 0; - } -} - -static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) -{ - if (mdp5_encoder->bsc) { - DBG("set bus scaling: %d", idx); - /* HACK: scaling down, and then immediately back up - * seems to leave things broken (underflow).. 
so - * never disable: - */ - idx = 1; - msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx); - } -} -#else -static void bs_init(struct mdp5_encoder *mdp5_encoder) {} -static void bs_fini(struct mdp5_encoder *mdp5_encoder) {} -static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {} -#endif - static void mdp5_encoder_destroy(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - bs_fini(mdp5_encoder); drm_encoder_cleanup(encoder); kfree(mdp5_encoder); } @@ -222,8 +159,6 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) */ mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf)); - bs_set(mdp5_encoder, 0); - mdp5_encoder->enabled = false; } @@ -240,7 +175,6 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder) if (WARN_ON(mdp5_encoder->enabled)) return; - bs_set(mdp5_encoder, 1); spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1); spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); @@ -426,8 +360,6 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); - bs_init(mdp5_encoder); - return encoder; fail: diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c new file mode 100644 index 000000000000..82a8673ab8da --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -0,0 +1,638 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + */ + + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <linux/of_platform.h> + +#include <drm/drm_dp_helper.h> +#include <drm/drm_edid.h> + +#include "dp_catalog.h" +#include "dp_audio.h" +#include "dp_panel.h" +#include "dp_display.h" + +#define HEADER_BYTE_2_BIT 0 +#define PARITY_BYTE_2_BIT 8 +#define HEADER_BYTE_1_BIT 16 +#define PARITY_BYTE_1_BIT 24 +#define HEADER_BYTE_3_BIT 16 +#define PARITY_BYTE_3_BIT 24 + +struct dp_audio_private { + struct platform_device *audio_pdev; + struct platform_device *pdev; + struct dp_catalog *catalog; + struct dp_panel *panel; + + bool engine_on; + u32 channels; + + struct dp_audio dp_audio; +}; + +static u8 dp_audio_get_g0_value(u8 data) +{ + u8 c[4]; + u8 g[4]; + u8 ret_data = 0; + u8 i; + + for (i = 0; i < 4; i++) + c[i] = (data >> i) & 0x01; + + g[0] = c[3]; + g[1] = c[0] ^ c[3]; + g[2] = c[1]; + g[3] = c[2]; + + for (i = 0; i < 4; i++) + ret_data = ((g[i] & 0x01) << i) | ret_data; + + return ret_data; +} + +static u8 dp_audio_get_g1_value(u8 data) +{ + u8 c[4]; + u8 g[4]; + u8 ret_data = 0; + u8 i; + + for (i = 0; i < 4; i++) + c[i] = (data >> i) & 0x01; + + g[0] = c[0] ^ c[3]; + g[1] = c[0] ^ c[1] ^ c[3]; + g[2] = c[1] ^ c[2]; + g[3] = c[2] ^ c[3]; + + for (i = 0; i < 4; i++) + ret_data = ((g[i] & 0x01) << i) | ret_data; + + return ret_data; +} + +static u8 dp_audio_calculate_parity(u32 data) +{ + u8 x0 = 0; + u8 x1 = 0; + u8 ci = 0; + u8 iData = 0; + u8 i = 0; + u8 parity_byte; + u8 num_byte = (data & 0xFF00) > 0 ? 
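/*
 * The parity generator consumes the value a nibble at a time: a plain
 * header byte needs only two nibbles, while a value with any of bits
 * 15:8 set is run through all eight nibbles of a 32-bit word.
 */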
8 : 2; + + for (i = 0; i < num_byte; i++) { + iData = (data >> i*4) & 0xF; + + ci = iData ^ x1; + x1 = x0 ^ dp_audio_get_g1_value(ci); + x0 = dp_audio_get_g0_value(ci); + } + + parity_byte = x1 | (x0 << 4); + + return parity_byte; +} + +static u32 dp_audio_get_header(struct dp_catalog *catalog, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header) +{ + catalog->sdp_type = sdp; + catalog->sdp_header = header; + dp_catalog_audio_get_header(catalog); + + return catalog->audio_data; +} + +static void dp_audio_set_header(struct dp_catalog *catalog, + u32 data, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header) +{ + catalog->sdp_type = sdp; + catalog->sdp_header = header; + catalog->audio_data = data; + dp_catalog_audio_set_header(catalog); +} + +static void dp_audio_stream_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x02; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); + new_value = value; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); + + new_value = audio->channels - 1; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_timestamp_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x1; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); + + new_value = 0x17; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = 
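/*
 * Same read-modify-write pattern as header bytes 1 and 2: read the
 * current SDP header word back from the catalog, OR in the new byte
 * at its field offset along with its computed parity, then write the
 * word out again.
 */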
dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); + + new_value = (0x0 | (0x11 << 2)); + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_infoframe_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x84; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); + + new_value = 0x1b; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); + + new_value = (0x0 | (0x11 << 2)); + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + new_value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_copy_management_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x05; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); + + new_value = 0x0F; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); + + new_value = 0x0; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, 
parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_isrc_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x06; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); + + new_value = 0x0F; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); +} + +static void dp_audio_setup_sdp(struct dp_audio_private *audio) +{ + dp_catalog_audio_config_sdp(audio->catalog); + + dp_audio_stream_sdp(audio); + dp_audio_timestamp_sdp(audio); + dp_audio_infoframe_sdp(audio); + dp_audio_copy_management_sdp(audio); + dp_audio_isrc_sdp(audio); +} + +static void dp_audio_setup_acr(struct dp_audio_private *audio) +{ + u32 select = 0; + struct dp_catalog *catalog = audio->catalog; + + switch (audio->dp_audio.bw_code) { + case DP_LINK_BW_1_62: + select = 0; + break; + case DP_LINK_BW_2_7: + select = 1; + break; + case DP_LINK_BW_5_4: + select = 2; + break; + case DP_LINK_BW_8_1: + select = 3; + break; + default: + DRM_DEBUG_DP("Unknown link rate\n"); + select = 0; + break; + } + + catalog->audio_data = select; + dp_catalog_audio_config_acr(catalog); +} + +static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 safe_to_exit_level = 0; + + switch (audio->dp_audio.lane_count) { + case 1: + safe_to_exit_level = 14; + break; + case 2: + safe_to_exit_level = 8; + break; + case 4: + safe_to_exit_level = 5; + break; + default: + DRM_DEBUG_DP("setting the default safe_to_exit_level = %u\n", + safe_to_exit_level); + safe_to_exit_level = 14; + break; + } + + catalog->audio_data = safe_to_exit_level; + dp_catalog_audio_sfe_level(catalog); +} + +static void dp_audio_enable(struct dp_audio_private *audio, bool enable) +{ + struct dp_catalog *catalog = audio->catalog; + + catalog->audio_data = enable; + dp_catalog_audio_enable(catalog); + + audio->engine_on = enable; +} + +static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev) +{ + struct dp_audio *dp_audio; + struct msm_dp *dp_display; + + if (!pdev) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-ENODEV); + } + + dp_display = platform_get_drvdata(pdev); + if (!dp_display) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-ENODEV); + } + + dp_audio = dp_display->dp_audio; + + if (!dp_audio) { + DRM_ERROR("invalid dp_audio data\n"); + return ERR_PTR(-EINVAL); + } + + return container_of(dp_audio, struct dp_audio_private, dp_audio); +} + +static int dp_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + + struct platform_device *pdev; + struct msm_dp *dp_display; + + pdev = 
to_platform_device(dev); + if (!pdev) { + pr_err("invalid input\n"); + return -ENODEV; + } + + dp_display = platform_get_drvdata(pdev); + if (!dp_display) { + pr_err("invalid input\n"); + return -ENODEV; + } + + return dp_display_set_plugged_cb(dp_display, fn, codec_dev); +} + +static int dp_audio_get_eld(struct device *dev, + void *data, uint8_t *buf, size_t len) +{ + struct platform_device *pdev; + struct msm_dp *dp_display; + + pdev = to_platform_device(dev); + + if (!pdev) { + DRM_ERROR("invalid input\n"); + return -ENODEV; + } + + dp_display = platform_get_drvdata(pdev); + if (!dp_display) { + DRM_ERROR("invalid input\n"); + return -ENODEV; + } + + memcpy(buf, dp_display->connector->eld, + min(sizeof(dp_display->connector->eld), len)); + + return 0; +} + +int dp_audio_hw_params(struct device *dev, + void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + int rc = 0; + struct dp_audio_private *audio; + struct platform_device *pdev; + struct msm_dp *dp_display; + + pdev = to_platform_device(dev); + dp_display = platform_get_drvdata(pdev); + + /* + * There are cases where the sound card can be opened before DP is + * connected, or while it is not connected at all. This can cause an + * unclocked access, as the audio subsystem relies on the DP driver + * to maintain the correct state of the clocks. To protect against + * such cases, check the connection status and bail out if not + * connected. + */ + if (!dp_display->power_on) { + rc = -EINVAL; + goto end; + } + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + goto end; + } + + audio->channels = params->channels; + + dp_audio_setup_sdp(audio); + dp_audio_setup_acr(audio); + dp_audio_safe_to_exit_level(audio); + dp_audio_enable(audio, true); + dp_display->audio_enabled = true; + +end: + return rc; +} + +static void dp_audio_shutdown(struct device *dev, void *data) +{ + struct dp_audio_private *audio; + struct platform_device *pdev; + struct msm_dp *dp_display; + + pdev = to_platform_device(dev); + dp_display = platform_get_drvdata(pdev); + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + DRM_ERROR("failed to get audio data\n"); + return; + } + + /* + * If audio was not enabled there is no need + * to execute the shutdown, and we can bail out early. + * This also makes sure that we don't cause an unclocked + * access when the audio subsystem calls this without DP being + * connected. 
is_connected cannot be used here as it's set + to false earlier than this call + */ + if (!dp_display->audio_enabled) + return; + + dp_audio_enable(audio, false); + /* signal the dp display to safely shutdown clocks */ + dp_display_signal_audio_complete(dp_display); +} + +static const struct hdmi_codec_ops dp_audio_codec_ops = { + .hw_params = dp_audio_hw_params, + .audio_shutdown = dp_audio_shutdown, + .get_eld = dp_audio_get_eld, + .hook_plugged_cb = dp_audio_hook_plugged_cb, +}; + +static struct hdmi_codec_pdata codec_data = { + .ops = &dp_audio_codec_ops, + .max_i2s_channels = 8, + .i2s = 1, +}; + +int dp_register_audio_driver(struct device *dev, + struct dp_audio *dp_audio) +{ + struct dp_audio_private *audio_priv; + + audio_priv = container_of(dp_audio, + struct dp_audio_private, dp_audio); + + audio_priv->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + return PTR_ERR_OR_ZERO(audio_priv->audio_pdev); +} + +struct dp_audio *dp_audio_get(struct platform_device *pdev, + struct dp_panel *panel, + struct dp_catalog *catalog) +{ + int rc = 0; + struct dp_audio_private *audio; + struct dp_audio *dp_audio; + + if (!pdev || !panel || !catalog) { + DRM_ERROR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + audio = devm_kzalloc(&pdev->dev, sizeof(*audio), GFP_KERNEL); + if (!audio) { + rc = -ENOMEM; + goto error; + } + + audio->pdev = pdev; + audio->panel = panel; + audio->catalog = catalog; + + dp_audio = &audio->dp_audio; + + dp_catalog_audio_init(catalog); + + return dp_audio; +error: + return ERR_PTR(rc); +} + +void dp_audio_put(struct dp_audio *dp_audio) +{ + struct dp_audio_private *audio; + + if (!dp_audio) + return; + + audio = container_of(dp_audio, struct dp_audio_private, dp_audio); + + devm_kfree(&audio->pdev->dev, audio); +} diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h new file mode 100644 index 000000000000..84e5f4a5d26b --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_audio.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_AUDIO_H_ +#define _DP_AUDIO_H_ + +#include <linux/platform_device.h> + +#include "dp_panel.h" +#include "dp_catalog.h" +#include <sound/hdmi-codec.h> + +/** + * struct dp_audio + * @lane_count: number of lanes configured in current session + * @bw_code: link rate's bandwidth code for current session + */ +struct dp_audio { + u32 lane_count; + u32 bw_code; +}; + +/** + * dp_audio_get() + * + * Creates an instance of dp_audio. + * + * @pdev: caller's platform device instance. + * @panel: an instance of dp_panel module. + * @catalog: an instance of dp_catalog module. + * + * Returns an error code in case of failure, otherwise + * a pointer to the newly created dp_audio instance. + */ +struct dp_audio *dp_audio_get(struct platform_device *pdev, + struct dp_panel *panel, + struct dp_catalog *catalog); + +/** + * dp_register_audio_driver() + * + * Registers the DP device with the hdmi_codec interface. + * + * @dev: DP device instance. + * @dp_audio: an instance of dp_audio module. + * + * Returns the error code in case of failure, otherwise + * zero on success. + */ +int dp_register_audio_driver(struct device *dev, + struct dp_audio *dp_audio); + +/** + * dp_audio_put() + * + * Cleans up the dp_audio instance. + * + * @dp_audio: an instance of dp_audio. 
+ */ +void dp_audio_put(struct dp_audio *dp_audio); + +int dp_audio_hw_params(struct device *dev, + void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params); + +#endif /* _DP_AUDIO_H_ */ + + diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c new file mode 100644 index 000000000000..19b35ae3e927 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -0,0 +1,535 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#include <linux/delay.h> +#include <drm/drm_print.h> + +#include "dp_reg.h" +#include "dp_aux.h" + +#define DP_AUX_ENUM_STR(x) #x + +struct dp_aux_private { + struct device *dev; + struct dp_catalog *catalog; + + struct mutex mutex; + struct completion comp; + + u32 aux_error_num; + u32 retry_cnt; + bool cmd_busy; + bool native; + bool read; + bool no_send_addr; + bool no_send_stop; + u32 offset; + u32 segment; + u32 isr; + + struct drm_dp_aux dp_aux; +}; + +static const char *dp_aux_get_error(u32 aux_error) +{ + switch (aux_error) { + case DP_AUX_ERR_NONE: + return DP_AUX_ENUM_STR(DP_AUX_ERR_NONE); + case DP_AUX_ERR_ADDR: + return DP_AUX_ENUM_STR(DP_AUX_ERR_ADDR); + case DP_AUX_ERR_TOUT: + return DP_AUX_ENUM_STR(DP_AUX_ERR_TOUT); + case DP_AUX_ERR_NACK: + return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK); + case DP_AUX_ERR_DEFER: + return DP_AUX_ENUM_STR(DP_AUX_ERR_DEFER); + case DP_AUX_ERR_NACK_DEFER: + return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK_DEFER); + default: + return "unknown"; + } +} + +static u32 dp_aux_write(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 data[4], reg, len; + u8 *msgdata = msg->buffer; + int const AUX_CMD_FIFO_LEN = 128; + int i = 0; + + if (aux->read) + len = 4; + else + len = msg->size + 4; + + /* + * cmd fifo only has depth of 144 bytes + * limit buf length to 128 bytes here + */ + if (len > AUX_CMD_FIFO_LEN) { + DRM_ERROR("buf size greater than allowed size of 128 bytes\n"); + return 0; + } + + /* Pack cmd and write to HW */ + data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */ + if (aux->read) + data[0] |= BIT(4); /* R/W */ + + data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */ + data[2] = msg->address & 0xff; /* addr[7:0] */ + data[3] = (msg->size - 1) & 0xff; /* len[7:0] */ + + for (i = 0; i < len; i++) { + reg = (i < 4) ? data[i] : msgdata[i - 4]; + /* index = 0, write */ + reg = (((reg) << DP_AUX_DATA_OFFSET) + & DP_AUX_DATA_MASK) | DP_AUX_DATA_WRITE; + if (i == 0) + reg |= DP_AUX_DATA_INDEX_WRITE; + aux->catalog->aux_data = reg; + dp_catalog_aux_write_data(aux->catalog); + } + + dp_catalog_aux_clear_trans(aux->catalog, false); + dp_catalog_aux_clear_hw_interrupts(aux->catalog); + + reg = 0; /* Transaction number == 1 */ + if (!aux->native) { /* i2c */ + reg |= DP_AUX_TRANS_CTRL_I2C; + + if (aux->no_send_addr) + reg |= DP_AUX_TRANS_CTRL_NO_SEND_ADDR; + + if (aux->no_send_stop) + reg |= DP_AUX_TRANS_CTRL_NO_SEND_STOP; + } + + reg |= DP_AUX_TRANS_CTRL_GO; + aux->catalog->aux_data = reg; + dp_catalog_aux_write_trans(aux->catalog); + + return len; +} + +static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 ret, len, timeout; + int aux_timeout_ms = HZ/4; + + reinit_completion(&aux->comp); + + len = dp_aux_write(aux, msg); + if (len == 0) { + DRM_ERROR("DP AUX write failed\n"); + return -EINVAL; + } + + timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms); + if (!timeout) { + DRM_ERROR("aux %s timeout\n", (aux->read ? 
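/*
 * Editor's sketch of the 4-byte AUX command header that dp_aux_write()
 * above pushes into the FIFO ahead of the payload: addr[19:16] plus the
 * R/W flag, addr[15:8], addr[7:0], then length minus one. Standalone
 * illustration; not part of the patch.
 */
static void dp_aux_pack_header(u8 hdr[4], u32 address, size_t size, bool read)
{
	hdr[0] = (address >> 16) & 0xf;	/* addr[19:16] */
	if (read)
		hdr[0] |= BIT(4);	/* R/W bit: 1 = read */
	hdr[1] = (address >> 8) & 0xff;	/* addr[15:8] */
	hdr[2] = address & 0xff;	/* addr[7:0]  */
	hdr[3] = (size - 1) & 0xff;	/* len[7:0]   */
}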
"read" : "write")); + return -ETIMEDOUT; + } + + if (aux->aux_error_num == DP_AUX_ERR_NONE) { + ret = len; + } else { + DRM_ERROR_RATELIMITED("aux err: %s\n", + dp_aux_get_error(aux->aux_error_num)); + + ret = -EINVAL; + } + + return ret; +} + +static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 data; + u8 *dp; + u32 i, actual_i; + u32 len = msg->size; + + dp_catalog_aux_clear_trans(aux->catalog, true); + + data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */ + data |= DP_AUX_DATA_READ; /* read */ + + aux->catalog->aux_data = data; + dp_catalog_aux_write_data(aux->catalog); + + dp = msg->buffer; + + /* discard first byte */ + data = dp_catalog_aux_read_data(aux->catalog); + + for (i = 0; i < len; i++) { + data = dp_catalog_aux_read_data(aux->catalog); + *dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff); + + actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF; + if (i != actual_i) + DRM_ERROR("Index mismatch: expected %d, found %d\n", + i, actual_i); + } +} + +static void dp_aux_native_handler(struct dp_aux_private *aux) +{ + u32 isr = aux->isr; + + if (isr & DP_INTR_AUX_I2C_DONE) + aux->aux_error_num = DP_AUX_ERR_NONE; + else if (isr & DP_INTR_WRONG_ADDR) + aux->aux_error_num = DP_AUX_ERR_ADDR; + else if (isr & DP_INTR_TIMEOUT) + aux->aux_error_num = DP_AUX_ERR_TOUT; + if (isr & DP_INTR_NACK_DEFER) + aux->aux_error_num = DP_AUX_ERR_NACK; + if (isr & DP_INTR_AUX_ERROR) { + aux->aux_error_num = DP_AUX_ERR_PHY; + dp_catalog_aux_clear_hw_interrupts(aux->catalog); + } + + complete(&aux->comp); +} + +static void dp_aux_i2c_handler(struct dp_aux_private *aux) +{ + u32 isr = aux->isr; + + if (isr & DP_INTR_AUX_I2C_DONE) { + if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER)) + aux->aux_error_num = DP_AUX_ERR_NACK; + else + aux->aux_error_num = DP_AUX_ERR_NONE; + } else { + if (isr & DP_INTR_WRONG_ADDR) + aux->aux_error_num = DP_AUX_ERR_ADDR; + else if (isr & DP_INTR_TIMEOUT) + aux->aux_error_num = DP_AUX_ERR_TOUT; + if (isr & DP_INTR_NACK_DEFER) + aux->aux_error_num = DP_AUX_ERR_NACK_DEFER; + if (isr & DP_INTR_I2C_NACK) + aux->aux_error_num = DP_AUX_ERR_NACK; + if (isr & DP_INTR_I2C_DEFER) + aux->aux_error_num = DP_AUX_ERR_DEFER; + if (isr & DP_INTR_AUX_ERROR) { + aux->aux_error_num = DP_AUX_ERR_PHY; + dp_catalog_aux_clear_hw_interrupts(aux->catalog); + } + } + + complete(&aux->comp); +} + +static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, + struct drm_dp_aux_msg *input_msg) +{ + u32 edid_address = 0x50; + u32 segment_address = 0x30; + bool i2c_read = input_msg->request & + (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + u8 *data; + + if (aux->native || i2c_read || ((input_msg->address != edid_address) && + (input_msg->address != segment_address))) + return; + + + data = input_msg->buffer; + if (input_msg->address == segment_address) + aux->segment = *data; + else + aux->offset = *data; +} + +/** + * dp_aux_transfer_helper() - helper function for EDID read transactions + * + * @aux: DP AUX private structure + * @input_msg: input message from DRM upstream APIs + * @send_seg: send the segment to sink + * + * return: void + * + * This helper function is used to fix EDID reads for non-compliant + * sinks that do not handle the i2c middle-of-transaction flag correctly. 
+ */ +static void dp_aux_transfer_helper(struct dp_aux_private *aux, + struct drm_dp_aux_msg *input_msg, + bool send_seg) +{ + struct drm_dp_aux_msg helper_msg; + u32 message_size = 0x10; + u32 segment_address = 0x30; + u32 const edid_block_length = 0x80; + bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT; + bool i2c_read = input_msg->request & + (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + + if (!i2c_mot || !i2c_read || (input_msg->size == 0)) + return; + + /* + * Sending the segment value and EDID offset will be performed + * from the DRM upstream EDID driver for each block. Avoid + * duplicate AUX transactions related to this while reading the + * first 16 bytes of each block. + */ + if (!(aux->offset % edid_block_length) || !send_seg) + goto end; + + aux->read = false; + aux->cmd_busy = true; + aux->no_send_addr = true; + aux->no_send_stop = true; + + /* + * Send the segment address for every i2c read in which the + * middle-of-transaction flag is set. This is required to support EDID + * reads of more than 2 blocks as the segment address is reset to 0 + * since we are overriding the middle-of-transaction flag for read + * transactions. + */ + + if (aux->segment) { + memset(&helper_msg, 0, sizeof(helper_msg)); + helper_msg.address = segment_address; + helper_msg.buffer = &aux->segment; + helper_msg.size = 1; + dp_aux_cmd_fifo_tx(aux, &helper_msg); + } + + /* + * Send the offset address for every i2c read in which the + * middle-of-transaction flag is set. This will ensure that the sink + * will update its read pointer and return the correct portion of the + * EDID buffer in the subsequent i2c read transaction triggered in the + * native AUX transfer function. + */ + memset(&helper_msg, 0, sizeof(helper_msg)); + helper_msg.address = input_msg->address; + helper_msg.buffer = &aux->offset; + helper_msg.size = 1; + dp_aux_cmd_fifo_tx(aux, &helper_msg); + +end: + aux->offset += message_size; + if (aux->offset == 0x80 || aux->offset == 0x100) + aux->segment = 0x0; /* reset segment at end of block */ +} + +/* + * This function does the real work of processing an AUX transaction. + * It calls the aux_reset() function to reset the AUX channel + * if the wait times out. + */ +static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, + struct drm_dp_aux_msg *msg) +{ + ssize_t ret; + int const aux_cmd_native_max = 16; + int const aux_cmd_i2c_max = 128; + int const retry_count = 5; + struct dp_aux_private *aux = container_of(dp_aux, + struct dp_aux_private, dp_aux); + + mutex_lock(&aux->mutex); + + aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); + + /* Ignore address-only messages */ + if ((msg->size == 0) || (msg->buffer == NULL)) { + msg->reply = aux->native ? 
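/*
 * Editor's sketch: how an absolute EDID byte position maps onto the E-DDC
 * segment pointer (i2c 0x30) and word offset (i2c 0x50) that the helper
 * above programs. Purely illustrative.
 */
static void dp_edid_pos_to_seg_off(u32 pos, u8 *segment, u8 *offset)
{
	*segment = pos >> 8;	/* 256 bytes (two 128-byte blocks) per segment */
	*offset = pos & 0xff;	/* byte offset within that segment */
}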
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + ret = msg->size; + goto unlock_exit; + } + + /* msg sanity check */ + if ((aux->native && (msg->size > aux_cmd_native_max)) || + (msg->size > aux_cmd_i2c_max)) { + DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n", + __func__, msg->size, msg->request); + ret = -EINVAL; + goto unlock_exit; + } + + dp_aux_update_offset_and_segment(aux, msg); + dp_aux_transfer_helper(aux, msg, true); + + aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + aux->cmd_busy = true; + + if (aux->read) { + aux->no_send_addr = true; + aux->no_send_stop = false; + } else { + aux->no_send_addr = true; + aux->no_send_stop = true; + } + + ret = dp_aux_cmd_fifo_tx(aux, msg); + + if (ret < 0) { + if (aux->native) { + aux->retry_cnt++; + if (!(aux->retry_cnt % retry_count)) + dp_catalog_aux_update_cfg(aux->catalog); + dp_catalog_aux_reset(aux->catalog); + } + usleep_range(400, 500); /* at least 400us to next try */ + goto unlock_exit; + } + + if (aux->aux_error_num == DP_AUX_ERR_NONE) { + if (aux->read) + dp_aux_cmd_fifo_rx(aux, msg); + + msg->reply = aux->native ? + DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + } else { + /* Reply defer to retry */ + msg->reply = aux->native ? + DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER; + } + + /* Return requested size for success or retry */ + ret = msg->size; + aux->retry_cnt = 0; + +unlock_exit: + aux->cmd_busy = false; + mutex_unlock(&aux->mutex); + return ret; +} + +void dp_aux_isr(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DRM_ERROR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + aux->isr = dp_catalog_aux_get_irq(aux->catalog); + + if (!aux->cmd_busy) + return; + + if (aux->native) + dp_aux_native_handler(aux); + else + dp_aux_i2c_handler(aux); +} + +void dp_aux_reconfig(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + dp_catalog_aux_update_cfg(aux->catalog); + dp_catalog_aux_reset(aux->catalog); +} + +void dp_aux_init(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DRM_ERROR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + dp_catalog_aux_enable(aux->catalog, true); + aux->retry_cnt = 0; +} + +void dp_aux_deinit(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + dp_catalog_aux_enable(aux->catalog, false); +} + +int dp_aux_register(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + int ret; + + if (!dp_aux) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + aux->dp_aux.name = "dpu_dp_aux"; + aux->dp_aux.dev = aux->dev; + aux->dp_aux.transfer = dp_aux_transfer; + ret = drm_dp_aux_register(&aux->dp_aux); + if (ret) { + DRM_ERROR("%s: failed to register drm aux: %d\n", __func__, + ret); + return ret; + } + + return 0; +} + +void dp_aux_unregister(struct drm_dp_aux *dp_aux) +{ + drm_dp_aux_unregister(dp_aux); +} + +struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog) +{ + struct dp_aux_private *aux; + + if (!catalog) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-ENODEV); + } + + aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL); + if (!aux) + return ERR_PTR(-ENOMEM); + + init_completion(&aux->comp); + aux->cmd_busy = false; + mutex_init(&aux->mutex); + + aux->dev = 
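/*
 * Editor's note on dp_aux_transfer()'s error path above: every failed
 * native transfer bumps retry_cnt and resets the AUX block, and each
 * retry_count-th (5th) consecutive failure additionally re-calibrates
 * the PHY through dp_catalog_aux_update_cfg(); the 400-500us sleep
 * spaces out the retries. retry_cnt is cleared again on the next
 * successful transfer.
 */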
dev; + aux->catalog = catalog; + aux->retry_cnt = 0; + + return &aux->dp_aux; +} + +void dp_aux_put(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) + return; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + mutex_destroy(&aux->mutex); + + devm_kfree(aux->dev, aux); +} diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h new file mode 100644 index 000000000000..f8b8ba919465 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_aux.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_AUX_H_ +#define _DP_AUX_H_ + +#include "dp_catalog.h" +#include <drm/drm_dp_helper.h> + +#define DP_AUX_ERR_NONE 0 +#define DP_AUX_ERR_ADDR -1 +#define DP_AUX_ERR_TOUT -2 +#define DP_AUX_ERR_NACK -3 +#define DP_AUX_ERR_DEFER -4 +#define DP_AUX_ERR_NACK_DEFER -5 +#define DP_AUX_ERR_PHY -6 + +int dp_aux_register(struct drm_dp_aux *dp_aux); +void dp_aux_unregister(struct drm_dp_aux *dp_aux); +void dp_aux_isr(struct drm_dp_aux *dp_aux); +void dp_aux_init(struct drm_dp_aux *dp_aux); +void dp_aux_deinit(struct drm_dp_aux *dp_aux); +void dp_aux_reconfig(struct drm_dp_aux *dp_aux); + +struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog); +void dp_aux_put(struct drm_dp_aux *aux); + +#endif /*__DP_AUX_H_*/ diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c new file mode 100644 index 000000000000..b15b4ce4ba35 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -0,0 +1,1019 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <linux/rational.h> +#include <linux/delay.h> +#include <linux/iopoll.h> +#include <linux/phy/phy.h> +#include <linux/phy/phy-dp.h> +#include <linux/rational.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_print.h> + +#include "dp_catalog.h" +#include "dp_reg.h" + +#define POLLING_SLEEP_US 1000 +#define POLLING_TIMEOUT_US 10000 + +#define SCRAMBLER_RESET_COUNT_VALUE 0xFC + +#define DP_INTERRUPT_STATUS_ACK_SHIFT 1 +#define DP_INTERRUPT_STATUS_MASK_SHIFT 2 + +#define MSM_DP_CONTROLLER_AHB_OFFSET 0x0000 +#define MSM_DP_CONTROLLER_AHB_SIZE 0x0200 +#define MSM_DP_CONTROLLER_AUX_OFFSET 0x0200 +#define MSM_DP_CONTROLLER_AUX_SIZE 0x0200 +#define MSM_DP_CONTROLLER_LINK_OFFSET 0x0400 +#define MSM_DP_CONTROLLER_LINK_SIZE 0x0C00 +#define MSM_DP_CONTROLLER_P0_OFFSET 0x1000 +#define MSM_DP_CONTROLLER_P0_SIZE 0x0400 + +#define DP_INTERRUPT_STATUS1 \ + (DP_INTR_AUX_I2C_DONE| \ + DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \ + DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \ + DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \ + DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR) + +#define DP_INTERRUPT_STATUS1_ACK \ + (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT) +#define DP_INTERRUPT_STATUS1_MASK \ + (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT) + +#define DP_INTERRUPT_STATUS2 \ + (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \ + DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED) + +#define DP_INTERRUPT_STATUS2_ACK \ + (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT) +#define DP_INTERRUPT_STATUS2_MASK \ + (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT) + +struct dp_catalog_private { + struct device *dev; + struct dp_io *io; + u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX]; + struct dp_catalog dp_catalog; + u8 
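/*
 * Editor's note: each interrupt source in these status registers owns
 * three consecutive bits - raw status, write-1-to-ack (status << 1) and
 * enable/mask (status << 2) - which is what the _ACK_SHIFT/_MASK_SHIFT
 * macros above encode. E.g. DP_INTR_AUX_I2C_DONE is BIT(3), so its ack
 * bit is BIT(4) and its mask bit is BIT(5); the status #defines in
 * dp_catalog.h accordingly step in multiples of three.
 */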
aux_lut_cfg_index[PHY_AUX_CFG_MAX]; +}; + +static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset) +{ + offset += MSM_DP_CONTROLLER_AUX_OFFSET; + return readl_relaxed(catalog->io->dp_controller.base + offset); +} + +static inline void dp_write_aux(struct dp_catalog_private *catalog, + u32 offset, u32 data) +{ + offset += MSM_DP_CONTROLLER_AUX_OFFSET; + /* + * To make sure aux reg writes happen before any other operation, + * this function uses writel() instead of writel_relaxed() + */ + writel(data, catalog->io->dp_controller.base + offset); +} + +static inline u32 dp_read_ahb(struct dp_catalog_private *catalog, u32 offset) +{ + offset += MSM_DP_CONTROLLER_AHB_OFFSET; + return readl_relaxed(catalog->io->dp_controller.base + offset); +} + +static inline void dp_write_ahb(struct dp_catalog_private *catalog, + u32 offset, u32 data) +{ + offset += MSM_DP_CONTROLLER_AHB_OFFSET; + /* + * To make sure ahb reg writes happen before any other operation, + * this function uses writel() instead of writel_relaxed() + */ + writel(data, catalog->io->dp_controller.base + offset); +} + +static inline void dp_write_p0(struct dp_catalog_private *catalog, + u32 offset, u32 data) +{ + offset += MSM_DP_CONTROLLER_P0_OFFSET; + /* + * To make sure interface reg writes happen before any other operation, + * this function uses writel() instead of writel_relaxed() + */ + writel(data, catalog->io->dp_controller.base + offset); +} + +static inline u32 dp_read_p0(struct dp_catalog_private *catalog, + u32 offset) +{ + offset += MSM_DP_CONTROLLER_P0_OFFSET; + return readl_relaxed(catalog->io->dp_controller.base + offset); +} + +static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset) +{ + offset += MSM_DP_CONTROLLER_LINK_OFFSET; + return readl_relaxed(catalog->io->dp_controller.base + offset); +} + +static inline void dp_write_link(struct dp_catalog_private *catalog, + u32 offset, u32 data) +{ + offset += MSM_DP_CONTROLLER_LINK_OFFSET; + /* + * To make sure link reg writes happen before any other operation, + * this function uses writel() instead of writel_relaxed() + */ + writel(data, catalog->io->dp_controller.base + offset); +} + +/* aux related catalog functions */ +u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + return dp_read_aux(catalog, REG_DP_AUX_DATA); +} + +int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_aux(catalog, REG_DP_AUX_DATA, dp_catalog->aux_data); + return 0; +} + +int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, dp_catalog->aux_data); + return 0; +} + +int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read) +{ + u32 data; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + if (read) { + data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL); + data &= ~DP_AUX_TRANS_CTRL_GO; + dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); + } else { + dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0); + } + return 0; +} + +int 
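/*
 * Editor's note: the writel()/writel_relaxed() split above is about
 * ordering, not speed. writel() carries a barrier, so the MMIO store is
 * ordered against preceding normal memory accesses; the read helpers use
 * readl_relaxed() because no such ordering is required on those paths.
 */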
dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS); + dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); + dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); + dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0); + return 0; +} + +void dp_catalog_aux_reset(struct dp_catalog *dp_catalog) +{ + u32 aux_ctrl; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL); + + aux_ctrl |= DP_AUX_CTRL_RESET; + dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); + usleep_range(1000, 1100); /* h/w recommended delay */ + + aux_ctrl &= ~DP_AUX_CTRL_RESET; + dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); +} + +void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable) +{ + u32 aux_ctrl; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL); + + if (enable) { + dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff); + dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff); + aux_ctrl |= DP_AUX_CTRL_ENABLE; + } else { + aux_ctrl &= ~DP_AUX_CTRL_ENABLE; + } + + dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); +} + +void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + struct dp_io *dp_io = catalog->io; + struct phy *phy = dp_io->phy; + + phy_calibrate(phy); +} + +static void dump_regs(void __iomem *base, int len) +{ + int i; + u32 x0, x4, x8, xc; + u32 addr_off = 0; + + len = DIV_ROUND_UP(len, 16); + for (i = 0; i < len; i++) { + x0 = readl_relaxed(base + addr_off); + x4 = readl_relaxed(base + addr_off + 0x04); + x8 = readl_relaxed(base + addr_off + 0x08); + xc = readl_relaxed(base + addr_off + 0x0c); + + pr_info("%08x: %08x %08x %08x %08x", addr_off, x0, x4, x8, xc); + addr_off += 16; + } +} + +void dp_catalog_dump_regs(struct dp_catalog *dp_catalog) +{ + u32 offset, len; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + pr_info("AHB regs\n"); + offset = MSM_DP_CONTROLLER_AHB_OFFSET; + len = MSM_DP_CONTROLLER_AHB_SIZE; + dump_regs(catalog->io->dp_controller.base + offset, len); + + pr_info("AUXCLK regs\n"); + offset = MSM_DP_CONTROLLER_AUX_OFFSET; + len = MSM_DP_CONTROLLER_AUX_SIZE; + dump_regs(catalog->io->dp_controller.base + offset, len); + + pr_info("LCLK regs\n"); + offset = MSM_DP_CONTROLLER_LINK_OFFSET; + len = MSM_DP_CONTROLLER_LINK_SIZE; + dump_regs(catalog->io->dp_controller.base + offset, len); + + pr_info("P0CLK regs\n"); + offset = MSM_DP_CONTROLLER_P0_OFFSET; + len = MSM_DP_CONTROLLER_P0_SIZE; + dump_regs(catalog->io->dp_controller.base + offset, len); +} + +int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 intr, intr_ack; + + intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS); + intr &= ~DP_INTERRUPT_STATUS1_MASK; + intr_ack = (intr & DP_INTERRUPT_STATUS1) + << DP_INTERRUPT_STATUS_ACK_SHIFT; + dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack | + DP_INTERRUPT_STATUS1_MASK); + + return intr; + +} + +/* controller related catalog functions */ +void 
dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, + u32 dp_tu, u32 valid_boundary, + u32 valid_boundary2) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary); + dp_write_link(catalog, REG_DP_TU, dp_tu); + dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2); +} + +void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_link(catalog, REG_DP_STATE_CTRL, state); +} + +void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + DRM_DEBUG_DP("DP_CONFIGURATION_CTRL=0x%x\n", cfg); + + dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg); +} + +void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */ + u32 ln_mapping; + + ln_mapping = ln_0 << LANE0_MAPPING_SHIFT; + ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT; + ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT; + ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT; + + dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING, + ln_mapping); +} + +void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, + bool enable) +{ + u32 mainlink_ctrl; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + if (enable) { + /* + * To make sure link reg writes happens before other operation, + * dp_write_link() function uses writel() + */ + mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + + mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET | + DP_MAINLINK_CTRL_ENABLE); + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + + mainlink_ctrl |= DP_MAINLINK_CTRL_RESET; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + + mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + + mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE | + DP_MAINLINK_FB_BOUNDARY_SEL); + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + } else { + mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + } +} + +void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, + u32 colorimetry_cfg, + u32 test_bits_depth) +{ + u32 misc_val; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0); + + /* clear bpp bits */ + misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT); + misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT; + misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT; + /* Configure clock to synchronous mode */ + misc_val |= DP_MISC0_SYNCHRONOUS_CLK; + + DRM_DEBUG_DP("misc settings = 0x%x\n", misc_val); + dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val); +} + +void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, + u32 rate, u32 stream_rate_khz, + bool fixed_nvid) +{ + u32 pixel_m, pixel_n; + u32 mvid, nvid, pixel_div = 0, dispcc_input_rate; + u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE; + u32 
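/*
 * Editor's note: the enable path in dp_catalog_ctrl_mainlink_ctrl() above
 * deliberately pulses the reset - clear ENABLE and RESET, assert RESET,
 * release it, then finally set ENABLE together with FB_BOUNDARY_SEL - so
 * the mainlink always comes up from a known state.
 */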
const link_rate_hbr2 = 540000; + u32 const link_rate_hbr3 = 810000; + unsigned long den, num; + + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + if (rate == link_rate_hbr3) + pixel_div = 6; + else if (rate == 162000 || rate == 270000) + pixel_div = 2; + else if (rate == link_rate_hbr2) + pixel_div = 4; + else + DRM_ERROR("Invalid pixel mux divider\n"); + + dispcc_input_rate = (rate * 10) / pixel_div; + + rational_best_approximation(dispcc_input_rate, stream_rate_khz, + (unsigned long)(1 << 16) - 1, + (unsigned long)(1 << 16) - 1, &den, &num); + + den = ~(den - num); + den = den & 0xFFFF; + pixel_m = num; + pixel_n = den; + + mvid = (pixel_m & 0xFFFF) * 5; + nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF); + + if (nvid < nvid_fixed) { + u32 temp; + + temp = (nvid_fixed / nvid) * nvid; + mvid = (nvid_fixed / nvid) * mvid; + nvid = temp; + } + + if (link_rate_hbr2 == rate) + nvid *= 2; + + if (link_rate_hbr3 == rate) + nvid *= 3; + + DRM_DEBUG_DP("mvid=0x%x, nvid=0x%x\n", mvid, nvid); + dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid); + dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid); + dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0); +} + +int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, + u32 pattern) +{ + int bit, ret; + u32 data; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + bit = BIT(pattern - 1); + DRM_DEBUG_DP("hw: bit=%d train=%d\n", bit, pattern); + dp_catalog_ctrl_state_ctrl(dp_catalog, bit); + + bit = BIT(pattern - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT; + + /* Poll for mainlink ready status */ + ret = readx_poll_timeout(readl, catalog->io->dp_controller.base + + MSM_DP_CONTROLLER_LINK_OFFSET + + REG_DP_MAINLINK_READY, + data, data & bit, + POLLING_SLEEP_US, POLLING_TIMEOUT_US); + if (ret < 0) { + DRM_ERROR("set pattern for link_train=%d failed\n", pattern); + return ret; + } + return 0; +} + +void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog) +{ + u32 sw_reset; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET); + + sw_reset |= DP_SW_RESET; + dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); + usleep_range(1000, 1100); /* h/w recommended delay */ + + sw_reset &= ~DP_SW_RESET; + dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); +} + +bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog) +{ + u32 data; + int ret; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + /* Poll for mainlink ready status */ + ret = readl_poll_timeout(catalog->io->dp_controller.base + + MSM_DP_CONTROLLER_LINK_OFFSET + + REG_DP_MAINLINK_READY, + data, data & DP_MAINLINK_READY_FOR_VIDEO, + POLLING_SLEEP_US, POLLING_TIMEOUT_US); + if (ret < 0) { + DRM_ERROR("mainlink not ready\n"); + return false; + } + + return true; +} + +void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, + bool enable) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + if (enable) { + dp_write_ahb(catalog, REG_DP_INTR_STATUS, + DP_INTERRUPT_STATUS1_MASK); + dp_write_ahb(catalog, REG_DP_INTR_STATUS2, + DP_INTERRUPT_STATUS2_MASK); + } else { + dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00); + dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00); + } +} + +void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, + u32 intr_mask, bool en) +{ 
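/*
 * Editor's note: the MSA code above programs the MVID/NVID pair that
 * advertises the pixel-to-link-clock ratio to the sink (per the
 * DisplayPort spec, Mvid/Nvid tracks stream_clk / link_symbol_clk).
 * rational_best_approximation() finds a 16-bit M/N pair for the divided
 * link clock versus the stream rate, the result is scaled up so NVID is
 * at least DP_LINK_CONSTANT_N_VALUE (0x8000), and NVID is then multiplied
 * by 2 (HBR2) or 3 (HBR3) to compensate for the pixel mux divider chosen
 * earlier.
 */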
+ struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); + + config = (en ? config | intr_mask : config & ~intr_mask); + + dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK, + config & DP_DP_HPD_INT_MASK); +} + +void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); + + /* enable HPD interrupts */ + dp_catalog_hpd_config_intr(dp_catalog, + DP_DP_HPD_PLUG_INT_MASK | DP_DP_IRQ_HPD_INT_MASK + | DP_DP_HPD_UNPLUG_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true); + + /* Configure REFTIMER and enable it */ + reftimer |= DP_DP_HPD_REFTIMER_ENABLE; + dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); + + /* Enable HPD */ + dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); +} + +u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + int isr = 0; + + isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); + dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK, + (isr & DP_DP_HPD_INT_MASK)); + + return isr; +} + +int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 intr, intr_ack; + + intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2); + intr &= ~DP_INTERRUPT_STATUS2_MASK; + intr_ack = (intr & DP_INTERRUPT_STATUS2) + << DP_INTERRUPT_STATUS_ACK_SHIFT; + dp_write_ahb(catalog, REG_DP_INTR_STATUS2, + intr_ack | DP_INTERRUPT_STATUS2_MASK); + + return intr; +} + +void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_ahb(catalog, REG_DP_PHY_CTRL, + DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL); + usleep_range(1000, 1100); /* h/w recommended delay */ + dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0); +} + +int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, + u8 v_level, u8 p_level) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + struct dp_io *dp_io = catalog->io; + struct phy *phy = dp_io->phy; + struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp; + + /* TODO: Update for all lanes instead of just first one */ + opts_dp->voltage[0] = v_level; + opts_dp->pre[0] = p_level; + opts_dp->set_voltages = 1; + phy_configure(phy, &dp_io->phy_opts); + opts_dp->set_voltages = 0; + + return 0; +} + +void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, + u32 pattern) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 value = 0x0; + + /* Make sure to clear the current pattern before starting a new one */ + dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0); + + switch (pattern) { + case DP_PHY_TEST_PATTERN_D10_2: + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_TRAINING_PATTERN1); + break; + case DP_PHY_TEST_PATTERN_ERROR_COUNT: + value &= ~(1 << 16); + dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + value); + value |= SCRAMBLER_RESET_COUNT_VALUE; + dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + value); + dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, + 
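/*
 * Editor's sketch: dp_catalog_ctrl_update_vx_px() above pushes new
 * swing/pre-emphasis values through the generic PHY framework. A
 * standalone equivalent, assuming <linux/phy/phy.h> and
 * <linux/phy/phy-dp.h>; illustrative only (like the code above, it only
 * programs lane 0).
 */
static int dp_phy_set_vx_px(struct phy *phy, union phy_configure_opts *opts,
			    u8 v_level, u8 p_level)
{
	int ret;

	opts->dp.voltage[0] = v_level;
	opts->dp.pre[0] = p_level;
	opts->dp.set_voltages = 1;	/* apply only voltage/pre-emphasis */
	ret = phy_configure(phy, opts);
	opts->dp.set_voltages = 0;

	return ret;
}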
DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2); + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE); + break; + case DP_PHY_TEST_PATTERN_PRBS7: + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_PRBS7); + break; + case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN); + /* 00111110000011111000001111100000 */ + dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0, + 0x3E0F83E0); + /* 00001111100000111110000011111000 */ + dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1, + 0x0F83E0F8); + /* 1111100000111110 */ + dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2, + 0x0000F83E); + break; + case DP_PHY_TEST_PATTERN_CP2520: + value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); + + value = DP_HBR2_ERM_PATTERN; + dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + value); + value |= SCRAMBLER_RESET_COUNT_VALUE; + dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + value); + dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, + DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2); + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE); + value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + value |= DP_MAINLINK_CTRL_ENABLE; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); + break; + case DP_PHY_TEST_PATTERN_SEL_MASK: + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, + DP_MAINLINK_CTRL_ENABLE); + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_TRAINING_PATTERN4); + break; + default: + DRM_DEBUG_DP("No valid test pattern requested:0x%x\n", pattern); + break; + } +} + +u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + return dp_read_link(catalog, REG_DP_MAINLINK_READY); +} + +/* panel related catalog functions */ +int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, + dp_catalog->total); + dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, + dp_catalog->sync_start); + dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, + dp_catalog->width_blanking); + dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active); + return 0; +} + +void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, + struct drm_display_mode *drm_mode) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 hsync_period, vsync_period; + u32 display_v_start, display_v_end; + u32 hsync_start_x, hsync_end_x; + u32 v_sync_width; + u32 hsync_ctl; + u32 display_hctl; + + /* TPG config parameters*/ + hsync_period = drm_mode->htotal; + vsync_period = drm_mode->vtotal; + + display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) * + hsync_period); + display_v_end = ((vsync_period - (drm_mode->vsync_start - + drm_mode->vdisplay)) + * hsync_period) - 1; + + display_v_start += drm_mode->htotal - drm_mode->hsync_start; + display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay); + + hsync_start_x = drm_mode->htotal - drm_mode->hsync_start; + hsync_end_x = hsync_period - (drm_mode->hsync_start - + drm_mode->hdisplay) - 1; + + v_sync_width = drm_mode->vsync_end - 
drm_mode->vsync_start; + + hsync_ctl = (hsync_period << 16) | + (drm_mode->hsync_end - drm_mode->hsync_start); + display_hctl = (hsync_end_x << 16) | hsync_start_x; + + + dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0); + dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl); + dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period * + hsync_period); + dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width * + hsync_period); + dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0); + dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start); + dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end); + dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0); + + dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, + DP_TPG_CHECKERED_RECT_PATTERN); + dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG, + DP_TPG_VIDEO_CONFIG_BPP_8BIT | + DP_TPG_VIDEO_CONFIG_RGB); + dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, + DP_BIST_ENABLE_DPBIST_EN); + dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, + DP_TIMING_ENGINE_EN_EN); + DRM_DEBUG_DP("%s: enabled tpg\n", __func__); +} + +void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0); + dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0); + dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0); +} + +struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io) +{ + struct dp_catalog_private *catalog; + + if (!io) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL); + if (!catalog) + return ERR_PTR(-ENOMEM); + + catalog->dev = dev; + catalog->io = io; + + return &catalog->dp_catalog; +} + +void dp_catalog_audio_get_header(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; + enum dp_catalog_audio_sdp_type sdp; + enum dp_catalog_audio_header_type header; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + sdp_map = catalog->audio_map; + sdp = dp_catalog->sdp_type; + header = dp_catalog->sdp_header; + + dp_catalog->audio_data = dp_read_link(catalog, + sdp_map[sdp][header]); +} + +void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; + enum dp_catalog_audio_sdp_type sdp; + enum dp_catalog_audio_header_type header; + u32 data; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + sdp_map = catalog->audio_map; + sdp = dp_catalog->sdp_type; + header = dp_catalog->sdp_header; + data = dp_catalog->audio_data; + + dp_write_link(catalog, sdp_map[sdp][header], data); +} + +void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 acr_ctrl, select; + + 
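/*
 * Editor's note: dp_catalog_audio_get_header()/dp_catalog_audio_set_header()
 * above resolve the register to access through the two-level table that
 * dp_catalog_audio_init() installs below: audio_map[sdp_type][header]
 * yields an MMSS_DP_AUDIO_* link-register offset, e.g.
 * audio_map[DP_AUDIO_SDP_STREAM][DP_AUDIO_SDP_HEADER_1] is
 * MMSS_DP_AUDIO_STREAM_0.
 */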
if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + select = dp_catalog->audio_data; + acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14); + + DRM_DEBUG_DP("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl); + + dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); +} + +void dp_catalog_audio_enable(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + bool enable; + u32 audio_ctrl; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + enable = !!dp_catalog->audio_data; + audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG); + + if (enable) + audio_ctrl |= BIT(0); + else + audio_ctrl &= ~BIT(0); + + DRM_DEBUG_DP("dp_audio_cfg = 0x%x\n", audio_ctrl); + + dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl); + /* make sure audio engine is disabled */ + wmb(); +} + +void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 sdp_cfg = 0; + u32 sdp_cfg2 = 0; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG); + /* AUDIO_TIMESTAMP_SDP_EN */ + sdp_cfg |= BIT(1); + /* AUDIO_STREAM_SDP_EN */ + sdp_cfg |= BIT(2); + /* AUDIO_COPY_MANAGEMENT_SDP_EN */ + sdp_cfg |= BIT(5); + /* AUDIO_ISRC_SDP_EN */ + sdp_cfg |= BIT(6); + /* AUDIO_INFOFRAME_SDP_EN */ + sdp_cfg |= BIT(20); + + DRM_DEBUG_DP("sdp_cfg = 0x%x\n", sdp_cfg); + + dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg); + + sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2); + /* IFRM_REGSRC -> Do not use reg values */ + sdp_cfg2 &= ~BIT(0); + /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */ + sdp_cfg2 &= ~BIT(1); + + DRM_DEBUG_DP("sdp_cfg2 = 0x%x\n", sdp_cfg2); + + dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2); +} + +void dp_catalog_audio_init(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + + static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = { + { + MMSS_DP_AUDIO_STREAM_0, + MMSS_DP_AUDIO_STREAM_1, + MMSS_DP_AUDIO_STREAM_1, + }, + { + MMSS_DP_AUDIO_TIMESTAMP_0, + MMSS_DP_AUDIO_TIMESTAMP_1, + MMSS_DP_AUDIO_TIMESTAMP_1, + }, + { + MMSS_DP_AUDIO_INFOFRAME_0, + MMSS_DP_AUDIO_INFOFRAME_1, + MMSS_DP_AUDIO_INFOFRAME_1, + }, + { + MMSS_DP_AUDIO_COPYMANAGEMENT_0, + MMSS_DP_AUDIO_COPYMANAGEMENT_1, + MMSS_DP_AUDIO_COPYMANAGEMENT_1, + }, + { + MMSS_DP_AUDIO_ISRC_0, + MMSS_DP_AUDIO_ISRC_1, + MMSS_DP_AUDIO_ISRC_1, + }, + }; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + catalog->audio_map = sdp_map; +} + +void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 mainlink_levels, safe_to_exit_level; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + safe_to_exit_level = dp_catalog->audio_data; + mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS); + mainlink_levels &= 0xFE0; + mainlink_levels |= safe_to_exit_level; + + DRM_DEBUG_DP("mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n", + mainlink_levels, safe_to_exit_level); + + dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels); +} diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h new file mode 100644 index 000000000000..4b7666f1fe6f --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -0,0 +1,131 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_CATALOG_H_ +#define _DP_CATALOG_H_ + +#include <drm/drm_modes.h> + +#include "dp_parser.h" + +/* interrupts */ +#define DP_INTR_HPD BIT(0) +#define DP_INTR_AUX_I2C_DONE BIT(3) +#define DP_INTR_WRONG_ADDR BIT(6) +#define DP_INTR_TIMEOUT BIT(9) +#define DP_INTR_NACK_DEFER BIT(12) +#define DP_INTR_WRONG_DATA_CNT BIT(15) +#define DP_INTR_I2C_NACK BIT(18) +#define DP_INTR_I2C_DEFER BIT(21) +#define DP_INTR_PLL_UNLOCKED BIT(24) +#define DP_INTR_AUX_ERROR BIT(27) + +#define DP_INTR_READY_FOR_VIDEO BIT(0) +#define DP_INTR_IDLE_PATTERN_SENT BIT(3) +#define DP_INTR_FRAME_END BIT(6) +#define DP_INTR_CRC_UPDATED BIT(9) + +#define DP_AUX_CFG_MAX_VALUE_CNT 3 + +/* PHY AUX config registers */ +enum dp_phy_aux_config_type { + PHY_AUX_CFG0, + PHY_AUX_CFG1, + PHY_AUX_CFG2, + PHY_AUX_CFG3, + PHY_AUX_CFG4, + PHY_AUX_CFG5, + PHY_AUX_CFG6, + PHY_AUX_CFG7, + PHY_AUX_CFG8, + PHY_AUX_CFG9, + PHY_AUX_CFG_MAX, +}; + +enum dp_catalog_audio_sdp_type { + DP_AUDIO_SDP_STREAM, + DP_AUDIO_SDP_TIMESTAMP, + DP_AUDIO_SDP_INFOFRAME, + DP_AUDIO_SDP_COPYMANAGEMENT, + DP_AUDIO_SDP_ISRC, + DP_AUDIO_SDP_MAX, +}; + +enum dp_catalog_audio_header_type { + DP_AUDIO_SDP_HEADER_1, + DP_AUDIO_SDP_HEADER_2, + DP_AUDIO_SDP_HEADER_3, + DP_AUDIO_SDP_HEADER_MAX, +}; + +struct dp_catalog { + u32 aux_data; + u32 total; + u32 sync_start; + u32 width_blanking; + u32 dp_active; + enum dp_catalog_audio_sdp_type sdp_type; + enum dp_catalog_audio_header_type sdp_header; + u32 audio_data; +}; + +/* AUX APIs */ +u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog); +int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog); +int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog); +int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read); +int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog); +void dp_catalog_aux_reset(struct dp_catalog *dp_catalog); +void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable); +void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog); +int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog); + +/* DP Controller APIs */ +void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state); +void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config); +void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable); +void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb); +void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate, + u32 stream_rate_khz, bool fixed_nvid); +int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, u32 pattern); +void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog); +bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable); +void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, + u32 intr_mask, bool en); +void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog); +u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog); +int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level, + u8 p_level); +int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, + u32 dp_tu, u32 
valid_boundary, + u32 valid_boundary2); +void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, + u32 pattern); +u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog); + +/* DP Panel APIs */ +int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog); +void dp_catalog_dump_regs(struct dp_catalog *dp_catalog); +void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, + struct drm_display_mode *drm_mode); +void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog); + +struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io); + +/* DP Audio APIs */ +void dp_catalog_audio_get_header(struct dp_catalog *catalog); +void dp_catalog_audio_set_header(struct dp_catalog *catalog); +void dp_catalog_audio_config_acr(struct dp_catalog *catalog); +void dp_catalog_audio_enable(struct dp_catalog *catalog); +void dp_catalog_audio_config_sdp(struct dp_catalog *catalog); +void dp_catalog_audio_init(struct dp_catalog *catalog); +void dp_catalog_audio_sfe_level(struct dp_catalog *catalog); + +#endif /* _DP_CATALOG_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c new file mode 100644 index 000000000000..2e3e1917351f --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -0,0 +1,1869 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <linux/types.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/phy/phy.h> +#include <linux/phy/phy-dp.h> +#include <drm/drm_fixed.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_print.h> + +#include "dp_reg.h" +#include "dp_ctrl.h" +#include "dp_link.h" + +#define DP_KHZ_TO_HZ 1000 +#define IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES (30 * HZ / 1000) /* 30 ms */ +#define WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES (HZ / 2) + +#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0) +#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3) + +#define MR_LINK_TRAINING1 0x8 +#define MR_LINK_SYMBOL_ERM 0x80 +#define MR_LINK_PRBS7 0x100 +#define MR_LINK_CUSTOM80 0x200 +#define MR_LINK_TRAINING4 0x40 + +enum { + DP_TRAINING_NONE, + DP_TRAINING_1, + DP_TRAINING_2, +}; + +struct dp_tu_calc_input { + u64 lclk; /* 162, 270, 540 and 810 */ + u64 pclk_khz; /* in KHz */ + u64 hactive; /* active h-width */ + u64 hporch; /* bp + fp + pulse */ + int nlanes; /* no.of.lanes */ + int bpp; /* bits */ + int pixel_enc; /* 444, 420, 422 */ + int dsc_en; /* dsc on/off */ + int async_en; /* async mode */ + int fec_en; /* fec */ + int compress_ratio; /* 2:1 = 200, 3:1 = 300, 3.75:1 = 375 */ + int num_of_dsc_slices; /* number of slices per line */ +}; + +struct dp_vc_tu_mapping_table { + u32 vic; + u8 lanes; + u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */ + u8 bpp; + u8 valid_boundary_link; + u16 delay_start_link; + bool boundary_moderation_en; + u8 valid_lower_boundary_link; + u8 upper_boundary_count; + u8 lower_boundary_count; + u8 tu_size_minus1; +}; + +struct dp_ctrl_private { + struct dp_ctrl dp_ctrl; + struct device *dev; + struct drm_dp_aux *aux; + struct dp_panel *panel; + struct dp_link *link; + struct dp_power *power; + struct dp_parser *parser; + struct dp_catalog *catalog; + + struct completion idle_comp; + struct completion video_comp; +}; + +struct dp_cr_status { + u8 lane_0_1; + u8 lane_2_3; +}; + +#define DP_LANE0_1_CR_DONE 0x11 + +static int dp_aux_link_configure(struct drm_dp_aux 
*aux, + struct dp_link_info *link) +{ + u8 values[2]; + int err; + + values[0] = drm_dp_link_rate_to_bw_code(link->rate); + values[1] = link->num_lanes; + + if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (err < 0) + return err; + + return 0; +} + +void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + reinit_completion(&ctrl->idle_comp); + dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE); + + if (!wait_for_completion_timeout(&ctrl->idle_comp, + IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES)) + pr_warn("PUSH_IDLE pattern timedout\n"); + + pr_debug("mainlink off done\n"); +} + +static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) +{ + u32 config = 0, tbd; + u8 *dpcd = ctrl->panel->dpcd; + + /* Default-> LSCLK DIV: 1/4 LCLK */ + config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT); + + /* Scrambler reset enable */ + if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP) + config |= DP_CONFIGURATION_CTRL_ASSR; + + tbd = dp_link_get_test_bits_depth(ctrl->link, + ctrl->panel->dp_mode.bpp); + + if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) { + pr_debug("BIT_DEPTH not set. Configure default\n"); + tbd = DP_TEST_BIT_DEPTH_8; + } + + config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT; + + /* Num of Lanes */ + config |= ((ctrl->link->link_params.num_lanes - 1) + << DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT); + + if (drm_dp_enhanced_frame_cap(dpcd)) + config |= DP_CONFIGURATION_CTRL_ENHANCED_FRAMING; + + config |= DP_CONFIGURATION_CTRL_P_INTERLACED; /* progressive video */ + + /* sync clock & static Mvid */ + config |= DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN; + config |= DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK; + + dp_catalog_ctrl_config_ctrl(ctrl->catalog, config); +} + +static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl) +{ + u32 cc, tb; + + dp_catalog_ctrl_lane_mapping(ctrl->catalog); + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); + + dp_ctrl_config_ctrl(ctrl); + + tb = dp_link_get_test_bits_depth(ctrl->link, + ctrl->panel->dp_mode.bpp); + cc = dp_link_get_colorimetry_config(ctrl->link); + dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb); + dp_panel_timing_cfg(ctrl->panel); +} + +/* + * The structure and few functions present below are IP/Hardware + * specific implementation. 
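+ * This is the transfer unit (TU) calculation: it works out how pixel + * data is packed into the 32..64 symbol transfer units on the main + * link, and how much filler and start delay are needed per line + * (see _dp_ctrl_calc_tu() below).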
Most of the implementation will not + * have coding comments + */ +struct tu_algo_data { + s64 lclk_fp; + s64 pclk_fp; + s64 lwidth; + s64 lwidth_fp; + s64 hbp_relative_to_pclk; + s64 hbp_relative_to_pclk_fp; + int nlanes; + int bpp; + int pixelEnc; + int dsc_en; + int async_en; + int bpc; + + uint delay_start_link_extra_pixclk; + int extra_buffer_margin; + s64 ratio_fp; + s64 original_ratio_fp; + + s64 err_fp; + s64 n_err_fp; + s64 n_n_err_fp; + int tu_size; + int tu_size_desired; + int tu_size_minus1; + + int valid_boundary_link; + s64 resulting_valid_fp; + s64 total_valid_fp; + s64 effective_valid_fp; + s64 effective_valid_recorded_fp; + int n_tus; + int n_tus_per_lane; + int paired_tus; + int remainder_tus; + int remainder_tus_upper; + int remainder_tus_lower; + int extra_bytes; + int filler_size; + int delay_start_link; + + int extra_pclk_cycles; + int extra_pclk_cycles_in_link_clk; + s64 ratio_by_tu_fp; + s64 average_valid2_fp; + int new_valid_boundary_link; + int remainder_symbols_exist; + int n_symbols; + s64 n_remainder_symbols_per_lane_fp; + s64 last_partial_tu_fp; + s64 TU_ratio_err_fp; + + int n_tus_incl_last_incomplete_tu; + int extra_pclk_cycles_tmp; + int extra_pclk_cycles_in_link_clk_tmp; + int extra_required_bytes_new_tmp; + int filler_size_tmp; + int lower_filler_size_tmp; + int delay_start_link_tmp; + + bool boundary_moderation_en; + int boundary_mod_lower_err; + int upper_boundary_count; + int lower_boundary_count; + int i_upper_boundary_count; + int i_lower_boundary_count; + int valid_lower_boundary_link; + int even_distribution_BF; + int even_distribution_legacy; + int even_distribution; + int min_hblank_violated; + s64 delay_start_time_fp; + s64 hbp_time_fp; + s64 hactive_time_fp; + s64 diff_abs_fp; + + s64 ratio; +}; + +static int _tu_param_compare(s64 a, s64 b) +{ + u32 a_sign; + u32 b_sign; + s64 a_temp, b_temp, minus_1; + + if (a == b) + return 0; + + minus_1 = drm_fixp_from_fraction(-1, 1); + + a_sign = (a >> 32) & 0x80000000 ? 1 : 0; + + b_sign = (b >> 32) & 0x80000000 ? 
1 : 0; + + if (a_sign > b_sign) + return 2; + else if (b_sign > a_sign) + return 1; + + if (!a_sign && !b_sign) { /* positive */ + if (a > b) + return 1; + else + return 2; + } else { /* negative */ + a_temp = drm_fixp_mul(a, minus_1); + b_temp = drm_fixp_mul(b, minus_1); + + if (a_temp > b_temp) + return 2; + else + return 1; + } +} + +static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in, + struct tu_algo_data *tu) +{ + int nlanes = in->nlanes; + int dsc_num_slices = in->num_of_dsc_slices; + int dsc_num_bytes = 0; + int numerator; + s64 pclk_dsc_fp; + s64 dwidth_dsc_fp; + s64 hbp_dsc_fp; + + int tot_num_eoc_symbols = 0; + int tot_num_hor_bytes = 0; + int tot_num_dummy_bytes = 0; + int dwidth_dsc_bytes = 0; + int eoc_bytes = 0; + + s64 temp1_fp, temp2_fp, temp3_fp; + + tu->lclk_fp = drm_fixp_from_fraction(in->lclk, 1); + tu->pclk_fp = drm_fixp_from_fraction(in->pclk_khz, 1000); + tu->lwidth = in->hactive; + tu->hbp_relative_to_pclk = in->hporch; + tu->nlanes = in->nlanes; + tu->bpp = in->bpp; + tu->pixelEnc = in->pixel_enc; + tu->dsc_en = in->dsc_en; + tu->async_en = in->async_en; + tu->lwidth_fp = drm_fixp_from_fraction(in->hactive, 1); + tu->hbp_relative_to_pclk_fp = drm_fixp_from_fraction(in->hporch, 1); + + if (tu->pixelEnc == 420) { + temp1_fp = drm_fixp_from_fraction(2, 1); + tu->pclk_fp = drm_fixp_div(tu->pclk_fp, temp1_fp); + tu->lwidth_fp = drm_fixp_div(tu->lwidth_fp, temp1_fp); + tu->hbp_relative_to_pclk_fp = + drm_fixp_div(tu->hbp_relative_to_pclk_fp, 2); + } + + if (tu->pixelEnc == 422) { + switch (tu->bpp) { + case 24: + tu->bpp = 16; + tu->bpc = 8; + break; + case 30: + tu->bpp = 20; + tu->bpc = 10; + break; + default: + tu->bpp = 16; + tu->bpc = 8; + break; + } + } else { + tu->bpc = tu->bpp/3; + } + + if (!in->dsc_en) + goto fec_check; + + temp1_fp = drm_fixp_from_fraction(in->compress_ratio, 100); + temp2_fp = drm_fixp_from_fraction(in->bpp, 1); + temp3_fp = drm_fixp_div(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_mul(tu->lwidth_fp, temp3_fp); + + temp1_fp = drm_fixp_from_fraction(8, 1); + temp3_fp = drm_fixp_div(temp2_fp, temp1_fp); + + numerator = drm_fixp2int(temp3_fp); + + dsc_num_bytes = numerator / dsc_num_slices; + eoc_bytes = dsc_num_bytes % nlanes; + tot_num_eoc_symbols = nlanes * dsc_num_slices; + tot_num_hor_bytes = dsc_num_bytes * dsc_num_slices; + tot_num_dummy_bytes = (nlanes - eoc_bytes) * dsc_num_slices; + + if (dsc_num_bytes == 0) + pr_info("incorrect no of bytes per slice=%d\n", dsc_num_bytes); + + dwidth_dsc_bytes = (tot_num_hor_bytes + + tot_num_eoc_symbols + + (eoc_bytes == 0 ? 
0 : tot_num_dummy_bytes)); + + dwidth_dsc_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, 3); + + temp2_fp = drm_fixp_mul(tu->pclk_fp, dwidth_dsc_fp); + temp1_fp = drm_fixp_div(temp2_fp, tu->lwidth_fp); + pclk_dsc_fp = temp1_fp; + + temp1_fp = drm_fixp_div(pclk_dsc_fp, tu->pclk_fp); + temp2_fp = drm_fixp_mul(tu->hbp_relative_to_pclk_fp, temp1_fp); + hbp_dsc_fp = temp2_fp; + + /* output */ + tu->pclk_fp = pclk_dsc_fp; + tu->lwidth_fp = dwidth_dsc_fp; + tu->hbp_relative_to_pclk_fp = hbp_dsc_fp; + +fec_check: + if (in->fec_en) { + temp1_fp = drm_fixp_from_fraction(976, 1000); /* 0.976 */ + tu->lclk_fp = drm_fixp_mul(tu->lclk_fp, temp1_fp); + } +} + +static void _tu_valid_boundary_calc(struct tu_algo_data *tu) +{ + s64 temp1_fp, temp2_fp, temp, temp1, temp2; + int compare_result_1, compare_result_2, compare_result_3; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + + tu->new_valid_boundary_link = drm_fixp2int_ceil(temp2_fp); + + temp = (tu->i_upper_boundary_count * + tu->new_valid_boundary_link + + tu->i_lower_boundary_count * + (tu->new_valid_boundary_link-1)); + tu->average_valid2_fp = drm_fixp_from_fraction(temp, + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count)); + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = tu->lwidth_fp; + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp); + tu->n_tus = drm_fixp2int(temp2_fp); + if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000) + tu->n_tus += 1; + + temp1_fp = drm_fixp_from_fraction(tu->n_tus, 1); + temp2_fp = drm_fixp_mul(temp1_fp, tu->average_valid2_fp); + temp1_fp = drm_fixp_from_fraction(tu->n_symbols, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu->n_remainder_symbols_per_lane_fp = temp2_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + tu->last_partial_tu_fp = + drm_fixp_div(tu->n_remainder_symbols_per_lane_fp, + temp1_fp); + + if (tu->n_remainder_symbols_per_lane_fp != 0) + tu->remainder_symbols_exist = 1; + else + tu->remainder_symbols_exist = 0; + + temp1_fp = drm_fixp_from_fraction(tu->n_tus, tu->nlanes); + tu->n_tus_per_lane = drm_fixp2int(temp1_fp); + + tu->paired_tus = (int)((tu->n_tus_per_lane) / + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count)); + + tu->remainder_tus = tu->n_tus_per_lane - tu->paired_tus * + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count); + + if ((tu->remainder_tus - tu->i_upper_boundary_count) > 0) { + tu->remainder_tus_upper = tu->i_upper_boundary_count; + tu->remainder_tus_lower = tu->remainder_tus - + tu->i_upper_boundary_count; + } else { + tu->remainder_tus_upper = tu->remainder_tus; + tu->remainder_tus_lower = 0; + } + + temp = tu->paired_tus * (tu->i_upper_boundary_count * + tu->new_valid_boundary_link + + tu->i_lower_boundary_count * + (tu->new_valid_boundary_link - 1)) + + (tu->remainder_tus_upper * + tu->new_valid_boundary_link) + + (tu->remainder_tus_lower * + (tu->new_valid_boundary_link - 1)); + tu->total_valid_fp = drm_fixp_from_fraction(temp, 1); + + if (tu->remainder_symbols_exist) { + temp1_fp = tu->total_valid_fp + + tu->n_remainder_symbols_per_lane_fp; + temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1); + temp2_fp = temp2_fp + tu->last_partial_tu_fp; + temp1_fp = drm_fixp_div(temp1_fp, temp2_fp); + } else { + temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1); + temp1_fp = drm_fixp_div(tu->total_valid_fp, temp2_fp); + } + 
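+ /* + * Note: all of the arithmetic here uses the signed 32.32 fixed-point + * helpers from <drm/drm_fixed.h> (drm_fixp_from_fraction(), + * drm_fixp_mul(), drm_fixp_div(), drm_fixp2int_ceil()), since + * floating point is not available in the kernel. In float terms the + * driving quantity is ratio = (pclk * bpp / 8) / (nlanes * lclk): the + * fraction of each link symbol slot that carries pixel data. + */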
tu->effective_valid_fp = temp1_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + tu->n_n_err_fp = tu->effective_valid_fp - temp2_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + tu->n_err_fp = tu->average_valid2_fp - temp2_fp; + + tu->even_distribution = tu->n_tus % tu->nlanes == 0 ? 1 : 0; + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = tu->lwidth_fp; + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp); + + if (temp2_fp) + tu->n_tus_incl_last_incomplete_tu = drm_fixp2int_ceil(temp2_fp); + else + tu->n_tus_incl_last_incomplete_tu = 0; + + temp1 = 0; + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = tu->average_valid2_fp - temp2_fp; + temp2_fp = drm_fixp_from_fraction(tu->n_tus_incl_last_incomplete_tu, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + temp1 = drm_fixp2int_ceil(temp1_fp); + + temp = tu->i_upper_boundary_count * tu->nlanes; + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->new_valid_boundary_link, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(temp, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + if (temp2_fp) + temp2 = drm_fixp2int_ceil(temp2_fp); + else + temp2 = 0; + tu->extra_required_bytes_new_tmp = (int)(temp1 + temp2); + + temp1_fp = drm_fixp_from_fraction(8, tu->bpp); + temp2_fp = drm_fixp_from_fraction( + tu->extra_required_bytes_new_tmp, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + tu->extra_pclk_cycles_tmp = drm_fixp2int_ceil(temp1_fp); + else + tu->extra_pclk_cycles_tmp = 0; + + temp1_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles_tmp, 1); + temp2_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); + temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + if (temp1_fp) + tu->extra_pclk_cycles_in_link_clk_tmp = + drm_fixp2int_ceil(temp1_fp); + else + tu->extra_pclk_cycles_in_link_clk_tmp = 0; + + tu->filler_size_tmp = tu->tu_size - tu->new_valid_boundary_link; + + tu->lower_filler_size_tmp = tu->filler_size_tmp + 1; + + tu->delay_start_link_tmp = tu->extra_pclk_cycles_in_link_clk_tmp + + tu->lower_filler_size_tmp + + tu->extra_buffer_margin; + + temp1_fp = drm_fixp_from_fraction(tu->delay_start_link_tmp, 1); + tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp); + + compare_result_1 = _tu_param_compare(tu->n_n_err_fp, tu->diff_abs_fp); + if (compare_result_1 == 2) + compare_result_1 = 1; + else + compare_result_1 = 0; + + compare_result_2 = _tu_param_compare(tu->n_n_err_fp, tu->err_fp); + if (compare_result_2 == 2) + compare_result_2 = 1; + else + compare_result_2 = 0; + + compare_result_3 = _tu_param_compare(tu->hbp_time_fp, + tu->delay_start_time_fp); + if (compare_result_3 == 2) + compare_result_3 = 0; + else + compare_result_3 = 1; + + if (((tu->even_distribution == 1) || + ((tu->even_distribution_BF == 0) && + (tu->even_distribution_legacy == 0))) && + tu->n_err_fp >= 0 && tu->n_n_err_fp >= 0 && + compare_result_2 && + (compare_result_1 || (tu->min_hblank_violated == 1)) && + (tu->new_valid_boundary_link - 1) > 0 && + compare_result_3 && + (tu->delay_start_link_tmp <= 1023)) { + tu->upper_boundary_count = tu->i_upper_boundary_count; + tu->lower_boundary_count = tu->i_lower_boundary_count; + tu->err_fp = 
tu->n_n_err_fp; + tu->boundary_moderation_en = true; + tu->tu_size_desired = tu->tu_size; + tu->valid_boundary_link = tu->new_valid_boundary_link; + tu->effective_valid_recorded_fp = tu->effective_valid_fp; + tu->even_distribution_BF = 1; + tu->delay_start_link = tu->delay_start_link_tmp; + } else if (tu->boundary_mod_lower_err == 0) { + compare_result_1 = _tu_param_compare(tu->n_n_err_fp, + tu->diff_abs_fp); + if (compare_result_1 == 2) + tu->boundary_mod_lower_err = 1; + } +} + +static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in, + struct dp_vc_tu_mapping_table *tu_table) +{ + struct tu_algo_data tu; + int compare_result_1, compare_result_2; + u64 temp = 0; + s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0; + + s64 LCLK_FAST_SKEW_fp = drm_fixp_from_fraction(6, 10000); /* 0.0006 */ + s64 const_p49_fp = drm_fixp_from_fraction(49, 100); /* 0.49 */ + s64 const_p56_fp = drm_fixp_from_fraction(56, 100); /* 0.56 */ + s64 RATIO_SCALE_fp = drm_fixp_from_fraction(1001, 1000); + + u8 DP_BRUTE_FORCE = 1; + s64 BRUTE_FORCE_THRESHOLD_fp = drm_fixp_from_fraction(1, 10); /* 0.1 */ + uint EXTRA_PIXCLK_CYCLE_DELAY = 4; + uint HBLANK_MARGIN = 4; + + memset(&tu, 0, sizeof(tu)); + + dp_panel_update_tu_timings(in, &tu); + + tu.err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */ + + temp1_fp = drm_fixp_from_fraction(4, 1); + temp2_fp = drm_fixp_mul(temp1_fp, tu.lclk_fp); + temp_fp = drm_fixp_div(temp2_fp, tu.pclk_fp); + tu.extra_buffer_margin = drm_fixp2int_ceil(temp_fp); + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp2_fp = drm_fixp_mul(tu.pclk_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu.ratio_fp = drm_fixp_div(temp2_fp, tu.lclk_fp); + + tu.original_ratio_fp = tu.ratio_fp; + tu.boundary_moderation_en = false; + tu.upper_boundary_count = 0; + tu.lower_boundary_count = 0; + tu.i_upper_boundary_count = 0; + tu.i_lower_boundary_count = 0; + tu.valid_lower_boundary_link = 0; + tu.even_distribution_BF = 0; + tu.even_distribution_legacy = 0; + tu.even_distribution = 0; + tu.delay_start_time_fp = 0; + + tu.err_fp = drm_fixp_from_fraction(1000, 1); + tu.n_err_fp = 0; + tu.n_n_err_fp = 0; + + tu.ratio = drm_fixp2int(tu.ratio_fp); + temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); + div64_u64_rem(tu.lwidth_fp, temp1_fp, &temp2_fp); + if (temp2_fp != 0 && + !tu.ratio && tu.dsc_en == 0) { + tu.ratio_fp = drm_fixp_mul(tu.ratio_fp, RATIO_SCALE_fp); + tu.ratio = drm_fixp2int(tu.ratio_fp); + if (tu.ratio) + tu.ratio_fp = drm_fixp_from_fraction(1, 1); + } + + if (tu.ratio > 1) + tu.ratio = 1; + + if (tu.ratio == 1) + goto tu_size_calc; + + compare_result_1 = _tu_param_compare(tu.ratio_fp, const_p49_fp); + if (!compare_result_1 || compare_result_1 == 1) + compare_result_1 = 1; + else + compare_result_1 = 0; + + compare_result_2 = _tu_param_compare(tu.ratio_fp, const_p56_fp); + if (!compare_result_2 || compare_result_2 == 2) + compare_result_2 = 1; + else + compare_result_2 = 0; + + if (tu.dsc_en && compare_result_1 && compare_result_2) { + HBLANK_MARGIN += 4; + DRM_DEBUG_DP("Info: increase HBLANK_MARGIN to %d\n", + HBLANK_MARGIN); + } + +tu_size_calc: + for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) { + temp1_fp = drm_fixp_from_fraction(tu.tu_size, 1); + temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + temp = drm_fixp2int_ceil(temp2_fp); + temp1_fp = drm_fixp_from_fraction(temp, 1); + tu.n_err_fp = temp1_fp - temp2_fp; + + if (tu.n_err_fp < tu.err_fp) { + tu.err_fp = tu.n_err_fp; + tu.tu_size_desired = tu.tu_size; + } + } + + 
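+ /* + * In float terms, the loop above picks the TU size in [32..64] that + * minimizes ceil(ratio * tu_size) - ratio * tu_size, i.e. the + * transfer unit whose ideal number of valid data symbols sits closest + * beneath a whole number, wasting the fewest stuffing symbols per TU. + */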
tu.tu_size_minus1 = tu.tu_size_desired - 1; + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + tu.valid_boundary_link = drm_fixp2int_ceil(temp2_fp); + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp2_fp = tu.lwidth_fp; + temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu.n_tus = drm_fixp2int(temp2_fp); + if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000) + tu.n_tus += 1; + + tu.even_distribution_legacy = tu.n_tus % tu.nlanes == 0 ? 1 : 0; + DRM_DEBUG_DP("Info: n_sym = %d, num_of_tus = %d\n", + tu.valid_boundary_link, tu.n_tus); + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(tu.n_tus + 1, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + temp = drm_fixp2int(temp2_fp); + if (temp && temp2_fp) + tu.extra_bytes = drm_fixp2int_ceil(temp2_fp); + else + tu.extra_bytes = 0; + + temp1_fp = drm_fixp_from_fraction(tu.extra_bytes, 1); + temp2_fp = drm_fixp_from_fraction(8, tu.bpp); + temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + if (temp && temp1_fp) + tu.extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp); + else + tu.extra_pclk_cycles = drm_fixp2int(temp1_fp); + + temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp); + temp2_fp = drm_fixp_from_fraction(tu.extra_pclk_cycles, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + tu.extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp); + else + tu.extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp); + + tu.filler_size = tu.tu_size_desired - tu.valid_boundary_link; + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + tu.ratio_by_tu_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + + tu.delay_start_link = tu.extra_pclk_cycles_in_link_clk + + tu.filler_size + tu.extra_buffer_margin; + + tu.resulting_valid_fp = + drm_fixp_from_fraction(tu.valid_boundary_link, 1); + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = drm_fixp_div(tu.resulting_valid_fp, temp1_fp); + tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp; + + temp1_fp = drm_fixp_from_fraction(HBLANK_MARGIN, 1); + temp1_fp = tu.hbp_relative_to_pclk_fp - temp1_fp; + tu.hbp_time_fp = drm_fixp_div(temp1_fp, tu.pclk_fp); + + temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1); + tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp); + + compare_result_1 = _tu_param_compare(tu.hbp_time_fp, + tu.delay_start_time_fp); + if (compare_result_1 == 2) /* if (hbp_time_fp < delay_start_time_fp) */ + tu.min_hblank_violated = 1; + + tu.hactive_time_fp = drm_fixp_div(tu.lwidth_fp, tu.pclk_fp); + + compare_result_2 = _tu_param_compare(tu.hactive_time_fp, + tu.delay_start_time_fp); + if (compare_result_2 == 2) + tu.min_hblank_violated = 1; + + tu.delay_start_time_fp = 0; + + /* brute force */ + + tu.delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY; + tu.diff_abs_fp = tu.resulting_valid_fp - tu.ratio_by_tu_fp; + + temp = drm_fixp2int(tu.diff_abs_fp); + if (!temp && tu.diff_abs_fp <= 0xffff) + tu.diff_abs_fp = 0; + + /* if(diff_abs < 0) diff_abs *= -1 */ + if (tu.diff_abs_fp < 0) + tu.diff_abs_fp = drm_fixp_mul(tu.diff_abs_fp, -1); + + tu.boundary_mod_lower_err = 0; + if ((tu.diff_abs_fp != 0 && + ((tu.diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) || + 
(tu.even_distribution_legacy == 0) || + (DP_BRUTE_FORCE == 1))) || + (tu.min_hblank_violated == 1)) { + do { + tu.err_fp = drm_fixp_from_fraction(1000, 1); + + temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp); + temp2_fp = drm_fixp_from_fraction( + tu.delay_start_link_extra_pixclk, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + tu.extra_buffer_margin = + drm_fixp2int_ceil(temp1_fp); + else + tu.extra_buffer_margin = 0; + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp); + + if (temp1_fp) + tu.n_symbols = drm_fixp2int_ceil(temp1_fp); + else + tu.n_symbols = 0; + + for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) { + for (tu.i_upper_boundary_count = 1; + tu.i_upper_boundary_count <= 15; + tu.i_upper_boundary_count++) { + for (tu.i_lower_boundary_count = 1; + tu.i_lower_boundary_count <= 15; + tu.i_lower_boundary_count++) { + _tu_valid_boundary_calc(&tu); + } + } + } + tu.delay_start_link_extra_pixclk--; + } while (tu.boundary_moderation_en != true && + tu.boundary_mod_lower_err == 1 && + tu.delay_start_link_extra_pixclk != 0); + + if (tu.boundary_moderation_en == true) { + temp1_fp = drm_fixp_from_fraction( + (tu.upper_boundary_count * + tu.valid_boundary_link + + tu.lower_boundary_count * + (tu.valid_boundary_link - 1)), 1); + temp2_fp = drm_fixp_from_fraction( + (tu.upper_boundary_count + + tu.lower_boundary_count), 1); + tu.resulting_valid_fp = + drm_fixp_div(temp1_fp, temp2_fp); + + temp1_fp = drm_fixp_from_fraction( + tu.tu_size_desired, 1); + tu.ratio_by_tu_fp = + drm_fixp_mul(tu.original_ratio_fp, temp1_fp); + + tu.valid_lower_boundary_link = + tu.valid_boundary_link - 1; + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, + tu.resulting_valid_fp); + tu.n_tus = drm_fixp2int(temp2_fp); + + tu.tu_size_minus1 = tu.tu_size_desired - 1; + tu.even_distribution_BF = 1; + + temp1_fp = + drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = + drm_fixp_div(tu.resulting_valid_fp, temp1_fp); + tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp; + } + } + + temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu.lwidth_fp); + + if (temp2_fp) + temp = drm_fixp2int_ceil(temp2_fp); + else + temp = 0; + + temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); + temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp2_fp = drm_fixp_div(temp1_fp, temp2_fp); + temp1_fp = drm_fixp_from_fraction(temp, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + temp = drm_fixp2int(temp2_fp); + + if (tu.async_en) + tu.delay_start_link += (int)temp; + + temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1); + tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp); + + /* OUTPUTS */ + tu_table->valid_boundary_link = tu.valid_boundary_link; + tu_table->delay_start_link = tu.delay_start_link; + tu_table->boundary_moderation_en = tu.boundary_moderation_en; + tu_table->valid_lower_boundary_link = tu.valid_lower_boundary_link; + tu_table->upper_boundary_count = tu.upper_boundary_count; + tu_table->lower_boundary_count = tu.lower_boundary_count; + tu_table->tu_size_minus1 = tu.tu_size_minus1; + + DRM_DEBUG_DP("TU: valid_boundary_link: %d\n", + tu_table->valid_boundary_link); + DRM_DEBUG_DP("TU: delay_start_link: %d\n", + tu_table->delay_start_link); + DRM_DEBUG_DP("TU: boundary_moderation_en: %d\n", + tu_table->boundary_moderation_en); + DRM_DEBUG_DP("TU: valid_lower_boundary_link: %d\n", + 
tu_table->valid_lower_boundary_link); + DRM_DEBUG_DP("TU: upper_boundary_count: %d\n", + tu_table->upper_boundary_count); + DRM_DEBUG_DP("TU: lower_boundary_count: %d\n", + tu_table->lower_boundary_count); + DRM_DEBUG_DP("TU: tu_size_minus1: %d\n", tu_table->tu_size_minus1); +} + +static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl, + struct dp_vc_tu_mapping_table *tu_table) +{ + struct dp_tu_calc_input in; + struct drm_display_mode *drm_mode; + + drm_mode = &ctrl->panel->dp_mode.drm_mode; + + in.lclk = ctrl->link->link_params.rate / 1000; + in.pclk_khz = drm_mode->clock; + in.hactive = drm_mode->hdisplay; + in.hporch = drm_mode->htotal - drm_mode->hdisplay; + in.nlanes = ctrl->link->link_params.num_lanes; + in.bpp = ctrl->panel->dp_mode.bpp; + in.pixel_enc = 444; + in.dsc_en = 0; + in.async_en = 0; + in.fec_en = 0; + in.num_of_dsc_slices = 0; + in.compress_ratio = 100; + + _dp_ctrl_calc_tu(&in, tu_table); +} + +static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl) +{ + u32 dp_tu = 0x0; + u32 valid_boundary = 0x0; + u32 valid_boundary2 = 0x0; + struct dp_vc_tu_mapping_table tu_calc_table; + + dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table); + + dp_tu |= tu_calc_table.tu_size_minus1; + valid_boundary |= tu_calc_table.valid_boundary_link; + valid_boundary |= (tu_calc_table.delay_start_link << 16); + + valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1); + valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16); + valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20); + + if (tu_calc_table.boundary_moderation_en) + valid_boundary2 |= BIT(0); + + pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n", + dp_tu, valid_boundary, valid_boundary2); + + dp_catalog_ctrl_update_transfer_unit(ctrl->catalog, + dp_tu, valid_boundary, valid_boundary2); +} + +static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + if (!wait_for_completion_timeout(&ctrl->video_comp, + WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES)) { + DRM_ERROR("wait4video timedout\n"); + ret = -ETIMEDOUT; + } + return ret; +} + +static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl) +{ + struct dp_link *link = ctrl->link; + int ret = 0, lane, lane_cnt; + u8 buf[4]; + u32 max_level_reached = 0; + u32 voltage_swing_level = link->phy_params.v_level; + u32 pre_emphasis_level = link->phy_params.p_level; + + ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog, + voltage_swing_level, pre_emphasis_level); + + if (ret) + return ret; + + if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) { + DRM_DEBUG_DP("max. voltage swing level reached %d\n", + voltage_swing_level); + max_level_reached |= DP_TRAIN_MAX_SWING_REACHED; + } + + if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) { + DRM_DEBUG_DP("max. 
pre-emphasis level reached %d\n", + pre_emphasis_level); + max_level_reached |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + } + + pre_emphasis_level <<= DP_TRAIN_PRE_EMPHASIS_SHIFT; + + lane_cnt = ctrl->link->link_params.num_lanes; + for (lane = 0; lane < lane_cnt; lane++) + buf[lane] = voltage_swing_level | pre_emphasis_level + | max_level_reached; + + DRM_DEBUG_DP("sink: p|v=0x%x\n", voltage_swing_level + | pre_emphasis_level); + ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET, + buf, lane_cnt); + if (ret == lane_cnt) + ret = 0; + + return ret; +} + +static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl, + u8 pattern) +{ + u8 buf; + int ret = 0; + + DRM_DEBUG_DP("sink: pattern=%x\n", pattern); + + buf = pattern; + + if (pattern && pattern != DP_TRAINING_PATTERN_4) + buf |= DP_LINK_SCRAMBLING_DISABLE; + + ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf); + return ret == 1; +} + +static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, + u8 *link_status) +{ + int len = 0; + u32 const offset = DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS; + u32 link_status_read_max_retries = 100; + + while (--link_status_read_max_retries) { + len = drm_dp_dpcd_read_link_status(ctrl->aux, + link_status); + if (len != DP_LINK_STATUS_SIZE) { + DRM_ERROR("DP link status read failed, err: %d\n", len); + return len; + } + + if (!(link_status[offset] & DP_LINK_STATUS_UPDATED)) + return 0; + } + + return -ETIMEDOUT; +} + +static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, + struct dp_cr_status *cr, int *training_step) +{ + int tries, old_v_level, ret = 0; + u8 link_status[DP_LINK_STATUS_SIZE]; + int const maximum_retries = 4; + + dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + + *training_step = DP_TRAINING_1; + + ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, DP_TRAINING_PATTERN_1); + if (ret) + return ret; + dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 | + DP_LINK_SCRAMBLING_DISABLE); + + ret = dp_ctrl_update_vx_px(ctrl); + if (ret) + return ret; + + old_v_level = ctrl->link->phy_params.v_level; + for (tries = 0; tries < maximum_retries; tries++) { + drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd); + + ret = dp_ctrl_read_link_status(ctrl, link_status); + if (ret) + return ret; + + cr->lane_0_1 = link_status[0]; + cr->lane_2_3 = link_status[1]; + + if (drm_dp_clock_recovery_ok(link_status, + ctrl->link->link_params.num_lanes)) { + return 0; + } + + if (ctrl->link->phy_params.v_level >= + DP_TRAIN_VOLTAGE_SWING_MAX) { + DRM_ERROR_RATELIMITED("max v_level reached\n"); + return -EAGAIN; + } + + if (old_v_level != ctrl->link->phy_params.v_level) { + tries = 0; + old_v_level = ctrl->link->phy_params.v_level; + } + + DRM_DEBUG_DP("clock recovery not done, adjusting vx px\n"); + + dp_link_adjust_levels(ctrl->link, link_status); + ret = dp_ctrl_update_vx_px(ctrl); + if (ret) + return ret; + } + + DRM_ERROR("max tries reached\n"); + return -ETIMEDOUT; +} + +static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + switch (ctrl->link->link_params.rate) { + case 810000: + ctrl->link->link_params.rate = 540000; + break; + case 540000: + ctrl->link->link_params.rate = 270000; + break; + case 270000: + ctrl->link->link_params.rate = 162000; + break; + case 162000: + default: + ret = -EINVAL; + break; + } + + if (!ret) + DRM_DEBUG_DP("new rate=%d\n", ctrl->link->link_params.rate); + + return ret; +} + +static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl) +{ + if
(ctrl->link->link_params.num_lanes == 1) + return -1; + + ctrl->link->link_params.num_lanes /= 2; + ctrl->link->link_params.rate = ctrl->panel->link_info.rate; + + ctrl->link->phy_params.p_level = 0; + ctrl->link->phy_params.v_level = 0; + + return 0; +} + +static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl) +{ + dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE); + drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd); +} + +static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, + struct dp_cr_status *cr, int *training_step) +{ + int tries = 0, ret = 0; + char pattern; + int const maximum_retries = 5; + u8 link_status[DP_LINK_STATUS_SIZE]; + + dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + + *training_step = DP_TRAINING_2; + + if (drm_dp_tps3_supported(ctrl->panel->dpcd)) + pattern = DP_TRAINING_PATTERN_3; + else + pattern = DP_TRAINING_PATTERN_2; + + ret = dp_ctrl_update_vx_px(ctrl); + if (ret) + return ret; + + ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern); + if (ret) + return ret; + + dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN); + + for (tries = 0; tries <= maximum_retries; tries++) { + drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd); + + ret = dp_ctrl_read_link_status(ctrl, link_status); + if (ret) + return ret; + cr->lane_0_1 = link_status[0]; + cr->lane_2_3 = link_status[1]; + + if (drm_dp_channel_eq_ok(link_status, + ctrl->link->link_params.num_lanes)) { + return 0; + } + + dp_link_adjust_levels(ctrl->link, link_status); + ret = dp_ctrl_update_vx_px(ctrl); + if (ret) + return ret; + + } + + return -ETIMEDOUT; +} + +static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl); + +static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, + struct dp_cr_status *cr, int *training_step) +{ + int ret = 0; + u8 encoding = DP_SET_ANSI_8B10B; + struct dp_link_info link_info = {0}; + + dp_ctrl_config_ctrl(ctrl); + + link_info.num_lanes = ctrl->link->link_params.num_lanes; + link_info.rate = ctrl->link->link_params.rate; + link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING; + + dp_aux_link_configure(ctrl->aux, &link_info); + drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, + &encoding, 1); + + ret = dp_ctrl_link_train_1(ctrl, cr, training_step); + if (ret) { + DRM_ERROR("link training #1 failed. ret=%d\n", ret); + goto end; + } + + /* print success info as this is a result of user initiated action */ + DRM_DEBUG_DP("link training #1 successful\n"); + + ret = dp_ctrl_link_train_2(ctrl, cr, training_step); + if (ret) { + DRM_ERROR("link training #2 failed. ret=%d\n", ret); + goto end; + } + + /* print success info as this is a result of user initiated action */ + DRM_DEBUG_DP("link training #2 successful\n"); + +end: + dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + + return ret; +} + +static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, + struct dp_cr_status *cr, int *training_step) +{ + int ret = 0; + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) + return ret; + + /* + * As part of previous calls, DP controller state might have + * transitioned to PUSH_IDLE. In order to start transmitting + * a link training pattern, we have to first do soft reset. 
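+ * (dp_catalog_ctrl_reset() below is a controller-level soft reset: it + * does not tear down the PHY or the link clocks, which are handled + * separately in dp_ctrl_off(), so training restarts from a clean + * mainlink state.)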
+ */ + dp_catalog_ctrl_reset(ctrl->catalog); + + ret = dp_ctrl_link_train(ctrl, cr, training_step); + + return ret; +} + +static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl, + enum dp_pm_type module, char *name, unsigned long rate) +{ + u32 num = ctrl->parser->mp[module].num_clk; + struct dss_clk *cfg = ctrl->parser->mp[module].clk_config; + + while (num && strcmp(cfg->clk_name, name)) { + num--; + cfg++; + } + + DRM_DEBUG_DP("setting rate=%lu on clk=%s\n", rate, name); + + if (num) + cfg->rate = rate; + else + DRM_ERROR("%s clock doesn't exist to set rate %lu\n", + name, rate); +} + +static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + struct dp_io *dp_io = &ctrl->parser->io; + struct phy *phy = dp_io->phy; + struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp; + + opts_dp->lanes = ctrl->link->link_params.num_lanes; + opts_dp->link_rate = ctrl->link->link_params.rate / 100; + dp_ctrl_set_clock_rate(ctrl, DP_CTRL_PM, "ctrl_link", + ctrl->link->link_params.rate * 1000); + + phy_configure(phy, &dp_io->phy_opts); + phy_power_on(phy); + + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, true); + if (ret) + DRM_ERROR("Unable to start link clocks. ret=%d\n", ret); + + DRM_DEBUG_DP("link rate=%d pixel_clk=%d\n", + ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate); + + return ret; +} + +static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", + ctrl->dp_ctrl.pixel_rate * 1000); + + ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true); + if (ret) + DRM_ERROR("Unable to start pixel clocks. ret=%d\n", ret); + + DRM_DEBUG_DP("link rate=%d pixel_clk=%d\n", + ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate); + + return ret; +} + +int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip) +{ + struct dp_ctrl_private *ctrl; + struct dp_io *dp_io; + struct phy *phy; + + if (!dp_ctrl) { + DRM_ERROR("Invalid input data\n"); + return -EINVAL; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + dp_io = &ctrl->parser->io; + phy = dp_io->phy; + + ctrl->dp_ctrl.orientation = flip; + + dp_catalog_ctrl_phy_reset(ctrl->catalog); + phy_init(phy); + dp_catalog_ctrl_enable_irq(ctrl->catalog, true); + + return 0; +} + +/** + * dp_ctrl_host_deinit() - Uninitialize DP controller + * @dp_ctrl: Display Port Driver data + * + * Perform required steps to uninitialize DP controller + * and its resources. + */ +void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) { + DRM_ERROR("Invalid input data\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + dp_catalog_ctrl_enable_irq(ctrl->catalog, false); + + DRM_DEBUG_DP("Host deinitialized successfully\n"); +} + +static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl) +{ + u8 *dpcd = ctrl->panel->dpcd; + u32 edid_quirks = 0; + + edid_quirks = drm_dp_get_edid_quirks(ctrl->panel->edid); + /* + * For better interop experience, use a fixed NVID=0x8000 + * whenever connected to a VGA dongle downstream.
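+ * (DP_DPCD_QUIRK_CONSTANT_N marks branch devices known to need a + * constant N in the stream M/N programming; for those, fixed_nvid is + * passed to dp_catalog_ctrl_config_msa() in dp_ctrl_on_stream().)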
+ */ + if (drm_dp_is_branch(dpcd)) + return (drm_dp_has_quirk(&ctrl->panel->desc, edid_quirks, + DP_DPCD_QUIRK_CONSTANT_N)); + + return false; +} + +static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + struct dp_io *dp_io = &ctrl->parser->io; + struct phy *phy = dp_io->phy; + struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp; + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + opts_dp->lanes = ctrl->link->link_params.num_lanes; + phy_configure(phy, &dp_io->phy_opts); + /* + * Disable and re-enable the mainlink clock since the + * link clock might have been adjusted as part of the + * link maintenance. + */ + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false); + if (ret) { + DRM_ERROR("Failed to disable clocks. ret=%d\n", ret); + return ret; + } + phy_power_off(phy); + /* hw recommended delay before re-enabling clocks */ + msleep(20); + + ret = dp_ctrl_enable_mainlink_clocks(ctrl); + if (ret) { + DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret); + return ret; + } + + return ret; +} + +static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + struct dp_cr_status cr; + int training_step = DP_TRAINING_NONE; + + dp_ctrl_push_idle(&ctrl->dp_ctrl); + dp_catalog_ctrl_reset(ctrl->catalog); + + ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + + ret = dp_ctrl_setup_main_link(ctrl, &cr, &training_step); + if (ret) + goto end; + + dp_ctrl_clear_training_pattern(ctrl); + + dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + + ret = dp_ctrl_wait4video_ready(ctrl); +end: + return ret; +} + +static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + if (!ctrl->link->phy_params.phy_test_pattern_sel) { + DRM_DEBUG_DP("no test pattern selected by sink\n"); + return ret; + } + + /* + * The global reset will need DP link related clocks to be + * running. Add the global reset just before disabling the + * link clocks and core clocks. 
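+ * (Concretely: dp_ctrl_off() below performs that reset while the link + * and stream clocks are still enabled, then dp_ctrl_on_link() and + * dp_ctrl_on_stream() bring the link back up with the PHY test + * parameters requested by the sink.)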
+ */ + ret = dp_ctrl_off(&ctrl->dp_ctrl); + if (ret) { + DRM_ERROR("failed to disable DP controller\n"); + return ret; + } + + ret = dp_ctrl_on_link(&ctrl->dp_ctrl); + if (!ret) + ret = dp_ctrl_on_stream(&ctrl->dp_ctrl); + else + DRM_ERROR("failed to enable DP link controller\n"); + + return ret; +} + +static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) +{ + bool success = false; + u32 pattern_sent = 0x0; + u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel; + + DRM_DEBUG_DP("request: 0x%x\n", pattern_requested); + + if (dp_catalog_ctrl_update_vx_px(ctrl->catalog, + ctrl->link->phy_params.v_level, + ctrl->link->phy_params.p_level)) { + DRM_ERROR("Failed to set v/p levels\n"); + return false; + } + dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested); + dp_ctrl_update_vx_px(ctrl); + dp_link_send_test_response(ctrl->link); + + pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog); + + switch (pattern_sent) { + case MR_LINK_TRAINING1: + success = (pattern_requested == + DP_PHY_TEST_PATTERN_D10_2); + break; + case MR_LINK_SYMBOL_ERM: + success = ((pattern_requested == + DP_PHY_TEST_PATTERN_ERROR_COUNT) || + (pattern_requested == + DP_PHY_TEST_PATTERN_CP2520)); + break; + case MR_LINK_PRBS7: + success = (pattern_requested == + DP_PHY_TEST_PATTERN_PRBS7); + break; + case MR_LINK_CUSTOM80: + success = (pattern_requested == + DP_PHY_TEST_PATTERN_80BIT_CUSTOM); + break; + case MR_LINK_TRAINING4: + success = (pattern_requested == + DP_PHY_TEST_PATTERN_SEL_MASK); + break; + default: + success = false; + } + + DRM_DEBUG_DP("%s: test->0x%x\n", success ? "success" : "failed", + pattern_requested); + return success; +} + +void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + u32 sink_request = 0x0; + + if (!dp_ctrl) { + DRM_ERROR("invalid input\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + sink_request = ctrl->link->sink_request; + + if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + DRM_DEBUG_DP("PHY_TEST_PATTERN request\n"); + if (dp_ctrl_process_phy_test_request(ctrl)) { + DRM_ERROR("process phy_test_req failed\n"); + return; + } + } + + if (sink_request & DP_LINK_STATUS_UPDATED) { + if (dp_ctrl_link_maintenance(ctrl)) { + DRM_ERROR("LM failed: TEST_LINK_TRAINING\n"); + return; + } + } + + if (sink_request & DP_TEST_LINK_TRAINING) { + dp_link_send_test_response(ctrl->link); + if (dp_ctrl_link_maintenance(ctrl)) { + DRM_ERROR("LM failed: TEST_LINK_TRAINING\n"); + return; + } + } +} + +int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) +{ + int rc = 0; + struct dp_ctrl_private *ctrl; + u32 rate = 0; + int link_train_max_retries = 5; + u32 const phy_cts_pixel_clk_khz = 148500; + struct dp_cr_status cr; + unsigned int training_step; + + if (!dp_ctrl) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + rate = ctrl->panel->link_info.rate; + + dp_power_clk_enable(ctrl->power, DP_CORE_PM, true); + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + DRM_DEBUG_DP("using phy test link parameters\n"); + if (!ctrl->panel->dp_mode.drm_mode.clock) + ctrl->dp_ctrl.pixel_rate = phy_cts_pixel_clk_khz; + } else { + ctrl->link->link_params.rate = rate; + ctrl->link->link_params.num_lanes = + ctrl->panel->link_info.num_lanes; + ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + } + + DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n", + ctrl->link->link_params.rate, + ctrl->link->link_params.num_lanes, 
ctrl->dp_ctrl.pixel_rate); + + rc = dp_ctrl_enable_mainlink_clocks(ctrl); + if (rc) + return rc; + + ctrl->link->phy_params.p_level = 0; + ctrl->link->phy_params.v_level = 0; + + while (--link_train_max_retries && + !atomic_read(&ctrl->dp_ctrl.aborted)) { + rc = dp_ctrl_reinitialize_mainlink(ctrl); + if (rc) { + DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", + rc); + break; + } + + training_step = DP_TRAINING_NONE; + rc = dp_ctrl_setup_main_link(ctrl, &cr, &training_step); + if (rc == 0) { + /* training completed successfully */ + break; + } else if (training_step == DP_TRAINING_1) { + /* link train_1 failed */ + rc = dp_ctrl_link_rate_down_shift(ctrl); + if (rc < 0) { /* already in RBR = 1.6G */ + if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) { + /* + * some lanes are ready, + * reduce lane number + */ + rc = dp_ctrl_link_lane_down_shift(ctrl); + if (rc < 0) { /* lane == 1 already */ + /* end with failure */ + break; + } + } else { + /* end with failure */ + break; /* lane == 1 already */ + } + } + } else if (training_step == DP_TRAINING_2) { + /* link train_2 failed, lower lane rate */ + rc = dp_ctrl_link_lane_down_shift(ctrl); + if (rc < 0) { + /* end with failure */ + break; /* lane == 1 already */ + } + } + } + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) + return rc; + + /* stop txing train pattern */ + dp_ctrl_clear_training_pattern(ctrl); + + /* + * keep transmitting idle pattern until video ready + * to avoid main link from loss of sync + */ + if (rc == 0) /* link train successfully */ + dp_ctrl_push_idle(dp_ctrl); + + return rc; +} + +int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl) +{ + u32 rate = 0; + int ret = 0; + bool mainlink_ready = false; + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + rate = ctrl->panel->link_info.rate; + + ctrl->link->link_params.rate = rate; + ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes; + ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + + DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n", + ctrl->link->link_params.rate, + ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate); + + if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */ + ret = dp_ctrl_enable_mainlink_clocks(ctrl); + if (ret) { + DRM_ERROR("Failed to start link clocks. ret=%d\n", ret); + goto end; + } + } + + ret = dp_ctrl_enable_stream_clocks(ctrl); + if (ret) { + DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret); + goto end; + } + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + dp_ctrl_send_phy_test_pattern(ctrl); + return 0; + } + + /* + * Set up transfer unit values and set controller state to send + * video. + */ + dp_ctrl_configure_source_params(ctrl); + + dp_catalog_ctrl_config_msa(ctrl->catalog, + ctrl->link->link_params.rate, + ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl)); + + reinit_completion(&ctrl->video_comp); + + dp_ctrl_setup_tr_unit(ctrl); + + dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + + ret = dp_ctrl_wait4video_ready(ctrl); + if (ret) + return ret; + + mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog); + DRM_DEBUG_DP("mainlink %s\n", mainlink_ready ? 
"READY" : "NOT READY"); + +end: + return ret; +} + +int dp_ctrl_off(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + struct dp_io *dp_io; + struct phy *phy; + int ret = 0; + + if (!dp_ctrl) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + dp_io = &ctrl->parser->io; + phy = dp_io->phy; + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + + dp_catalog_ctrl_reset(ctrl->catalog); + + ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false); + if (ret) + DRM_ERROR("Failed to disable pixel clocks. ret=%d\n", ret); + + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false); + if (ret) { + DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret); + } + + phy_power_off(phy); + phy_exit(phy); + + DRM_DEBUG_DP("DP off done\n"); + return ret; +} + +void dp_ctrl_isr(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + u32 isr; + + if (!dp_ctrl) + return; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog); + + if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) { + DRM_DEBUG_DP("dp_video_ready\n"); + complete(&ctrl->video_comp); + } + + if (isr & DP_CTRL_INTR_IDLE_PATTERN_SENT) { + DRM_DEBUG_DP("idle_patterns_sent\n"); + complete(&ctrl->idle_comp); + } +} + +struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, + struct dp_panel *panel, struct drm_dp_aux *aux, + struct dp_power *power, struct dp_catalog *catalog, + struct dp_parser *parser) +{ + struct dp_ctrl_private *ctrl; + + if (!dev || !panel || !aux || + !link || !catalog) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) { + DRM_ERROR("Mem allocation failure\n"); + return ERR_PTR(-ENOMEM); + } + + init_completion(&ctrl->idle_comp); + init_completion(&ctrl->video_comp); + + /* in parameters */ + ctrl->parser = parser; + ctrl->panel = panel; + ctrl->power = power; + ctrl->aux = aux; + ctrl->link = link; + ctrl->catalog = catalog; + ctrl->dev = dev; + + return &ctrl->dp_ctrl; +} + +void dp_ctrl_put(struct dp_ctrl *dp_ctrl) +{ +} diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h new file mode 100644 index 000000000000..f60ba93c8678 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_CTRL_H_ +#define _DP_CTRL_H_ + +#include "dp_aux.h" +#include "dp_panel.h" +#include "dp_link.h" +#include "dp_parser.h" +#include "dp_power.h" +#include "dp_catalog.h" + +struct dp_ctrl { + bool orientation; + atomic_t aborted; + u32 pixel_rate; +}; + +int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip); +void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl); +int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl); +int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl); +int dp_ctrl_off(struct dp_ctrl *dp_ctrl); +void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl); +void dp_ctrl_isr(struct dp_ctrl *dp_ctrl); +void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl); +struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, + struct dp_panel *panel, struct drm_dp_aux *aux, + struct dp_power *power, struct dp_catalog *catalog, + struct dp_parser *parser); +void dp_ctrl_put(struct dp_ctrl *dp_ctrl); + +#endif /* _DP_CTRL_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c new file mode 100644 index 000000000000..84670bcdcfea --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt)"[drm-dp] %s: " fmt, __func__ + +#include <linux/debugfs.h> +#include <drm/drm_connector.h> +#include <drm/drm_file.h> + +#include "dp_parser.h" +#include "dp_catalog.h" +#include "dp_aux.h" +#include "dp_ctrl.h" +#include "dp_debug.h" +#include "dp_display.h" + +#define DEBUG_NAME "msm_dp" + +struct dp_debug_private { + struct dentry *root; + + struct dp_usbpd *usbpd; + struct dp_link *link; + struct dp_panel *panel; + struct drm_connector **connector; + struct device *dev; + struct drm_device *drm_dev; + + struct dp_debug dp_debug; +}; + +static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len) +{ + if (rc >= *max_size) { + DRM_ERROR("buffer overflow\n"); + return -EINVAL; + } + *len += rc; + *max_size = SZ_4K - *len; + + return 0; +} + +static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff, + size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0, rc = 0; + u64 lclk = 0; + u32 max_size = SZ_4K; + u32 link_params_rate; + struct drm_display_mode *drm_mode; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + drm_mode = &debug->panel->dp_mode.drm_mode; + + rc = snprintf(buf + len, max_size, "\tname = %s\n", DEBUG_NAME); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tdp_panel\n\t\tmax_pclk_khz = %d\n", + debug->panel->max_pclk_khz); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tdrm_dp_link\n\t\trate = %u\n", + debug->panel->link_info.rate); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tnum_lanes = %u\n", + debug->panel->link_info.num_lanes); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tcapabilities = %lu\n", + debug->panel->link_info.capabilities); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tdp_panel_info:\n\t\tactive = %dx%d\n", + drm_mode->hdisplay, + drm_mode->vdisplay); + if 
(dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tback_porch = %dx%d\n", + drm_mode->htotal - drm_mode->hsync_end, + drm_mode->vtotal - drm_mode->vsync_end); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tfront_porch = %dx%d\n", + drm_mode->hsync_start - drm_mode->hdisplay, + drm_mode->vsync_start - drm_mode->vdisplay); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tsync_width = %dx%d\n", + drm_mode->hsync_end - drm_mode->hsync_start, + drm_mode->vsync_end - drm_mode->vsync_start); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tactive_low = %dx%d\n", + debug->panel->dp_mode.h_active_low, + debug->panel->dp_mode.v_active_low); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\th_skew = %d\n", + drm_mode->hskew); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\trefresh rate = %d\n", + drm_mode_vrefresh(drm_mode)); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tpixel clock khz = %d\n", + drm_mode->clock); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tbpp = %d\n", + debug->panel->dp_mode.bpp); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + /* Link Information */ + rc = snprintf(buf + len, max_size, + "\tdp_link:\n\t\ttest_requested = %d\n", + debug->link->sink_request); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tnum_lanes = %d\n", + debug->link->link_params.num_lanes); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + link_params_rate = debug->link->link_params.rate; + rc = snprintf(buf + len, max_size, + "\t\tbw_code = %d\n", + drm_dp_link_rate_to_bw_code(link_params_rate)); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + lclk = debug->link->link_params.rate * 1000; + rc = snprintf(buf + len, max_size, + "\t\tlclk = %lld\n", lclk); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tv_level = %d\n", + debug->link->phy_params.v_level); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tp_level = %d\n", + debug->link->phy_params.p_level); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + if (copy_to_user(user_buff, buf, len)) + goto error; + + *ppos += len; + + kfree(buf); + return len; + error: + kfree(buf); + return -EINVAL; +} + +static int dp_test_data_show(struct seq_file *m, void *data) +{ + struct drm_device *dev; + struct dp_debug_private *debug; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + u32 bpc; + + debug = m->private; + dev = debug->drm_dev; + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + if (connector->status == connector_status_connected) { + bpc = debug->link->test_video.test_bit_depth; + seq_printf(m, "hdisplay: %d\n", + 
debug->link->test_video.test_h_width); + seq_printf(m, "vdisplay: %d\n", + debug->link->test_video.test_v_height); + seq_printf(m, "bpc: %u\n", + dp_link_bit_depth_to_bpc(bpc)); + } else + seq_puts(m, "0"); + } + + drm_connector_list_iter_end(&conn_iter); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dp_test_data); + +static int dp_test_type_show(struct seq_file *m, void *data) +{ + struct dp_debug_private *debug = m->private; + struct drm_device *dev = debug->drm_dev; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + if (connector->status == connector_status_connected) + seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN); + else + seq_puts(m, "0"); + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dp_test_type); + +static ssize_t dp_test_active_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + char *input_buffer; + int status = 0; + struct dp_debug_private *debug; + struct drm_device *dev; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + int val = 0; + + debug = ((struct seq_file *)file->private_data)->private; + dev = debug->drm_dev; + + if (len == 0) + return 0; + + input_buffer = memdup_user_nul(ubuf, len); + if (IS_ERR(input_buffer)) + return PTR_ERR(input_buffer); + + DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + if (connector->status == connector_status_connected) { + status = kstrtoint(input_buffer, 10, &val); + if (status < 0) + break; + DRM_DEBUG_DRIVER("Got %d for test active\n", val); + /* To prevent erroneous activation of the compliance + * testing code, only accept an actual value of 1 here + */ + if (val == 1) + debug->panel->video_test = true; + else + debug->panel->video_test = false; + } + } + drm_connector_list_iter_end(&conn_iter); + kfree(input_buffer); + if (status < 0) + return status; + + *offp += len; + return len; +} + +static int dp_test_active_show(struct seq_file *m, void *data) +{ + struct dp_debug_private *debug = m->private; + struct drm_device *dev = debug->drm_dev; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + if (connector->status == connector_status_connected) { + if (debug->panel->video_test) + seq_puts(m, "1"); + else + seq_puts(m, "0"); + } else + seq_puts(m, "0"); + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} + +static int dp_test_active_open(struct inode *inode, + struct file *file) +{ + return single_open(file, dp_test_active_show, + inode->i_private); +} + +static const struct file_operations dp_debug_fops = { + .open = simple_open, + .read = dp_debug_read_info, +}; + +static const struct file_operations test_active_fops = { + .owner = THIS_MODULE, + .open = dp_test_active_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = dp_test_active_write +}; + +static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor) +{ + int rc = 0; + 
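+ /* + * All four nodes land under the DRM minor's debugfs root (typically + * /sys/kernel/debug/dri/<minor>/): "dp_debug" for the read-only state + * dump, plus the msm_dp_test_* files used for compliance testing. + */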
struct dp_debug_private *debug = container_of(dp_debug, + struct dp_debug_private, dp_debug); + struct dentry *file; + struct dentry *test_active; + struct dentry *test_data, *test_type; + + file = debugfs_create_file("dp_debug", 0444, minor->debugfs_root, + debug, &dp_debug_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DRM_ERROR("[%s] debugfs create file failed, rc=%d\n", + DEBUG_NAME, rc); + } + + test_active = debugfs_create_file("msm_dp_test_active", 0444, + minor->debugfs_root, + debug, &test_active_fops); + if (IS_ERR_OR_NULL(test_active)) { + rc = PTR_ERR(test_active); + DRM_ERROR("[%s] debugfs test_active failed, rc=%d\n", + DEBUG_NAME, rc); + } + + test_data = debugfs_create_file("msm_dp_test_data", 0444, + minor->debugfs_root, + debug, &dp_test_data_fops); + if (IS_ERR_OR_NULL(test_data)) { + rc = PTR_ERR(test_data); + DRM_ERROR("[%s] debugfs test_data failed, rc=%d\n", + DEBUG_NAME, rc); + } + + test_type = debugfs_create_file("msm_dp_test_type", 0444, + minor->debugfs_root, + debug, &dp_test_type_fops); + if (IS_ERR_OR_NULL(test_type)) { + rc = PTR_ERR(test_type); + DRM_ERROR("[%s] debugfs test_type failed, rc=%d\n", + DEBUG_NAME, rc); + } + + debug->root = minor->debugfs_root; + + return rc; +} + +struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, + struct dp_usbpd *usbpd, struct dp_link *link, + struct drm_connector **connector, struct drm_minor *minor) +{ + int rc = 0; + struct dp_debug_private *debug; + struct dp_debug *dp_debug; + + if (!dev || !panel || !usbpd || !link) { + DRM_ERROR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL); + if (!debug) { + rc = -ENOMEM; + goto error; + } + + debug->dp_debug.debug_en = false; + debug->usbpd = usbpd; + debug->link = link; + debug->panel = panel; + debug->dev = dev; + debug->drm_dev = minor->dev; + debug->connector = connector; + + dp_debug = &debug->dp_debug; + dp_debug->vdisplay = 0; + dp_debug->hdisplay = 0; + dp_debug->vrefresh = 0; + + rc = dp_debug_init(dp_debug, minor); + if (rc) { + devm_kfree(dev, debug); + goto error; + } + + return dp_debug; + error: + return ERR_PTR(rc); +} + +static int dp_debug_deinit(struct dp_debug *dp_debug) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return -EINVAL; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + debugfs_remove_recursive(debug->root); + + return 0; +} + +void dp_debug_put(struct dp_debug *dp_debug) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + dp_debug_deinit(dp_debug); + + devm_kfree(debug->dev, debug); +} diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h new file mode 100644 index 000000000000..7eaedfbb149c --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _DP_DEBUG_H_
+#define _DP_DEBUG_H_
+
+#include "dp_panel.h"
+#include "dp_link.h"
+
+/**
+ * struct dp_debug
+ * @debug_en: specifies whether debug mode is enabled
+ * @aspect_ratio: used to filter out aspect_ratio value
+ * @vdisplay: used to filter out vdisplay value
+ * @hdisplay: used to filter out hdisplay value
+ * @vrefresh: used to filter out vrefresh value
+ */
+struct dp_debug {
+	bool debug_en;
+	int aspect_ratio;
+	int vdisplay;
+	int hdisplay;
+	int vrefresh;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dp_debug_get() - configure and get the DisplayPort debug module data
+ *
+ * @dev: device instance of the caller
+ * @panel: instance of panel module
+ * @usbpd: instance of usbpd module
+ * @link: instance of link module
+ * @connector: double pointer to display connector
+ * @minor: pointer to drm minor number after device registration
+ * Return: pointer to allocated debug module data
+ *
+ * This function sets up the debug module and provides a way
+ * for debugfs input to be communicated to existing modules
+ */
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+		struct dp_usbpd *usbpd, struct dp_link *link,
+		struct drm_connector **connector,
+		struct drm_minor *minor);
+
+/**
+ * dp_debug_put()
+ *
+ * Cleans up dp_debug instance
+ *
+ * @dp_debug: instance of dp_debug
+ */
+void dp_debug_put(struct dp_debug *dp_debug);
+
+#else
+
+static inline
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+		struct dp_usbpd *usbpd, struct dp_link *link,
+		struct drm_connector **connector, struct drm_minor *minor)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline void dp_debug_put(struct dp_debug *dp_debug)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* _DP_DEBUG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
new file mode 100644
index 000000000000..e175aa3fd3a9
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -0,0 +1,1463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/debugfs.h> +#include <linux/component.h> +#include <linux/of_irq.h> +#include <linux/delay.h> + +#include "msm_drv.h" +#include "msm_kms.h" +#include "dp_hpd.h" +#include "dp_parser.h" +#include "dp_power.h" +#include "dp_catalog.h" +#include "dp_aux.h" +#include "dp_reg.h" +#include "dp_link.h" +#include "dp_panel.h" +#include "dp_ctrl.h" +#include "dp_display.h" +#include "dp_drm.h" +#include "dp_audio.h" +#include "dp_debug.h" + +static struct msm_dp *g_dp_display; +#define HPD_STRING_SIZE 30 + +enum { + ISR_DISCONNECTED, + ISR_CONNECT_PENDING, + ISR_CONNECTED, + ISR_HPD_REPLUG_COUNT, + ISR_IRQ_HPD_PULSE_COUNT, + ISR_HPD_LO_GLITH_COUNT, +}; + +/* event thread connection state */ +enum { + ST_DISCONNECTED, + ST_CONNECT_PENDING, + ST_CONNECTED, + ST_DISCONNECT_PENDING, + ST_SUSPEND_PENDING, + ST_SUSPENDED, +}; + +enum { + EV_NO_EVENT, + /* hpd events */ + EV_HPD_INIT_SETUP, + EV_HPD_PLUG_INT, + EV_IRQ_HPD_INT, + EV_HPD_REPLUG_INT, + EV_HPD_UNPLUG_INT, + EV_USER_NOTIFICATION, + EV_CONNECT_PENDING_TIMEOUT, + EV_DISCONNECT_PENDING_TIMEOUT, +}; + +#define EVENT_TIMEOUT (HZ/10) /* 100ms */ +#define DP_EVENT_Q_MAX 8 + +#define DP_TIMEOUT_5_SECOND (5000/EVENT_TIMEOUT) +#define DP_TIMEOUT_NONE 0 + +#define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2) + +struct dp_event { + u32 event_id; + u32 data; + u32 delay; +}; + +struct dp_display_private { + char *name; + int irq; + + /* state variables */ + bool core_initialized; + bool hpd_irq_on; + bool audio_supported; + + struct platform_device *pdev; + struct dentry *root; + + struct dp_usbpd *usbpd; + struct dp_parser *parser; + struct dp_power *power; + struct dp_catalog *catalog; + struct drm_dp_aux *aux; + struct dp_link *link; + struct dp_panel *panel; + struct dp_ctrl *ctrl; + struct dp_debug *debug; + + struct dp_usbpd_cb usbpd_cb; + struct dp_display_mode dp_mode; + struct msm_dp dp_display; + + /* wait for audio signaling */ + struct completion audio_comp; + + /* event related only access by event thread */ + struct mutex event_mutex; + wait_queue_head_t event_q; + atomic_t hpd_state; + u32 event_pndx; + u32 event_gndx; + struct dp_event event_list[DP_EVENT_Q_MAX]; + spinlock_t event_lock; + + struct completion resume_comp; + + struct dp_audio *audio; +}; + +static const struct of_device_id dp_dt_match[] = { + {.compatible = "qcom,sc7180-dp"}, + {} +}; + +static int dp_add_event(struct dp_display_private *dp_priv, u32 event, + u32 data, u32 delay) +{ + unsigned long flag; + struct dp_event *todo; + int pndx; + + spin_lock_irqsave(&dp_priv->event_lock, flag); + pndx = dp_priv->event_pndx + 1; + pndx %= DP_EVENT_Q_MAX; + if (pndx == dp_priv->event_gndx) { + pr_err("event_q is full: pndx=%d gndx=%d\n", + dp_priv->event_pndx, dp_priv->event_gndx); + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + return -EPERM; + } + todo = &dp_priv->event_list[dp_priv->event_pndx++]; + dp_priv->event_pndx %= DP_EVENT_Q_MAX; + todo->event_id = event; + todo->data = data; + todo->delay = delay; + wake_up(&dp_priv->event_q); + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + + return 0; +} + +static int dp_del_event(struct dp_display_private *dp_priv, u32 event) +{ + unsigned long flag; + struct dp_event *todo; + u32 gndx; + + spin_lock_irqsave(&dp_priv->event_lock, flag); + if (dp_priv->event_pndx == dp_priv->event_gndx) { + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + return -ENOENT; + } + + gndx = dp_priv->event_gndx; + while 
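+	/* matching entries are only marked EV_NO_EVENT in place; the ring
+	 * is never compacted, and the event thread silently skips them
+	 */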
(dp_priv->event_pndx != gndx) { + todo = &dp_priv->event_list[gndx]; + if (todo->event_id == event) { + todo->event_id = EV_NO_EVENT; /* deleted */ + todo->delay = 0; + } + gndx++; + gndx %= DP_EVENT_Q_MAX; + } + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + + return 0; +} + +void dp_display_signal_audio_complete(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + complete_all(&dp->audio_comp); +} + +static int dp_display_bind(struct device *dev, struct device *master, + void *data) +{ + int rc = 0; + struct dp_display_private *dp; + struct drm_device *drm; + struct msm_drm_private *priv; + + drm = dev_get_drvdata(master); + + dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + if (!dp) { + DRM_ERROR("DP driver bind failed. Invalid driver data\n"); + return -EINVAL; + } + + dp->dp_display.drm_dev = drm; + priv = drm->dev_private; + priv->dp = &(dp->dp_display); + + rc = dp->parser->parse(dp->parser); + if (rc) { + DRM_ERROR("device tree parsing failed\n"); + goto end; + } + + rc = dp_aux_register(dp->aux); + if (rc) { + DRM_ERROR("DRM DP AUX register failed\n"); + goto end; + } + + rc = dp_power_client_init(dp->power); + if (rc) { + DRM_ERROR("Power client create failed\n"); + goto end; + } + + rc = dp_register_audio_driver(dev, dp->audio); + if (rc) + DRM_ERROR("Audio registration Dp failed\n"); + +end: + return rc; +} + +static void dp_display_unbind(struct device *dev, struct device *master, + void *data) +{ + struct dp_display_private *dp; + struct drm_device *drm = dev_get_drvdata(master); + struct msm_drm_private *priv = drm->dev_private; + + dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + if (!dp) { + DRM_ERROR("Invalid DP driver data\n"); + return; + } + + dp_power_client_deinit(dp->power); + dp_aux_unregister(dp->aux); + priv->dp = NULL; +} + +static const struct component_ops dp_display_comp_ops = { + .bind = dp_display_bind, + .unbind = dp_display_unbind, +}; + +static bool dp_display_is_ds_bridge(struct dp_panel *panel) +{ + return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT); +} + +static bool dp_display_is_sink_count_zero(struct dp_display_private *dp) +{ + return dp_display_is_ds_bridge(dp->panel) && + (dp->link->sink_count == 0); +} + +static void dp_display_send_hpd_event(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + struct drm_connector *connector; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + connector = dp->dp_display.connector; + drm_helper_hpd_irq_event(connector->dev); +} + +static int dp_display_send_hpd_notification(struct dp_display_private *dp, + bool hpd) +{ + static bool encoder_mode_set; + struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private; + struct msm_kms *kms = priv->kms; + + if ((hpd && dp->dp_display.is_connected) || + (!hpd && !dp->dp_display.is_connected)) { + DRM_DEBUG_DP("HPD already %s\n", (hpd ? 
"on" : "off")); + return 0; + } + + /* reset video pattern flag on disconnect */ + if (!hpd) + dp->panel->video_test = false; + + dp->dp_display.is_connected = hpd; + + if (dp->dp_display.is_connected && dp->dp_display.encoder + && !encoder_mode_set + && kms->funcs->set_encoder_mode) { + kms->funcs->set_encoder_mode(kms, + dp->dp_display.encoder, false); + DRM_DEBUG_DP("set_encoder_mode() Completed\n"); + encoder_mode_set = true; + } + + dp_display_send_hpd_event(&dp->dp_display); + + return 0; +} + +static int dp_display_process_hpd_high(struct dp_display_private *dp) +{ + int rc = 0; + struct edid *edid; + + dp->panel->max_dp_lanes = dp->parser->max_dp_lanes; + + rc = dp_panel_read_sink_caps(dp->panel, dp->dp_display.connector); + if (rc) + goto end; + + dp_link_process_request(dp->link); + + edid = dp->panel->edid; + + dp->audio_supported = drm_detect_monitor_audio(edid); + dp_panel_handle_sink_request(dp->panel); + + dp->dp_display.max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ; + dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes; + + rc = dp_ctrl_on_link(dp->ctrl); + if (rc) { + DRM_ERROR("failed to complete DP link training\n"); + goto end; + } + + dp_add_event(dp, EV_USER_NOTIFICATION, true, 0); + + +end: + return rc; +} + +static void dp_display_host_init(struct dp_display_private *dp) +{ + bool flip = false; + + if (dp->core_initialized) { + DRM_DEBUG_DP("DP core already initialized\n"); + return; + } + + if (dp->usbpd->orientation == ORIENTATION_CC2) + flip = true; + + dp_power_init(dp->power, flip); + dp_ctrl_host_init(dp->ctrl, flip); + dp_aux_init(dp->aux); + dp->core_initialized = true; +} + +static int dp_display_usbpd_configure_cb(struct device *dev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dev) { + DRM_ERROR("invalid dev\n"); + rc = -EINVAL; + goto end; + } + + dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + if (!dp) { + DRM_ERROR("no driver data found\n"); + rc = -ENODEV; + goto end; + } + + dp_display_host_init(dp); + + /* + * set sink to normal operation mode -- D0 + * before dpcd read + */ + dp_link_psm_config(dp->link, &dp->panel->link_info, false); + rc = dp_display_process_hpd_high(dp); +end: + return rc; +} + +static int dp_display_usbpd_disconnect_cb(struct device *dev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dev) { + DRM_ERROR("invalid dev\n"); + rc = -EINVAL; + return rc; + } + + dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + if (!dp) { + DRM_ERROR("no driver data found\n"); + rc = -ENODEV; + return rc; + } + + dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); + + return rc; +} + +static void dp_display_handle_video_request(struct dp_display_private *dp) +{ + if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) { + dp->panel->video_test = true; + dp_link_send_test_response(dp->link); + } +} + +static int dp_display_handle_irq_hpd(struct dp_display_private *dp) +{ + u32 sink_request; + + sink_request = dp->link->sink_request; + + if (sink_request & DS_PORT_STATUS_CHANGED) { + dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); + if (dp_display_is_sink_count_zero(dp)) { + DRM_DEBUG_DP("sink count is zero, nothing to do\n"); + return 0; + } + + return dp_display_process_hpd_high(dp); + } + + dp_ctrl_handle_sink_request(dp->ctrl); + + if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) + dp_display_handle_video_request(dp); + + return 0; +} + +static int dp_display_usbpd_attention_cb(struct device *dev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dev) { + 
DRM_ERROR("invalid dev\n"); + return -EINVAL; + } + + dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + if (!dp) { + DRM_ERROR("no driver data found\n"); + return -ENODEV; + } + + /* check for any test request issued by sink */ + rc = dp_link_process_request(dp->link); + if (!rc) + dp_display_handle_irq_hpd(dp); + + return rc; +} + +static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) +{ + struct dp_usbpd *hpd = dp->usbpd; + u32 state; + u32 tout = DP_TIMEOUT_5_SECOND; + int ret; + + if (!hpd) + return 0; + + mutex_lock(&dp->event_mutex); + + state = atomic_read(&dp->hpd_state); + if (state == ST_SUSPEND_PENDING) { + mutex_unlock(&dp->event_mutex); + return 0; + } + + if (state == ST_CONNECT_PENDING || state == ST_CONNECTED) { + mutex_unlock(&dp->event_mutex); + return 0; + } + + if (state == ST_DISCONNECT_PENDING) { + /* wait until ST_DISCONNECTED */ + dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */ + mutex_unlock(&dp->event_mutex); + return 0; + } + + if (state == ST_SUSPENDED) + tout = DP_TIMEOUT_NONE; + + atomic_set(&dp->hpd_state, ST_CONNECT_PENDING); + + hpd->hpd_high = 1; + + ret = dp_display_usbpd_configure_cb(&dp->pdev->dev); + if (ret) { /* failed */ + hpd->hpd_high = 0; + atomic_set(&dp->hpd_state, ST_DISCONNECTED); + } + + /* start sanity checking */ + dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout); + + mutex_unlock(&dp->event_mutex); + + /* uevent will complete connection part */ + return 0; +}; + +static int dp_display_enable(struct dp_display_private *dp, u32 data); +static int dp_display_disable(struct dp_display_private *dp, u32 data); + +static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data) +{ + u32 state; + + mutex_lock(&dp->event_mutex); + + state = atomic_read(&dp->hpd_state); + if (state == ST_CONNECT_PENDING) { + dp_display_enable(dp, 0); + atomic_set(&dp->hpd_state, ST_CONNECTED); + } + + mutex_unlock(&dp->event_mutex); + + return 0; +} + +static void dp_display_handle_plugged_change(struct msm_dp *dp_display, + bool plugged) +{ + if (dp_display->plugged_cb && dp_display->codec_dev) + dp_display->plugged_cb(dp_display->codec_dev, plugged); +} + +static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) +{ + struct dp_usbpd *hpd = dp->usbpd; + u32 state; + + if (!hpd) + return 0; + + mutex_lock(&dp->event_mutex); + + state = atomic_read(&dp->hpd_state); + if (state == ST_SUSPEND_PENDING) { + mutex_unlock(&dp->event_mutex); + return 0; + } + + if (state == ST_DISCONNECT_PENDING || state == ST_DISCONNECTED) { + mutex_unlock(&dp->event_mutex); + return 0; + } + + if (state == ST_CONNECT_PENDING) { + /* wait until CONNECTED */ + dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 1); /* delay = 1 */ + mutex_unlock(&dp->event_mutex); + return 0; + } + + atomic_set(&dp->hpd_state, ST_DISCONNECT_PENDING); + + /* disable HPD plug interrupt until disconnect is done */ + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK + | DP_DP_IRQ_HPD_INT_MASK, false); + + hpd->hpd_high = 0; + + /* + * We don't need separate work for disconnect as + * connect/attention interrupts are disabled + */ + dp_display_usbpd_disconnect_cb(&dp->pdev->dev); + + /* start sanity checking */ + dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND); + + /* signal the disconnect event early to ensure proper teardown */ + dp_display_handle_plugged_change(g_dp_display, false); + reinit_completion(&dp->audio_comp); + + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK | + 
DP_DP_IRQ_HPD_INT_MASK, true); + + /* uevent will complete disconnection part */ + mutex_unlock(&dp->event_mutex); + return 0; +} + +static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data) +{ + u32 state; + + mutex_lock(&dp->event_mutex); + + state = atomic_read(&dp->hpd_state); + if (state == ST_DISCONNECT_PENDING) { + dp_display_disable(dp, 0); + atomic_set(&dp->hpd_state, ST_DISCONNECTED); + } + + mutex_unlock(&dp->event_mutex); + + return 0; +} + +static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) +{ + u32 state; + + mutex_lock(&dp->event_mutex); + + /* irq_hpd can happen at either connected or disconnected state */ + state = atomic_read(&dp->hpd_state); + if (state == ST_SUSPEND_PENDING) { + mutex_unlock(&dp->event_mutex); + return 0; + } + + dp_display_usbpd_attention_cb(&dp->pdev->dev); + + mutex_unlock(&dp->event_mutex); + + return 0; +} + +static void dp_display_deinit_sub_modules(struct dp_display_private *dp) +{ + dp_debug_put(dp->debug); + dp_ctrl_put(dp->ctrl); + dp_panel_put(dp->panel); + dp_aux_put(dp->aux); + dp_audio_put(dp->audio); +} + +static int dp_init_sub_modules(struct dp_display_private *dp) +{ + int rc = 0; + struct device *dev = &dp->pdev->dev; + struct dp_usbpd_cb *cb = &dp->usbpd_cb; + struct dp_panel_in panel_in = { + .dev = dev, + }; + + /* Callback APIs used for cable status change event */ + cb->configure = dp_display_usbpd_configure_cb; + cb->disconnect = dp_display_usbpd_disconnect_cb; + cb->attention = dp_display_usbpd_attention_cb; + + dp->usbpd = dp_hpd_get(dev, cb); + if (IS_ERR(dp->usbpd)) { + rc = PTR_ERR(dp->usbpd); + DRM_ERROR("failed to initialize hpd, rc = %d\n", rc); + dp->usbpd = NULL; + goto error; + } + + dp->parser = dp_parser_get(dp->pdev); + if (IS_ERR(dp->parser)) { + rc = PTR_ERR(dp->parser); + DRM_ERROR("failed to initialize parser, rc = %d\n", rc); + dp->parser = NULL; + goto error; + } + + dp->catalog = dp_catalog_get(dev, &dp->parser->io); + if (IS_ERR(dp->catalog)) { + rc = PTR_ERR(dp->catalog); + DRM_ERROR("failed to initialize catalog, rc = %d\n", rc); + dp->catalog = NULL; + goto error; + } + + dp->power = dp_power_get(dp->parser); + if (IS_ERR(dp->power)) { + rc = PTR_ERR(dp->power); + DRM_ERROR("failed to initialize power, rc = %d\n", rc); + dp->power = NULL; + goto error; + } + + dp->aux = dp_aux_get(dev, dp->catalog); + if (IS_ERR(dp->aux)) { + rc = PTR_ERR(dp->aux); + DRM_ERROR("failed to initialize aux, rc = %d\n", rc); + dp->aux = NULL; + goto error; + } + + dp->link = dp_link_get(dev, dp->aux); + if (IS_ERR(dp->link)) { + rc = PTR_ERR(dp->link); + DRM_ERROR("failed to initialize link, rc = %d\n", rc); + dp->link = NULL; + goto error_link; + } + + panel_in.aux = dp->aux; + panel_in.catalog = dp->catalog; + panel_in.link = dp->link; + + dp->panel = dp_panel_get(&panel_in); + if (IS_ERR(dp->panel)) { + rc = PTR_ERR(dp->panel); + DRM_ERROR("failed to initialize panel, rc = %d\n", rc); + dp->panel = NULL; + goto error_link; + } + + dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux, + dp->power, dp->catalog, dp->parser); + if (IS_ERR(dp->ctrl)) { + rc = PTR_ERR(dp->ctrl); + DRM_ERROR("failed to initialize ctrl, rc = %d\n", rc); + dp->ctrl = NULL; + goto error_ctrl; + } + + dp->audio = dp_audio_get(dp->pdev, dp->panel, dp->catalog); + if (IS_ERR(dp->audio)) { + rc = PTR_ERR(dp->audio); + pr_err("failed to initialize audio, rc = %d\n", rc); + dp->audio = NULL; + goto error_audio; + } + + return rc; + +error_audio: + dp_ctrl_put(dp->ctrl); +error_ctrl: + dp_panel_put(dp->panel); 
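+/*
+ * Unwind mirrors dp_init_sub_modules() in reverse; there is no put
+ * helper for dp->link in this path, so error_link only has to drop
+ * the aux module acquired before it.
+ */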
+error_link: + dp_aux_put(dp->aux); +error: + return rc; +} + +static int dp_display_set_mode(struct msm_dp *dp_display, + struct dp_display_mode *mode) +{ + struct dp_display_private *dp; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + dp->panel->dp_mode.drm_mode = mode->drm_mode; + dp->panel->dp_mode.bpp = mode->bpp; + dp->panel->dp_mode.capabilities = mode->capabilities; + dp_panel_init_panel_info(dp->panel); + return 0; +} + +static int dp_display_prepare(struct msm_dp *dp) +{ + return 0; +} + +static int dp_display_enable(struct dp_display_private *dp, u32 data) +{ + int rc = 0; + struct msm_dp *dp_display; + + dp_display = g_dp_display; + + if (dp_display->power_on) { + DRM_DEBUG_DP("Link already setup, return\n"); + return 0; + } + + rc = dp_ctrl_on_stream(dp->ctrl); + if (!rc) + dp_display->power_on = true; + + /* complete resume_comp regardless it is armed or not */ + complete(&dp->resume_comp); + return rc; +} + +static int dp_display_post_enable(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + u32 rate; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + rate = dp->link->link_params.rate; + + if (dp->audio_supported) { + dp->audio->bw_code = drm_dp_link_rate_to_bw_code(rate); + dp->audio->lane_count = dp->link->link_params.num_lanes; + } + + /* signal the connect event late to synchronize video and display */ + dp_display_handle_plugged_change(dp_display, true); + return 0; +} + +static int dp_display_disable(struct dp_display_private *dp, u32 data) +{ + struct msm_dp *dp_display; + + dp_display = g_dp_display; + + if (!dp_display->power_on) + return -EINVAL; + + /* wait only if audio was enabled */ + if (dp_display->audio_enabled) { + if (!wait_for_completion_timeout(&dp->audio_comp, + HZ * 5)) + DRM_ERROR("audio comp timeout\n"); + } + + dp_display->audio_enabled = false; + + dp_ctrl_off(dp->ctrl); + + dp->core_initialized = false; + + dp_display->power_on = false; + + return 0; +} + +static int dp_display_unprepare(struct msm_dp *dp) +{ + return 0; +} + +int dp_display_set_plugged_cb(struct msm_dp *dp_display, + hdmi_codec_plugged_cb fn, struct device *codec_dev) +{ + bool plugged; + + dp_display->plugged_cb = fn; + dp_display->codec_dev = codec_dev; + plugged = dp_display->is_connected; + dp_display_handle_plugged_change(dp_display, plugged); + + return 0; +} + +int dp_display_validate_mode(struct msm_dp *dp, u32 mode_pclk_khz) +{ + const u32 num_components = 3, default_bpp = 24; + struct dp_display_private *dp_display; + struct dp_link_info *link_info; + u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0; + + if (!dp || !mode_pclk_khz || !dp->connector) { + DRM_ERROR("invalid params\n"); + return -EINVAL; + } + + dp_display = container_of(dp, struct dp_display_private, dp_display); + link_info = &dp_display->panel->link_info; + + mode_bpp = dp->connector->display_info.bpc * num_components; + if (!mode_bpp) + mode_bpp = default_bpp; + + mode_bpp = dp_panel_get_mode_bpp(dp_display->panel, + mode_bpp, mode_pclk_khz); + + mode_rate_khz = mode_pclk_khz * mode_bpp; + supported_rate_khz = link_info->num_lanes * link_info->rate * 8; + + if (mode_rate_khz > supported_rate_khz) + return MODE_BAD; + + return MODE_OK; +} + +int dp_display_get_modes(struct msm_dp *dp, + struct dp_display_mode *dp_mode) +{ + struct dp_display_private *dp_display; + int ret = 0; + + if (!dp) { + DRM_ERROR("invalid params\n"); + return 0; + } + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + ret = 
dp_panel_get_modes(dp_display->panel, + dp->connector, dp_mode); + if (dp_mode->drm_mode.clock) + dp->max_pclk_khz = dp_mode->drm_mode.clock; + return ret; +} + +bool dp_display_check_video_test(struct msm_dp *dp) +{ + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + return dp_display->panel->video_test; +} + +int dp_display_get_test_bpp(struct msm_dp *dp) +{ + struct dp_display_private *dp_display; + + if (!dp) { + DRM_ERROR("invalid params\n"); + return 0; + } + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + return dp_link_bit_depth_to_bpp( + dp_display->link->test_video.test_bit_depth); +} + +static void dp_display_config_hpd(struct dp_display_private *dp) +{ + + dp_display_host_init(dp); + dp_catalog_ctrl_hpd_config(dp->catalog); + + /* Enable interrupt first time + * we are leaving dp clocks on during disconnect + * and never disable interrupt + */ + enable_irq(dp->irq); +} + +static int hpd_event_thread(void *data) +{ + struct dp_display_private *dp_priv; + unsigned long flag; + struct dp_event *todo; + int timeout_mode = 0; + + dp_priv = (struct dp_display_private *)data; + + while (1) { + if (timeout_mode) { + wait_event_timeout(dp_priv->event_q, + (dp_priv->event_pndx == dp_priv->event_gndx), + EVENT_TIMEOUT); + } else { + wait_event_interruptible(dp_priv->event_q, + (dp_priv->event_pndx != dp_priv->event_gndx)); + } + spin_lock_irqsave(&dp_priv->event_lock, flag); + todo = &dp_priv->event_list[dp_priv->event_gndx]; + if (todo->delay) { + struct dp_event *todo_next; + + dp_priv->event_gndx++; + dp_priv->event_gndx %= DP_EVENT_Q_MAX; + + /* re enter delay event into q */ + todo_next = &dp_priv->event_list[dp_priv->event_pndx++]; + dp_priv->event_pndx %= DP_EVENT_Q_MAX; + todo_next->event_id = todo->event_id; + todo_next->data = todo->data; + todo_next->delay = todo->delay - 1; + + /* clean up older event */ + todo->event_id = EV_NO_EVENT; + todo->delay = 0; + + /* switch to timeout mode */ + timeout_mode = 1; + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + continue; + } + + /* timeout with no events in q */ + if (dp_priv->event_pndx == dp_priv->event_gndx) { + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + continue; + } + + dp_priv->event_gndx++; + dp_priv->event_gndx %= DP_EVENT_Q_MAX; + timeout_mode = 0; + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + + switch (todo->event_id) { + case EV_HPD_INIT_SETUP: + dp_display_config_hpd(dp_priv); + break; + case EV_HPD_PLUG_INT: + dp_hpd_plug_handle(dp_priv, todo->data); + break; + case EV_HPD_UNPLUG_INT: + dp_hpd_unplug_handle(dp_priv, todo->data); + break; + case EV_IRQ_HPD_INT: + dp_irq_hpd_handle(dp_priv, todo->data); + break; + case EV_HPD_REPLUG_INT: + /* do nothing */ + break; + case EV_USER_NOTIFICATION: + dp_display_send_hpd_notification(dp_priv, + todo->data); + break; + case EV_CONNECT_PENDING_TIMEOUT: + dp_connect_pending_timeout(dp_priv, + todo->data); + break; + case EV_DISCONNECT_PENDING_TIMEOUT: + dp_disconnect_pending_timeout(dp_priv, + todo->data); + break; + default: + break; + } + } + + return 0; +} + +static void dp_hpd_event_setup(struct dp_display_private *dp_priv) +{ + init_waitqueue_head(&dp_priv->event_q); + spin_lock_init(&dp_priv->event_lock); + + kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler"); +} + +static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) +{ + struct dp_display_private *dp = dev_id; + irqreturn_t ret = IRQ_HANDLED; + u32 hpd_isr_status; + + if (!dp) { + 
DRM_ERROR("invalid data\n"); + return IRQ_NONE; + } + + hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog); + + if (hpd_isr_status & 0x0F) { + /* hpd related interrupts */ + if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK || + hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) { + dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); + } + + if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) { + /* delete connect pending event first */ + dp_del_event(dp, EV_CONNECT_PENDING_TIMEOUT); + dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); + } + + if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) + dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0); + + if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK) + dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + } + + /* DP controller isr */ + dp_ctrl_isr(dp->ctrl); + + /* DP aux isr */ + dp_aux_isr(dp->aux); + + return ret; +} + +int dp_display_request_irq(struct msm_dp *dp_display) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dp_display) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0); + if (dp->irq < 0) { + rc = dp->irq; + DRM_ERROR("failed to get irq: %d\n", rc); + return rc; + } + + rc = devm_request_irq(&dp->pdev->dev, dp->irq, + dp_display_irq_handler, + IRQF_TRIGGER_HIGH, "dp_display_isr", dp); + if (rc < 0) { + DRM_ERROR("failed to request IRQ%u: %d\n", + dp->irq, rc); + return rc; + } + disable_irq(dp->irq); + + return 0; +} + +static int dp_display_probe(struct platform_device *pdev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!pdev || !pdev->dev.of_node) { + DRM_ERROR("pdev not found\n"); + return -ENODEV; + } + + dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL); + if (!dp) + return -ENOMEM; + + dp->pdev = pdev; + dp->name = "drm_dp"; + + rc = dp_init_sub_modules(dp); + if (rc) { + DRM_ERROR("init sub module failed\n"); + return -EPROBE_DEFER; + } + + mutex_init(&dp->event_mutex); + + init_completion(&dp->resume_comp); + + g_dp_display = &dp->dp_display; + + /* Store DP audio handle inside DP display */ + g_dp_display->dp_audio = dp->audio; + + init_completion(&dp->audio_comp); + + platform_set_drvdata(pdev, g_dp_display); + + rc = component_add(&pdev->dev, &dp_display_comp_ops); + if (rc) { + DRM_ERROR("component add failed, rc=%d\n", rc); + dp_display_deinit_sub_modules(dp); + } + + return rc; +} + +static int dp_display_remove(struct platform_device *pdev) +{ + struct dp_display_private *dp; + + dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + + dp_display_deinit_sub_modules(dp); + + component_del(&pdev->dev, &dp_display_comp_ops); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static int dp_pm_resume(struct device *dev) +{ + return 0; +} + +static int dp_pm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dp_display_private *dp = platform_get_drvdata(pdev); + + if (!dp) { + DRM_ERROR("DP driver bind failed. 
Invalid driver data\n"); + return -EINVAL; + } + + atomic_set(&dp->hpd_state, ST_SUSPENDED); + + return 0; +} + +static int dp_pm_prepare(struct device *dev) +{ + return 0; +} + +static void dp_pm_complete(struct device *dev) +{ + +} + +static const struct dev_pm_ops dp_pm_ops = { + .suspend = dp_pm_suspend, + .resume = dp_pm_resume, + .prepare = dp_pm_prepare, + .complete = dp_pm_complete, +}; + +static struct platform_driver dp_display_driver = { + .probe = dp_display_probe, + .remove = dp_display_remove, + .driver = { + .name = "msm-dp-display", + .of_match_table = dp_dt_match, + .suppress_bind_attrs = true, + .pm = &dp_pm_ops, + }, +}; + +int __init msm_dp_register(void) +{ + int ret; + + ret = platform_driver_register(&dp_display_driver); + if (ret) + DRM_ERROR("Dp display driver register failed"); + + return ret; +} + +void __exit msm_dp_unregister(void) +{ + platform_driver_unregister(&dp_display_driver); +} + +void msm_dp_irq_postinstall(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + + if (!dp_display) + return; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + dp_hpd_event_setup(dp); + + dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 100); +} + +void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor) +{ + struct dp_display_private *dp; + struct device *dev; + int rc; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + dev = &dp->pdev->dev; + + dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd, + dp->link, &dp->dp_display.connector, + minor); + if (IS_ERR(dp->debug)) { + rc = PTR_ERR(dp->debug); + DRM_ERROR("failed to initialize debug, rc = %d\n", rc); + dp->debug = NULL; + } +} + +int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, + struct drm_encoder *encoder) +{ + struct msm_drm_private *priv; + int ret; + + if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev)) + return -EINVAL; + + priv = dev->dev_private; + dp_display->drm_dev = dev; + + ret = dp_display_request_irq(dp_display); + if (ret) { + DRM_ERROR("request_irq failed, ret=%d\n", ret); + return ret; + } + + dp_display->encoder = encoder; + + dp_display->connector = dp_drm_connector_init(dp_display); + if (IS_ERR(dp_display->connector)) { + ret = PTR_ERR(dp_display->connector); + DRM_DEV_ERROR(dev->dev, + "failed to create dp connector: %d\n", ret); + dp_display->connector = NULL; + return ret; + } + + priv->connectors[priv->num_connectors++] = dp_display->connector; + return 0; +} + +static int dp_display_wait4resume_done(struct dp_display_private *dp) +{ + int ret = 0; + + reinit_completion(&dp->resume_comp); + if (!wait_for_completion_timeout(&dp->resume_comp, + WAIT_FOR_RESUME_TIMEOUT_JIFFIES)) { + DRM_ERROR("wait4resume_done timedout\n"); + ret = -ETIMEDOUT; + } + return ret; +} + +int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) +{ + int rc = 0; + struct dp_display_private *dp_display; + u32 state; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + if (!dp_display->dp_mode.drm_mode.clock) { + DRM_ERROR("invalid params\n"); + return -EINVAL; + } + + mutex_lock(&dp_display->event_mutex); + + rc = dp_display_set_mode(dp, &dp_display->dp_mode); + if (rc) { + DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc); + mutex_unlock(&dp_display->event_mutex); + return rc; + } + + rc = dp_display_prepare(dp); + if (rc) { + DRM_ERROR("DP display prepare failed, rc=%d\n", rc); + mutex_unlock(&dp_display->event_mutex); + return rc; + } + + state = 
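+		/* coming out of suspend (ST_SUSPENDED) the link is down: a
+		 * synthetic plug event below retrains it, and we then wait
+		 * on resume_comp until the interface is back up
+		 */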
atomic_read(&dp_display->hpd_state); + if (state == ST_SUSPENDED) { + /* start link training */ + dp_add_event(dp_display, EV_HPD_PLUG_INT, 0, 0); + mutex_unlock(&dp_display->event_mutex); + + /* wait until dp interface is up */ + goto resume_done; + } + + dp_display_enable(dp_display, 0); + + rc = dp_display_post_enable(dp); + if (rc) { + DRM_ERROR("DP display post enable failed, rc=%d\n", rc); + dp_display_disable(dp_display, 0); + dp_display_unprepare(dp); + } + + dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT); + + if (state == ST_SUSPEND_PENDING) + dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0); + + /* completed connection */ + atomic_set(&dp_display->hpd_state, ST_CONNECTED); + + mutex_unlock(&dp_display->event_mutex); + + return rc; + +resume_done: + dp_display_wait4resume_done(dp_display); + return rc; +} + +int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder) +{ + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + dp_ctrl_push_idle(dp_display->ctrl); + + return 0; +} + +int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder) +{ + int rc = 0; + u32 state; + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + mutex_lock(&dp_display->event_mutex); + + dp_display_disable(dp_display, 0); + + rc = dp_display_unprepare(dp); + if (rc) + DRM_ERROR("DP display unprepare failed, rc=%d\n", rc); + + dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT); + + state = atomic_read(&dp_display->hpd_state); + if (state == ST_DISCONNECT_PENDING) { + /* completed disconnection */ + atomic_set(&dp_display->hpd_state, ST_DISCONNECTED); + } else { + atomic_set(&dp_display->hpd_state, ST_SUSPEND_PENDING); + } + + mutex_unlock(&dp_display->event_mutex); + return rc; +} + +void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode)); + + if (dp_display_check_video_test(dp)) + dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp); + else /* Default num_components per pixel = 3 */ + dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3; + + if (!dp_display->dp_mode.bpp) + dp_display->dp_mode.bpp = 24; /* Default bpp */ + + drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode); + + dp_display->dp_mode.v_active_low = + !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC); + + dp_display->dp_mode.h_active_low = + !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC); +} diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h new file mode 100644 index 000000000000..6092ba1ed85e --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_DISPLAY_H_ +#define _DP_DISPLAY_H_ + +#include "dp_panel.h" +#include <sound/hdmi-codec.h> + +struct msm_dp { + struct drm_device *drm_dev; + struct device *codec_dev; + struct drm_connector *connector; + struct drm_encoder *encoder; + bool is_connected; + bool audio_enabled; + bool power_on; + + hdmi_codec_plugged_cb plugged_cb; + + u32 max_pclk_khz; + + u32 max_dp_lanes; + struct dp_audio *dp_audio; +}; + +int dp_display_set_plugged_cb(struct msm_dp *dp_display, + hdmi_codec_plugged_cb fn, struct device *codec_dev); +int dp_display_validate_mode(struct msm_dp *dp_display, u32 mode_pclk_khz); +int dp_display_get_modes(struct msm_dp *dp_display, + struct dp_display_mode *dp_mode); +int dp_display_request_irq(struct msm_dp *dp_display); +bool dp_display_check_video_test(struct msm_dp *dp_display); +int dp_display_get_test_bpp(struct msm_dp *dp_display); +void dp_display_signal_audio_complete(struct msm_dp *dp_display); + +#endif /* _DP_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c new file mode 100644 index 000000000000..764f4b81017e --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic.h> +#include <drm/drm_crtc.h> + +#include "msm_drv.h" +#include "msm_kms.h" +#include "dp_drm.h" + +struct dp_connector { + struct drm_connector base; + struct msm_dp *dp_display; +}; +#define to_dp_connector(x) container_of(x, struct dp_connector, base) + +/** + * dp_connector_detect - callback to determine if connector is connected + * @conn: Pointer to drm connector structure + * @force: Force detect setting from drm framework + * Returns: Connector 'is connected' status + */ +static enum drm_connector_status dp_connector_detect(struct drm_connector *conn, + bool force) +{ + struct msm_dp *dp; + + dp = to_dp_connector(conn)->dp_display; + + DRM_DEBUG_DP("is_connected = %s\n", + (dp->is_connected) ? "true" : "false"); + + return (dp->is_connected) ? connector_status_connected : + connector_status_disconnected; +} + +/** + * dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add() + * @connector: Pointer to drm connector structure + * Returns: Number of modes added + */ +static int dp_connector_get_modes(struct drm_connector *connector) +{ + int rc = 0; + struct msm_dp *dp; + struct dp_display_mode *dp_mode = NULL; + struct drm_display_mode *m, drm_mode; + + if (!connector) + return 0; + + dp = to_dp_connector(connector)->dp_display; + + dp_mode = kzalloc(sizeof(*dp_mode), GFP_KERNEL); + if (!dp_mode) + return 0; + + /* pluggable case assumes EDID is read when HPD */ + if (dp->is_connected) { + /* + *The get_modes() function might return one mode that is stored + * in dp_mode when compliance test is in progress. 
If not, the + * return value is equal to the total number of modes supported + * by the sink + */ + rc = dp_display_get_modes(dp, dp_mode); + if (rc <= 0) { + DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc); + kfree(dp_mode); + return rc; + } + if (dp_mode->drm_mode.clock) { /* valid DP mode */ + memset(&drm_mode, 0x0, sizeof(drm_mode)); + drm_mode_copy(&drm_mode, &dp_mode->drm_mode); + m = drm_mode_duplicate(connector->dev, &drm_mode); + if (!m) { + DRM_ERROR("failed to add mode %ux%u\n", + drm_mode.hdisplay, + drm_mode.vdisplay); + kfree(dp_mode); + return 0; + } + drm_mode_probed_add(connector, m); + } + } else { + DRM_DEBUG_DP("No sink connected\n"); + } + kfree(dp_mode); + return rc; +} + +/** + * dp_connector_mode_valid - callback to determine if specified mode is valid + * @connector: Pointer to drm connector structure + * @mode: Pointer to drm mode structure + * Returns: Validity status for specified mode + */ +static enum drm_mode_status dp_connector_mode_valid( + struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct msm_dp *dp_disp; + + dp_disp = to_dp_connector(connector)->dp_display; + + if ((dp_disp->max_pclk_khz <= 0) || + (dp_disp->max_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) || + (mode->clock > dp_disp->max_pclk_khz)) + return MODE_BAD; + + return dp_display_validate_mode(dp_disp, mode->clock); +} + +static const struct drm_connector_funcs dp_connector_funcs = { + .detect = dp_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs dp_connector_helper_funcs = { + .get_modes = dp_connector_get_modes, + .mode_valid = dp_connector_mode_valid, +}; + +/* connector initialization */ +struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display) +{ + struct drm_connector *connector = NULL; + struct dp_connector *dp_connector; + int ret; + + dp_connector = devm_kzalloc(dp_display->drm_dev->dev, + sizeof(*dp_connector), + GFP_KERNEL); + if (!dp_connector) + return ERR_PTR(-ENOMEM); + + dp_connector->dp_display = dp_display; + + connector = &dp_connector->base; + + ret = drm_connector_init(dp_display->drm_dev, connector, + &dp_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) + return ERR_PTR(ret); + + drm_connector_helper_add(connector, &dp_connector_helper_funcs); + + /* + * Enable HPD to let hpd event is handled when cable is connected. + */ + connector->polled = DRM_CONNECTOR_POLL_HPD; + + drm_connector_attach_encoder(connector, dp_display->encoder); + + return connector; +} diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h new file mode 100644 index 000000000000..c27bfceefdf0 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_drm.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _DP_DRM_H_
+#define _DP_DRM_H_
+
+#include <linux/types.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+#include "dp_display.h"
+
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display);
+
+#endif /* _DP_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
new file mode 100644
index 000000000000..5b8fe32022b5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "dp_hpd.h"
+
+/* DP specific VDM commands */
+#define DP_USBPD_VDM_STATUS	0x10
+#define DP_USBPD_VDM_CONFIGURE	0x11
+
+/* USBPD-TypeC specific Macros */
+#define VDM_VERSION		0x0
+#define USB_C_DP_SID		0xFF01
+
+struct dp_hpd_private {
+	struct device *dev;
+	struct dp_usbpd_cb *dp_cb;
+	struct dp_usbpd dp_usbpd;
+};
+
+int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
+{
+	int rc = 0;
+	struct dp_hpd_private *hpd_priv;
+
+	hpd_priv = container_of(dp_usbpd, struct dp_hpd_private,
+					dp_usbpd);
+
+	dp_usbpd->hpd_high = hpd;
+
+	if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
+				|| !hpd_priv->dp_cb->disconnect) {
+		pr_err("hpd dp_cb not initialized\n");
+		return -EINVAL;
+	}
+	if (hpd)
+		hpd_priv->dp_cb->configure(hpd_priv->dev);
+	else
+		hpd_priv->dp_cb->disconnect(hpd_priv->dev);
+
+	return rc;
+}
+
+struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb)
+{
+	struct dp_hpd_private *dp_hpd;
+
+	if (!cb) {
+		pr_err("invalid cb data\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	dp_hpd = devm_kzalloc(dev, sizeof(*dp_hpd), GFP_KERNEL);
+	if (!dp_hpd)
+		return ERR_PTR(-ENOMEM);
+
+	dp_hpd->dev = dev;
+	dp_hpd->dp_cb = cb;
+
+	dp_hpd->dp_usbpd.connect = dp_hpd_connect;
+
+	return &dp_hpd->dp_usbpd;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h
new file mode 100644
index 000000000000..5bc5bb64680f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_HPD_H_
+#define _DP_HPD_H_
+
+//#include <linux/usb/usbpd.h>
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+enum plug_orientation {
+	ORIENTATION_NONE,
+	ORIENTATION_CC1,
+	ORIENTATION_CC2,
+};
+
+/**
+ * struct dp_usbpd - DisplayPort status
+ *
+ * @orientation: plug orientation configuration
+ * @low_pow_st: low power state
+ * @adaptor_dp_en: adaptor functionality enabled
+ * @multi_func: multi-function preferred
+ * @usb_config_req: request to switch to usb
+ * @exit_dp_mode: request exit from displayport mode
+ * @hpd_high: Hot Plug Detect signal is high.
+ * @hpd_irq: Change in the status since last message + * @alt_mode_cfg_done: bool to specify alt mode status + * @debug_en: bool to specify debug mode + * @connect: simulate disconnect or connect for debug mode + */ +struct dp_usbpd { + enum plug_orientation orientation; + bool low_pow_st; + bool adaptor_dp_en; + bool multi_func; + bool usb_config_req; + bool exit_dp_mode; + bool hpd_high; + bool hpd_irq; + bool alt_mode_cfg_done; + bool debug_en; + + int (*connect)(struct dp_usbpd *dp_usbpd, bool hpd); +}; + +/** + * struct dp_usbpd_cb - callback functions provided by the client + * + * @configure: called by usbpd module when PD communication has + * been completed and the usb peripheral has been configured on + * dp mode. + * @disconnect: notify the cable disconnect issued by usb. + * @attention: notify any attention message issued by usb. + */ +struct dp_usbpd_cb { + int (*configure)(struct device *dev); + int (*disconnect)(struct device *dev); + int (*attention)(struct device *dev); +}; + +/** + * dp_hpd_get() - setup hpd module + * + * @dev: device instance of the caller + * @cb: struct containing callback function pointers. + * + * This function allows the client to initialize the usbpd + * module. The module will communicate with HPD module. + */ +struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb); + +int dp_hpd_register(struct dp_usbpd *dp_usbpd); +void dp_hpd_unregister(struct dp_usbpd *dp_usbpd); +int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd); + +#endif /* _DP_HPD_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c new file mode 100644 index 000000000000..c811da515fb3 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -0,0 +1,1210 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <drm/drm_print.h> + +#include "dp_link.h" +#include "dp_panel.h" + +#define DP_TEST_REQUEST_MASK 0x7F + +enum audio_sample_rate { + AUDIO_SAMPLE_RATE_32_KHZ = 0x00, + AUDIO_SAMPLE_RATE_44_1_KHZ = 0x01, + AUDIO_SAMPLE_RATE_48_KHZ = 0x02, + AUDIO_SAMPLE_RATE_88_2_KHZ = 0x03, + AUDIO_SAMPLE_RATE_96_KHZ = 0x04, + AUDIO_SAMPLE_RATE_176_4_KHZ = 0x05, + AUDIO_SAMPLE_RATE_192_KHZ = 0x06, +}; + +enum audio_pattern_type { + AUDIO_TEST_PATTERN_OPERATOR_DEFINED = 0x00, + AUDIO_TEST_PATTERN_SAWTOOTH = 0x01, +}; + +struct dp_link_request { + u32 test_requested; + u32 test_link_rate; + u32 test_lane_count; +}; + +struct dp_link_private { + u32 prev_sink_count; + struct device *dev; + struct drm_dp_aux *aux; + struct dp_link dp_link; + + struct dp_link_request request; + struct mutex psm_mutex; + u8 link_status[DP_LINK_STATUS_SIZE]; +}; + +static int dp_aux_link_power_up(struct drm_dp_aux *aux, + struct dp_link_info *link) +{ + u8 value; + int err; + + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + usleep_range(1000, 2000); + + return 0; +} + +static int dp_aux_link_power_down(struct drm_dp_aux *aux, + struct dp_link_info *link) +{ + u8 value; + int err; + + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + return 0; +} + +static int dp_link_get_period(struct dp_link_private *link, int const addr) +{ + int ret = 0; + u8 data; + u32 const max_audio_period = 0xA; + + /* TEST_AUDIO_PERIOD_CH_XX */ + if (drm_dp_dpcd_readb(link->aux, addr, &data) < 0) { + DRM_ERROR("failed to read test_audio_period (0x%x)\n", addr); + ret = -EINVAL; + goto exit; + } + + /* Period - Bits 3:0 */ + data = data & 0xF; + if ((int)data > max_audio_period) { + DRM_ERROR("invalid test_audio_period_ch_1 = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + ret = data; +exit: + return ret; +} + +static int dp_link_parse_audio_channel_period(struct dp_link_private *link) +{ + int ret = 0; + struct dp_link_test_audio *req = &link->dp_link.test_audio; + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_1 = ret; + DRM_DEBUG_DP("test_audio_period_ch_1 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_2 = ret; + DRM_DEBUG_DP("test_audio_period_ch_2 = 0x%x\n", ret); + + /* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */ + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_3 = ret; + DRM_DEBUG_DP("test_audio_period_ch_3 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_4 = ret; + DRM_DEBUG_DP("test_audio_period_ch_4 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_5 = ret; + DRM_DEBUG_DP("test_audio_period_ch_5 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6); + if (ret == -EINVAL) + goto 
exit; + + req->test_audio_period_ch_6 = ret; + DRM_DEBUG_DP("test_audio_period_ch_6 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_7 = ret; + DRM_DEBUG_DP("test_audio_period_ch_7 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_8 = ret; + DRM_DEBUG_DP("test_audio_period_ch_8 = 0x%x\n", ret); +exit: + return ret; +} + +static int dp_link_parse_audio_pattern_type(struct dp_link_private *link) +{ + int ret = 0; + u8 data; + ssize_t rlen; + int const max_audio_pattern_type = 0x1; + + rlen = drm_dp_dpcd_readb(link->aux, + DP_TEST_AUDIO_PATTERN_TYPE, &data); + if (rlen < 0) { + DRM_ERROR("failed to read link audio mode. rlen=%zd\n", rlen); + return rlen; + } + + /* Audio Pattern Type - Bits 7:0 */ + if ((int)data > max_audio_pattern_type) { + DRM_ERROR("invalid audio pattern type = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_audio.test_audio_pattern_type = data; + DRM_DEBUG_DP("audio pattern type = 0x%x\n", data); +exit: + return ret; +} + +static int dp_link_parse_audio_mode(struct dp_link_private *link) +{ + int ret = 0; + u8 data; + ssize_t rlen; + int const max_audio_sampling_rate = 0x6; + int const max_audio_channel_count = 0x8; + int sampling_rate = 0x0; + int channel_count = 0x0; + + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_AUDIO_MODE, &data); + if (rlen < 0) { + DRM_ERROR("failed to read link audio mode. rlen=%zd\n", rlen); + return rlen; + } + + /* Sampling Rate - Bits 3:0 */ + sampling_rate = data & 0xF; + if (sampling_rate > max_audio_sampling_rate) { + DRM_ERROR("sampling rate (0x%x) greater than max (0x%x)\n", + sampling_rate, max_audio_sampling_rate); + ret = -EINVAL; + goto exit; + } + + /* Channel Count - Bits 7:4 */ + channel_count = ((data & 0xF0) >> 4) + 1; + if (channel_count > max_audio_channel_count) { + DRM_ERROR("channel_count (0x%x) greater than max (0x%x)\n", + channel_count, max_audio_channel_count); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate; + link->dp_link.test_audio.test_audio_channel_count = channel_count; + DRM_DEBUG_DP("sampling_rate = 0x%x, channel_count = 0x%x\n", + sampling_rate, channel_count); +exit: + return ret; +} + +static int dp_link_parse_audio_pattern_params(struct dp_link_private *link) +{ + int ret = 0; + + ret = dp_link_parse_audio_mode(link); + if (ret) + goto exit; + + ret = dp_link_parse_audio_pattern_type(link); + if (ret) + goto exit; + + ret = dp_link_parse_audio_channel_period(link); + +exit: + return ret; +} + +static bool dp_link_is_video_pattern_valid(u32 pattern) +{ + switch (pattern) { + case DP_NO_TEST_PATTERN: + case DP_COLOR_RAMP: + case DP_BLACK_AND_WHITE_VERTICAL_LINES: + case DP_COLOR_SQUARE: + return true; + default: + return false; + } +} + +/** + * dp_link_is_bit_depth_valid() - validates the bit depth requested + * @tbd: bit depth requested by the sink + * + * Returns true if the requested bit depth is supported. 
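+ * For example, DP_TEST_BIT_DEPTH_8 passes the check and corresponds to
+ * 8 bits per color, i.e. 24 bpp for the three-component modes used here.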
+ */
+static bool dp_link_is_bit_depth_valid(u32 tbd)
+{
+	/* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+	case DP_TEST_BIT_DEPTH_8:
+	case DP_TEST_BIT_DEPTH_10:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int dp_link_parse_timing_params1(struct dp_link_private *link,
+					int addr, int len, u32 *val)
+{
+	u8 bp[2];
+	int rlen;
+
+	if (len != 2)
+		return -EINVAL;
+
+	/* Read two bytes starting at the requested DPCD address. */
+	rlen = drm_dp_dpcd_read(link->aux, addr, bp, len);
+	if (rlen < len) {
+		DRM_ERROR("failed to read 0x%x\n", addr);
+		return -EINVAL;
+	}
+
+	*val = bp[1] | (bp[0] << 8);
+
+	return 0;
+}
+
+static int dp_link_parse_timing_params2(struct dp_link_private *link,
+					int addr, int len,
+					u32 *val1, u32 *val2)
+{
+	u8 bp[2];
+	int rlen;
+
+	if (len != 2)
+		return -EINVAL;
+
+	/* Read two bytes starting at the requested DPCD address. */
+	rlen = drm_dp_dpcd_read(link->aux, addr, bp, len);
+	if (rlen < len) {
+		DRM_ERROR("failed to read 0x%x\n", addr);
+		return -EINVAL;
+	}
+
+	*val1 = (bp[0] & BIT(7)) >> 7;
+	*val2 = bp[1] | ((bp[0] & 0x7F) << 8);
+
+	return 0;
+}
+
+static int dp_link_parse_timing_params3(struct dp_link_private *link,
+					int addr, u32 *val)
+{
+	u8 bp;
+	u32 len = 1;
+	int rlen;
+
+	rlen = drm_dp_dpcd_read(link->aux, addr, &bp, len);
+	if (rlen < 1) {
+		DRM_ERROR("failed to read 0x%x\n", addr);
+		return -EINVAL;
+	}
+	*val = bp;
+
+	return 0;
+}
+
+/**
+ * dp_link_parse_video_pattern_params() - parses video pattern parameters from
+ * DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the video link pattern and the link
+ * bit depth requested by the sink, and if the parsed values are valid.
+ */
+static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
+{
+	int ret = 0;
+	ssize_t rlen;
+	u8 bp;
+
+	rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_PATTERN, &bp);
+	if (rlen < 0) {
+		DRM_ERROR("failed to read link video pattern. rlen=%zd\n",
+			rlen);
+		return rlen;
+	}
+
+	if (!dp_link_is_video_pattern_valid(bp)) {
+		DRM_ERROR("invalid link video pattern = 0x%x\n", bp);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	link->dp_link.test_video.test_video_pattern = bp;
+
+	/* Read the requested color bit depth and dynamic range (Byte 0x232) */
+	rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp);
+	if (rlen < 0) {
+		DRM_ERROR("failed to read link bit depth. 
rlen=%zd\n", rlen); + return rlen; + } + + /* Dynamic Range */ + link->dp_link.test_video.test_dyn_range = + (bp & DP_TEST_DYNAMIC_RANGE_CEA); + + /* Color bit depth */ + bp &= DP_TEST_BIT_DEPTH_MASK; + if (!dp_link_is_bit_depth_valid(bp)) { + DRM_ERROR("invalid link bit depth = 0x%x\n", bp); + ret = -EINVAL; + return ret; + } + + link->dp_link.test_video.test_bit_depth = bp; + + /* resolution timing params */ + ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2, + &link->dp_link.test_video.test_h_total); + if (ret) { + DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2, + &link->dp_link.test_video.test_v_total); + if (ret) { + DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2, + &link->dp_link.test_video.test_h_start); + if (ret) { + DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2, + &link->dp_link.test_video.test_v_start); + if (ret) { + DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2, + &link->dp_link.test_video.test_hsync_pol, + &link->dp_link.test_video.test_hsync_width); + if (ret) { + DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2, + &link->dp_link.test_video.test_vsync_pol, + &link->dp_link.test_video.test_vsync_width); + if (ret) { + DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2, + &link->dp_link.test_video.test_h_width); + if (ret) { + DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2, + &link->dp_link.test_video.test_v_height); + if (ret) { + DRM_ERROR("failed to parse test_v_height\n"); + return ret; + } + + ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1, + &link->dp_link.test_video.test_rr_d); + link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR; + if (ret) { + DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n"); + return ret; + } + + ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR, + &link->dp_link.test_video.test_rr_n); + if (ret) { + DRM_ERROR("failed to parse test_rr_n\n"); + return ret; + } + + DRM_DEBUG_DP("link video pattern = 0x%x\n" + "link dynamic range = 0x%x\n" + "link bit depth = 0x%x\n" + "TEST_H_TOTAL = %d, TEST_V_TOTAL = %d\n" + "TEST_H_START = %d, TEST_V_START = %d\n" + "TEST_HSYNC_POL = %d\n" + "TEST_HSYNC_WIDTH = %d\n" + "TEST_VSYNC_POL = %d\n" + "TEST_VSYNC_WIDTH = %d\n" + "TEST_H_WIDTH = %d\n" + "TEST_V_HEIGHT = %d\n" + "TEST_REFRESH_DENOMINATOR = %d\n" + "TEST_REFRESH_NUMERATOR = %d\n", + link->dp_link.test_video.test_video_pattern, + link->dp_link.test_video.test_dyn_range, + link->dp_link.test_video.test_bit_depth, + link->dp_link.test_video.test_h_total, + link->dp_link.test_video.test_v_total, + link->dp_link.test_video.test_h_start, + link->dp_link.test_video.test_v_start, + link->dp_link.test_video.test_hsync_pol, + link->dp_link.test_video.test_hsync_width, + link->dp_link.test_video.test_vsync_pol, + link->dp_link.test_video.test_vsync_width, + link->dp_link.test_video.test_h_width, + 
link->dp_link.test_video.test_v_height, + link->dp_link.test_video.test_rr_d, + link->dp_link.test_video.test_rr_n); + + return ret; +} + +/** + * dp_link_parse_link_training_params() - parses link training parameters from + * DPCD + * @link: Display Port Driver data + * + * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane + * count (Byte 0x220), and if these parsed values are valid. + */ +static int dp_link_parse_link_training_params(struct dp_link_private *link) +{ + u8 bp; + ssize_t rlen; + + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LINK_RATE, &bp); + if (rlen < 0) { + DRM_ERROR("failed to read link rate. rlen=%zd\n", rlen); + return rlen; + } + + if (!is_link_rate_valid(bp)) { + DRM_ERROR("invalid link rate = 0x%x\n", bp); + return -EINVAL; + } + + link->request.test_link_rate = bp; + DRM_DEBUG_DP("link rate = 0x%x\n", link->request.test_link_rate); + + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LANE_COUNT, &bp); + if (rlen < 0) { + DRM_ERROR("failed to read lane count. rlen=%zd\n", rlen); + return rlen; + } + bp &= DP_MAX_LANE_COUNT_MASK; + + if (!is_lane_count_valid(bp)) { + DRM_ERROR("invalid lane count = 0x%x\n", bp); + return -EINVAL; + } + + link->request.test_lane_count = bp; + DRM_DEBUG_DP("lane count = 0x%x\n", link->request.test_lane_count); + return 0; +} + +/** + * dp_link_parse_phy_test_params() - parses the phy link parameters + * @link: Display Port Driver data + * + * Parses the DPCD (Byte 0x248) for the DP PHY link pattern that is being + * requested. + */ +static int dp_link_parse_phy_test_params(struct dp_link_private *link) +{ + u8 data; + ssize_t rlen; + + rlen = drm_dp_dpcd_readb(link->aux, DP_PHY_TEST_PATTERN, + &data); + if (rlen < 0) { + DRM_ERROR("failed to read phy link pattern. rlen=%zd\n", rlen); + return rlen; + } + + link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07; + + DRM_DEBUG_DP("phy_test_pattern_sel = 0x%x\n", data); + + switch (data) { + case DP_PHY_TEST_PATTERN_SEL_MASK: + case DP_PHY_TEST_PATTERN_NONE: + case DP_PHY_TEST_PATTERN_D10_2: + case DP_PHY_TEST_PATTERN_ERROR_COUNT: + case DP_PHY_TEST_PATTERN_PRBS7: + case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: + case DP_PHY_TEST_PATTERN_CP2520: + return 0; + default: + return -EINVAL; + } +} + +/** + * dp_link_is_video_audio_test_requested() - checks for audio/video test request + * @link: test request bitmask set by the sink + * + * Returns true if the requested test is a permitted audio/video test. + */ +static bool dp_link_is_video_audio_test_requested(u32 link) +{ + u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN | + DP_TEST_LINK_AUDIO_PATTERN | + DP_TEST_LINK_AUDIO_DISABLED_VIDEO); + + return ((link & video_audio_test) && + !(link & ~video_audio_test)); +} + +/** + * dp_link_parse_request() - parses test request parameters from the sink + * @link: Display Port Driver data + * + * Parses the DPCD to check if an automated test is requested (Byte 0x201), + * and what type of test automation is being requested (Byte 0x218). + */ +static int dp_link_parse_request(struct dp_link_private *link) +{ + int ret = 0; + u8 data; + ssize_t rlen; + + /** + * Read the device service IRQ vector (Byte 0x201) to determine + * whether an automated test has been requested by the sink. + */ + rlen = drm_dp_dpcd_readb(link->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, &data); + if (rlen < 0) { + DRM_ERROR("aux read failed. 
rlen=%zd\n", rlen); + return rlen; + } + + DRM_DEBUG_DP("device service irq vector = 0x%x\n", data); + + if (!(data & DP_AUTOMATED_TEST_REQUEST)) { + DRM_DEBUG_DP("no test requested\n"); + return 0; + } + + /** + * Read the link request byte (Byte 0x218) to determine what type + * of automated link has been requested by the sink. + */ + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_REQUEST, &data); + if (rlen < 0) { + DRM_ERROR("aux read failed. rlen=%zd\n", rlen); + return rlen; + } + + if (!data || (data == DP_TEST_LINK_FAUX_PATTERN)) { + DRM_DEBUG_DP("link 0x%x not supported\n", data); + goto end; + } + + DRM_DEBUG_DP("Test:(0x%x) requested\n", data); + link->request.test_requested = data; + if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) { + ret = dp_link_parse_phy_test_params(link); + if (ret) + goto end; + ret = dp_link_parse_link_training_params(link); + if (ret) + goto end; + } + + if (link->request.test_requested == DP_TEST_LINK_TRAINING) { + ret = dp_link_parse_link_training_params(link); + if (ret) + goto end; + } + + if (dp_link_is_video_audio_test_requested( + link->request.test_requested)) { + ret = dp_link_parse_video_pattern_params(link); + if (ret) + goto end; + + ret = dp_link_parse_audio_pattern_params(link); + } +end: + /* + * Send a DP_TEST_ACK if all link parameters are valid, otherwise send + * a DP_TEST_NAK. + */ + if (ret) { + link->dp_link.test_response = DP_TEST_NAK; + } else { + if (link->request.test_requested != DP_TEST_LINK_EDID_READ) + link->dp_link.test_response = DP_TEST_ACK; + else + link->dp_link.test_response = + DP_TEST_EDID_CHECKSUM_WRITE; + } + + return ret; +} + +/** + * dp_link_parse_sink_count() - parses the sink count + * @dp_link: pointer to link module data + * + * Parses the DPCD to check if there is an update to the sink count + * (Byte 0x200), and whether all the sink devices connected have Content + * Protection enabled. + */ +static int dp_link_parse_sink_count(struct dp_link *dp_link) +{ + ssize_t rlen; + bool cp_ready; + + struct dp_link_private *link = container_of(dp_link, + struct dp_link_private, dp_link); + + rlen = drm_dp_dpcd_readb(link->aux, DP_SINK_COUNT, + &link->dp_link.sink_count); + if (rlen < 0) { + DRM_ERROR("sink count read failed. rlen=%zd\n", rlen); + return rlen; + } + + cp_ready = link->dp_link.sink_count & DP_SINK_CP_READY; + + link->dp_link.sink_count = + DP_GET_SINK_COUNT(link->dp_link.sink_count); + + DRM_DEBUG_DP("sink_count = 0x%x, cp_ready = 0x%x\n", + link->dp_link.sink_count, cp_ready); + return 0; +} + +static void dp_link_parse_sink_status_field(struct dp_link_private *link) +{ + int len = 0; + + link->prev_sink_count = link->dp_link.sink_count; + dp_link_parse_sink_count(&link->dp_link); + + len = drm_dp_dpcd_read_link_status(link->aux, + link->link_status); + if (len < DP_LINK_STATUS_SIZE) + DRM_ERROR("DP link status read failed\n"); + dp_link_parse_request(link); +} + +/** + * dp_link_process_link_training_request() - processes new training requests + * @link: Display Port link data + * + * This function will handle new link training requests that are initiated by + * the sink. In particular, it will update the requested lane count and link + * rate, and then trigger the link retraining procedure. + * + * The function will return 0 if a link training request has been processed, + * otherwise it will return -EINVAL. 
+ */ +static int dp_link_process_link_training_request(struct dp_link_private *link) +{ + if (link->request.test_requested != DP_TEST_LINK_TRAINING) + return -EINVAL; + + DRM_DEBUG_DP("Test:0x%x link rate = 0x%x, lane count = 0x%x\n", + DP_TEST_LINK_TRAINING, + link->request.test_link_rate, + link->request.test_lane_count); + + link->dp_link.link_params.num_lanes = link->request.test_lane_count; + link->dp_link.link_params.rate = link->request.test_link_rate; + + return 0; +} + +bool dp_link_send_test_response(struct dp_link *dp_link) +{ + struct dp_link_private *link = NULL; + int ret = 0; + + if (!dp_link) { + DRM_ERROR("invalid input\n"); + return false; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE, + dp_link->test_response); + + return ret == 1; +} + +int dp_link_psm_config(struct dp_link *dp_link, + struct dp_link_info *link_info, bool enable) +{ + struct dp_link_private *link = NULL; + int ret = 0; + + if (!dp_link) { + DRM_ERROR("invalid params\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + mutex_lock(&link->psm_mutex); + if (enable) + ret = dp_aux_link_power_down(link->aux, link_info); + else + ret = dp_aux_link_power_up(link->aux, link_info); + + if (ret) + DRM_ERROR("Failed to %s low power mode\n", enable ? + "enter" : "exit"); + else + dp_link->psm_enabled = enable; + + mutex_unlock(&link->psm_mutex); + return ret; +} + +bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum) +{ + struct dp_link_private *link = NULL; + int ret = 0; + + if (!dp_link) { + DRM_ERROR("invalid input\n"); + return false; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM, + checksum); + return ret == 1; +} + +static int dp_link_parse_vx_px(struct dp_link_private *link) +{ + int ret = 0; + + DRM_DEBUG_DP("vx: 0=%d, 1=%d, 2=%d, 3=%d\n", + drm_dp_get_adjust_request_voltage(link->link_status, 0), + drm_dp_get_adjust_request_voltage(link->link_status, 1), + drm_dp_get_adjust_request_voltage(link->link_status, 2), + drm_dp_get_adjust_request_voltage(link->link_status, 3)); + + DRM_DEBUG_DP("px: 0=%d, 1=%d, 2=%d, 3=%d\n", + drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0), + drm_dp_get_adjust_request_pre_emphasis(link->link_status, 1), + drm_dp_get_adjust_request_pre_emphasis(link->link_status, 2), + drm_dp_get_adjust_request_pre_emphasis(link->link_status, 3)); + + /** + * Update the voltage and pre-emphasis levels as per DPCD request + * vector. + */ + DRM_DEBUG_DP("Current: v_level = 0x%x, p_level = 0x%x\n", + link->dp_link.phy_params.v_level, + link->dp_link.phy_params.p_level); + link->dp_link.phy_params.v_level = + drm_dp_get_adjust_request_voltage(link->link_status, 0); + link->dp_link.phy_params.p_level = + drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0); + DRM_DEBUG_DP("Requested: v_level = 0x%x, p_level = 0x%x\n", + link->dp_link.phy_params.v_level, + link->dp_link.phy_params.p_level); + + return ret; +} + +/** + * dp_link_process_phy_test_pattern_request() - process new phy link requests + * @link: Display Port Driver data + * + * This function will handle new phy link pattern requests that are initiated + * by the sink. The function will return 0 if a phy link pattern has been + * processed, otherwise it will return -EINVAL. 
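+ * + * Beyond selecting the pattern, the handler re-validates the requested + * link rate and lane count with is_link_rate_valid() and + * is_lane_count_valid(), then latches the requested voltage swing and + * pre-emphasis levels through dp_link_parse_vx_px().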
+ */ +static int dp_link_process_phy_test_pattern_request( + struct dp_link_private *link) +{ + int ret = 0; + + if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) { + DRM_DEBUG_DP("no phy test\n"); + return -EINVAL; + } + + if (!is_link_rate_valid(link->request.test_link_rate) || + !is_lane_count_valid(link->request.test_lane_count)) { + DRM_ERROR("Invalid: link rate = 0x%x, lane count = 0x%x\n", + link->request.test_link_rate, + link->request.test_lane_count); + return -EINVAL; + } + + DRM_DEBUG_DP("Current: rate = 0x%x, lane count = 0x%x\n", + link->dp_link.link_params.rate, + link->dp_link.link_params.num_lanes); + + DRM_DEBUG_DP("Requested: rate = 0x%x, lane count = 0x%x\n", + link->request.test_link_rate, + link->request.test_lane_count); + + link->dp_link.link_params.num_lanes = link->request.test_lane_count; + link->dp_link.link_params.rate = link->request.test_link_rate; + + ret = dp_link_parse_vx_px(link); + + if (ret) + DRM_ERROR("parse_vx_px failed. ret=%d\n", ret); + + return ret; +} + +static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +/** + * dp_link_process_link_status_update() - processes link status updates + * @link: Display Port link module data + * + * This function will check for changes in the link status, e.g. clock + * recovery done on all lanes, and trigger link training if there is a + * failure/error on the link. + * + * The function will return 0 if a link status update has been processed, + * otherwise it will return -EINVAL. + */ +static int dp_link_process_link_status_update(struct dp_link_private *link) +{ + if (!(get_link_status(link->link_status, + DP_LANE_ALIGN_STATUS_UPDATED) & + DP_LINK_STATUS_UPDATED) || + (drm_dp_clock_recovery_ok(link->link_status, + link->dp_link.link_params.num_lanes) && + drm_dp_channel_eq_ok(link->link_status, + link->dp_link.link_params.num_lanes))) + return -EINVAL; + + DRM_DEBUG_DP("channel_eq_done = %d, clock_recovery_done = %d\n", + drm_dp_channel_eq_ok(link->link_status, + link->dp_link.link_params.num_lanes), + drm_dp_clock_recovery_ok(link->link_status, + link->dp_link.link_params.num_lanes)); + + return 0; +} + +/** + * dp_link_process_ds_port_status_change() - process port status changes + * @link: Display Port Driver data + * + * This function will handle downstream port updates that are initiated by + * the sink. If the downstream port status has changed, the EDID is read via + * AUX. + * + * The function will return 0 if a downstream port update has been + * processed, otherwise it will return -EINVAL. 
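+ * + * A change is detected either from DP_DOWNSTREAM_PORT_STATUS_CHANGED in + * the lane align status register or from a sink count that differs from + * the cached prev_sink_count.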
+ */ +static int dp_link_process_ds_port_status_change(struct dp_link_private *link) +{ + if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) & + DP_DOWNSTREAM_PORT_STATUS_CHANGED) + goto reset; + + if (link->prev_sink_count == link->dp_link.sink_count) + return -EINVAL; + +reset: + /* reset prev_sink_count */ + link->prev_sink_count = link->dp_link.sink_count; + + return 0; +} + +static bool dp_link_is_video_pattern_requested(struct dp_link_private *link) +{ + return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN) + && !(link->request.test_requested & + DP_TEST_LINK_AUDIO_DISABLED_VIDEO); +} + +static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link) +{ + return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN); +} + +static void dp_link_reset_data(struct dp_link_private *link) +{ + link->request = (const struct dp_link_request){ 0 }; + link->dp_link.test_video = (const struct dp_link_test_video){ 0 }; + link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN; + link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 }; + link->dp_link.phy_params.phy_test_pattern_sel = 0; + link->dp_link.sink_request = 0; + link->dp_link.test_response = 0; +} + +/** + * dp_link_process_request() - handle HPD IRQ transition to HIGH + * @dp_link: pointer to link module data + * + * This function will handle the HPD IRQ state transitions from LOW to HIGH + * (including cases when there are back to back HPD IRQ HIGH) indicating + * the start of a new link training request or sink status update. + */ +int dp_link_process_request(struct dp_link *dp_link) +{ + int ret = 0; + struct dp_link_private *link; + + if (!dp_link) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + dp_link_reset_data(link); + + dp_link_parse_sink_status_field(link); + + if (link->request.test_requested == DP_TEST_LINK_EDID_READ) { + dp_link->sink_request |= DP_TEST_LINK_EDID_READ; + return ret; + } + + ret = dp_link_process_ds_port_status_change(link); + if (!ret) { + dp_link->sink_request |= DS_PORT_STATUS_CHANGED; + return ret; + } + + ret = dp_link_process_link_training_request(link); + if (!ret) { + dp_link->sink_request |= DP_TEST_LINK_TRAINING; + return ret; + } + + ret = dp_link_process_phy_test_pattern_request(link); + if (!ret) { + dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; + return ret; + } + + ret = dp_link_process_link_status_update(link); + if (!ret) { + dp_link->sink_request |= DP_LINK_STATUS_UPDATED; + return ret; + } + + if (dp_link_is_video_pattern_requested(link)) { + ret = 0; + dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; + } + + if (dp_link_is_audio_pattern_requested(link)) { + dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; + return -EINVAL; + } + + return ret; +} + +int dp_link_get_colorimetry_config(struct dp_link *dp_link) +{ + u32 cc; + struct dp_link_private *link; + + if (!dp_link) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + /* + * Unless a video pattern CTS test is ongoing, use RGB_VESA + * Only RGB_VESA and RGB_CEA supported for now + */ + if (dp_link_is_video_pattern_requested(link)) + cc = link->dp_link.test_video.test_dyn_range; + else + cc = DP_TEST_DYNAMIC_RANGE_VESA; + + return cc; +} + +int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) +{ + int i; + int v_max = 0, p_max = 0; + + if (!dp_link) { + DRM_ERROR("invalid input\n"); 
+ return -EINVAL; + } + + /* use the max level across lanes */ + for (i = 0; i < dp_link->link_params.num_lanes; i++) { + u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i); + u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status, + i); + DRM_DEBUG_DP("lane=%d req_vol_swing=%d req_pre_emphasis=%d\n", + i, data_v, data_p); + if (v_max < data_v) + v_max = data_v; + if (p_max < data_p) + p_max = data_p; + } + + dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT; + dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT; + + /** + * Adjust the voltage swing and pre-emphasis level combination to within + * the allowable range. + */ + if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) { + DRM_DEBUG_DP("Requested vSwingLevel=%d, change to %d\n", + dp_link->phy_params.v_level, + DP_TRAIN_VOLTAGE_SWING_MAX); + dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX; + } + + if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) { + DRM_DEBUG_DP("Requested preEmphasisLevel=%d, change to %d\n", + dp_link->phy_params.p_level, + DP_TRAIN_PRE_EMPHASIS_MAX); + dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX; + } + + if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1) + && (dp_link->phy_params.v_level == + DP_TRAIN_VOLTAGE_SWING_LVL_2)) { + DRM_DEBUG_DP("Requested preEmphasisLevel=%d, change to %d\n", + dp_link->phy_params.p_level, + DP_TRAIN_PRE_EMPHASIS_LVL_1); + dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1; + } + + DRM_DEBUG_DP("adjusted: v_level=%d, p_level=%d\n", + dp_link->phy_params.v_level, dp_link->phy_params.p_level); + + return 0; +} + +u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) +{ + u32 tbd; + + /* + * Few simplistic rules and assumptions made here: + * 1. Test bit depth is bit depth per color component + * 2. Assume 3 color components + */ + switch (bpp) { + case 18: + tbd = DP_TEST_BIT_DEPTH_6; + break; + case 24: + tbd = DP_TEST_BIT_DEPTH_8; + break; + case 30: + tbd = DP_TEST_BIT_DEPTH_10; + break; + default: + tbd = DP_TEST_BIT_DEPTH_UNKNOWN; + break; + } + + if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN) + tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT); + + return tbd; +} + +struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux) +{ + struct dp_link_private *link; + struct dp_link *dp_link; + + if (!dev || !aux) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL); + if (!link) + return ERR_PTR(-ENOMEM); + + link->dev = dev; + link->aux = aux; + + mutex_init(&link->psm_mutex); + dp_link = &link->dp_link; + + return dp_link; +} diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h new file mode 100644 index 000000000000..49811b6221e5 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_link.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_LINK_H_ +#define _DP_LINK_H_ + +#include "dp_aux.h" + +#define DS_PORT_STATUS_CHANGED 0x200 +#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF +#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0) + +struct dp_link_info { + unsigned char revision; + unsigned int rate; + unsigned int num_lanes; + unsigned long capabilities; +}; + +enum dp_link_voltage_level { + DP_TRAIN_VOLTAGE_SWING_LVL_0 = 0, + DP_TRAIN_VOLTAGE_SWING_LVL_1 = 1, + DP_TRAIN_VOLTAGE_SWING_LVL_2 = 2, + DP_TRAIN_VOLTAGE_SWING_MAX = DP_TRAIN_VOLTAGE_SWING_LVL_2, +}; + +enum dp_link_preemaphasis_level { + DP_TRAIN_PRE_EMPHASIS_LVL_0 = 0, + DP_TRAIN_PRE_EMPHASIS_LVL_1 = 1, + DP_TRAIN_PRE_EMPHASIS_LVL_2 = 2, + DP_TRAIN_PRE_EMPHASIS_MAX = DP_TRAIN_PRE_EMPHASIS_LVL_2, +}; + +struct dp_link_test_video { + u32 test_video_pattern; + u32 test_bit_depth; + u32 test_dyn_range; + u32 test_h_total; + u32 test_v_total; + u32 test_h_start; + u32 test_v_start; + u32 test_hsync_pol; + u32 test_hsync_width; + u32 test_vsync_pol; + u32 test_vsync_width; + u32 test_h_width; + u32 test_v_height; + u32 test_rr_d; + u32 test_rr_n; +}; + +struct dp_link_test_audio { + u32 test_audio_sampling_rate; + u32 test_audio_channel_count; + u32 test_audio_pattern_type; + u32 test_audio_period_ch_1; + u32 test_audio_period_ch_2; + u32 test_audio_period_ch_3; + u32 test_audio_period_ch_4; + u32 test_audio_period_ch_5; + u32 test_audio_period_ch_6; + u32 test_audio_period_ch_7; + u32 test_audio_period_ch_8; +}; + +struct dp_link_phy_params { + u32 phy_test_pattern_sel; + u8 v_level; + u8 p_level; +}; + +struct dp_link { + u32 sink_request; + u32 test_response; + bool psm_enabled; + + u8 sink_count; + struct dp_link_test_video test_video; + struct dp_link_test_audio test_audio; + struct dp_link_phy_params phy_params; + struct dp_link_info link_params; +}; + +/** + * dp_link_bit_depth_to_bpp() - convert test bit depth to bpp + * @tbd: test bit depth + * + * Returns the bits per pixel (bpp) to be used corresponding to the + * bit depth value. This function assumes that bit depth has + * already been validated. + */ +static inline u32 dp_link_bit_depth_to_bpp(u32 tbd) +{ + /* + * Few simplistic rules and assumptions made here: + * 1. Bit depth is per color component + * 2. If bit depth is unknown return 0 + * 3. Assume 3 color components + */ + switch (tbd) { + case DP_TEST_BIT_DEPTH_6: + return 18; + case DP_TEST_BIT_DEPTH_8: + return 24; + case DP_TEST_BIT_DEPTH_10: + return 30; + case DP_TEST_BIT_DEPTH_UNKNOWN: + default: + return 0; + } +} + +/** + * dp_link_bit_depth_to_bpc() - convert test bit depth to bpc + * @tbd: test bit depth + * + * Returns the bits per component (bpc) to be used corresponding to the + * bit depth value. This function assumes that bit depth has + * already been validated. 
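+ * + * A quick consistency check of the two helpers, assuming a validated + * value: + * + * dp_link_bit_depth_to_bpc(DP_TEST_BIT_DEPTH_8) == 8 + * dp_link_bit_depth_to_bpp(DP_TEST_BIT_DEPTH_8) == 24, i.e. 3 * bpc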
+ */ +static inline u32 dp_link_bit_depth_to_bpc(u32 tbd) +{ + switch (tbd) { + case DP_TEST_BIT_DEPTH_6: + return 6; + case DP_TEST_BIT_DEPTH_8: + return 8; + case DP_TEST_BIT_DEPTH_10: + return 10; + case DP_TEST_BIT_DEPTH_UNKNOWN: + default: + return 0; + } +} + +u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp); +int dp_link_process_request(struct dp_link *dp_link); +int dp_link_get_colorimetry_config(struct dp_link *dp_link); +int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status); +bool dp_link_send_test_response(struct dp_link *dp_link); +int dp_link_psm_config(struct dp_link *dp_link, + struct dp_link_info *link_info, bool enable); +bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum); + +/** + * dp_link_get() - get the functionalities of dp test module + * + * + * return: a pointer to dp_link struct + */ +struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux); + +#endif /* _DP_LINK_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c new file mode 100644 index 000000000000..18cec4fc5e0b --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -0,0 +1,463 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#include "dp_panel.h" + +#include <drm/drm_connector.h> +#include <drm/drm_edid.h> +#include <drm/drm_print.h> + +struct dp_panel_private { + struct device *dev; + struct dp_panel dp_panel; + struct drm_dp_aux *aux; + struct dp_link *link; + struct dp_catalog *catalog; + bool panel_on; + bool aux_cfg_update_done; +}; + +static int dp_panel_read_dpcd(struct dp_panel *dp_panel) +{ + int rc = 0; + size_t len; + ssize_t rlen; + struct dp_panel_private *panel; + struct dp_link_info *link_info; + u8 *dpcd, major = 0, minor = 0, temp; + u32 offset = DP_DPCD_REV; + + dpcd = dp_panel->dpcd; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + link_info = &dp_panel->link_info; + + rlen = drm_dp_dpcd_read(panel->aux, offset, + dpcd, (DP_RECEIVER_CAP_SIZE + 1)); + if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) { + DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen); + if (rlen == -ETIMEDOUT) + rc = rlen; + else + rc = -EINVAL; + + goto end; + } + + temp = dpcd[DP_TRAINING_AUX_RD_INTERVAL]; + + /* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */ + if (temp & BIT(7)) { + DRM_DEBUG_DP("using EXTENDED_RECEIVER_CAPABILITY_FIELD\n"); + offset = DPRX_EXTENDED_DPCD_FIELD; + } + + rlen = drm_dp_dpcd_read(panel->aux, offset, + dpcd, (DP_RECEIVER_CAP_SIZE + 1)); + if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) { + DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen); + if (rlen == -ETIMEDOUT) + rc = rlen; + else + rc = -EINVAL; + + goto end; + } + + link_info->revision = dpcd[DP_DPCD_REV]; + major = (link_info->revision >> 4) & 0x0f; + minor = link_info->revision & 0x0f; + + link_info->rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); + link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; + + if (link_info->num_lanes > dp_panel->max_dp_lanes) + link_info->num_lanes = dp_panel->max_dp_lanes; + + /* Limit support upto HBR2 until HBR3 support is added */ + if (link_info->rate >= (drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4))) + link_info->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4); + + DRM_DEBUG_DP("version: %d.%d\n", major, minor); + DRM_DEBUG_DP("link_rate=%d\n", link_info->rate); + DRM_DEBUG_DP("lane_count=%d\n", link_info->num_lanes); + + if (drm_dp_enhanced_frame_cap(dpcd)) + 
link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; + + dp_panel->dfp_present = dpcd[DP_DOWNSTREAMPORT_PRESENT]; + dp_panel->dfp_present &= DP_DWN_STRM_PORT_PRESENT; + + if (dp_panel->dfp_present && (dpcd[DP_DPCD_REV] > 0x10)) { + dp_panel->ds_port_cnt = dpcd[DP_DOWN_STREAM_PORT_COUNT]; + dp_panel->ds_port_cnt &= DP_PORT_COUNT_MASK; + len = DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE; + + rlen = drm_dp_dpcd_read(panel->aux, + DP_DOWNSTREAM_PORT_0, dp_panel->ds_cap_info, len); + if (rlen < len) { + DRM_ERROR("ds port status failed, rlen=%zd\n", rlen); + rc = -EINVAL; + goto end; + } + } + +end: + return rc; +} + +static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel, + u32 mode_edid_bpp, u32 mode_pclk_khz) +{ + struct dp_link_info *link_info; + const u32 max_supported_bpp = 30, min_supported_bpp = 18; + u32 bpp = 0, data_rate_khz = 0; + + bpp = min_t(u32, mode_edid_bpp, max_supported_bpp); + + link_info = &dp_panel->link_info; + data_rate_khz = link_info->num_lanes * link_info->rate * 8; + + while (bpp > min_supported_bpp) { + if (mode_pclk_khz * bpp <= data_rate_khz) + break; + bpp -= 6; + } + + return bpp; +} + +static int dp_panel_update_modes(struct drm_connector *connector, + struct edid *edid) +{ + int rc = 0; + + if (edid) { + rc = drm_connector_update_edid_property(connector, edid); + if (rc) { + DRM_ERROR("failed to update edid property %d\n", rc); + return rc; + } + rc = drm_add_edid_modes(connector, edid); + DRM_DEBUG_DP("%s -", __func__); + return rc; + } + + rc = drm_connector_update_edid_property(connector, NULL); + if (rc) + DRM_ERROR("failed to update edid property %d\n", rc); + + return rc; +} + +int dp_panel_read_sink_caps(struct dp_panel *dp_panel, + struct drm_connector *connector) +{ + int rc = 0, bw_code; + int rlen, count; + struct dp_panel_private *panel; + + if (!dp_panel || !connector) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + rc = dp_panel_read_dpcd(dp_panel); + bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate); + if (rc || !is_link_rate_valid(bw_code) || + !is_lane_count_valid(dp_panel->link_info.num_lanes) || + (bw_code > dp_panel->max_bw_code)) { + DRM_ERROR("read dpcd failed %d\n", rc); + return rc; + } + + if (dp_panel->dfp_present) { + rlen = drm_dp_dpcd_read(panel->aux, DP_SINK_COUNT, + &count, 1); + if (rlen == 1) { + count = DP_GET_SINK_COUNT(count); + if (!count) { + DRM_ERROR("no downstream ports connected\n"); + panel->link->sink_count = 0; + rc = -ENOTCONN; + goto end; + } + } + } + + kfree(dp_panel->edid); + dp_panel->edid = NULL; + + dp_panel->edid = drm_get_edid(connector, + &panel->aux->ddc); + if (!dp_panel->edid) { + DRM_ERROR("panel edid read failed\n"); + + /* fail safe edid */ + mutex_lock(&connector->dev->mode_config.mutex); + if (drm_add_modes_noedid(connector, 640, 480)) + drm_set_preferred_mode(connector, 640, 480); + mutex_unlock(&connector->dev->mode_config.mutex); + } + + if (panel->aux_cfg_update_done) { + DRM_DEBUG_DP("read DPCD with updated AUX config\n"); + rc = dp_panel_read_dpcd(dp_panel); + bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate); + if (rc || !is_link_rate_valid(bw_code) || + !is_lane_count_valid(dp_panel->link_info.num_lanes) + || (bw_code > dp_panel->max_bw_code)) { + DRM_ERROR("read dpcd failed %d\n", rc); + return rc; + } + panel->aux_cfg_update_done = false; + } +end: + return rc; +} + +u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, + u32 mode_edid_bpp, u32 mode_pclk_khz) +{ + 
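/* + * When the sink is running a video pattern test, honor the bit depth + * it requested by converting it to bpp; otherwise clamp the EDID bpp + * to what the negotiated link bandwidth can carry. + */ +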
struct dp_panel_private *panel; + u32 bpp = mode_edid_bpp; + + if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) { + DRM_ERROR("invalid input\n"); + return 0; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (dp_panel->video_test) + bpp = dp_link_bit_depth_to_bpp( + panel->link->test_video.test_bit_depth); + else + bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp, + mode_pclk_khz); + + return bpp; +} + +int dp_panel_get_modes(struct dp_panel *dp_panel, + struct drm_connector *connector, struct dp_display_mode *mode) +{ + if (!dp_panel) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + if (dp_panel->edid) + return dp_panel_update_modes(connector, dp_panel->edid); + + return 0; +} + +static u8 dp_panel_get_edid_checksum(struct edid *edid) +{ + struct edid *last_block; + u8 *raw_edid; + bool is_edid_corrupt; + + if (!edid) { + DRM_ERROR("invalid edid input\n"); + return 0; + } + + raw_edid = (u8 *)edid; + raw_edid += (edid->extensions * EDID_LENGTH); + last_block = (struct edid *)raw_edid; + + /* block type extension */ + drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt); + if (!is_edid_corrupt) + return last_block->checksum; + + DRM_ERROR("Invalid block, no checksum\n"); + return 0; +} + +void dp_panel_handle_sink_request(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + + if (!dp_panel) { + DRM_ERROR("invalid input\n"); + return; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) { + u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid); + + dp_link_send_edid_checksum(panel->link, checksum); + dp_link_send_test_response(panel->link); + } +} + +void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable) +{ + struct dp_catalog *catalog; + struct dp_panel_private *panel; + + if (!dp_panel) { + DRM_ERROR("invalid input\n"); + return; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + + if (!panel->panel_on) { + DRM_DEBUG_DP("DP panel not enabled, handle TPG on next on\n"); + return; + } + + if (!enable) { + dp_catalog_panel_tpg_disable(catalog); + return; + } + + DRM_DEBUG_DP("%s: calling catalog tpg_enable\n", __func__); + dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode); +} + +void dp_panel_dump_regs(struct dp_panel *dp_panel) +{ + struct dp_catalog *catalog; + struct dp_panel_private *panel; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + + dp_catalog_dump_regs(catalog); +} + +int dp_panel_timing_cfg(struct dp_panel *dp_panel) +{ + int rc = 0; + u32 data, total_ver, total_hor; + struct dp_catalog *catalog; + struct dp_panel_private *panel; + struct drm_display_mode *drm_mode; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + drm_mode = &panel->dp_panel.dp_mode.drm_mode; + + DRM_DEBUG_DP("width=%d hporch= %d %d %d\n", + drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end, + drm_mode->hsync_start - drm_mode->hdisplay, + drm_mode->hsync_end - drm_mode->hsync_start); + + DRM_DEBUG_DP("height=%d vporch= %d %d %d\n", + drm_mode->vdisplay, drm_mode->vtotal - drm_mode->vsync_end, + drm_mode->vsync_start - drm_mode->vdisplay, + drm_mode->vsync_end - drm_mode->vsync_start); + + total_hor = drm_mode->htotal; + + total_ver = drm_mode->vtotal; + + data = total_ver; + data <<= 16; + data |= total_hor; + + catalog->total = data; + + data = 
(drm_mode->vtotal - drm_mode->vsync_start); + data <<= 16; + data |= (drm_mode->htotal - drm_mode->hsync_start); + + catalog->sync_start = data; + + data = drm_mode->vsync_end - drm_mode->vsync_start; + data <<= 16; + data |= (panel->dp_panel.dp_mode.v_active_low << 31); + data |= drm_mode->hsync_end - drm_mode->hsync_start; + data |= (panel->dp_panel.dp_mode.h_active_low << 15); + + catalog->width_blanking = data; + + data = drm_mode->vdisplay; + data <<= 16; + data |= drm_mode->hdisplay; + + catalog->dp_active = data; + + dp_catalog_panel_timing_cfg(catalog); + panel->panel_on = true; + + return rc; +} + +int dp_panel_init_panel_info(struct dp_panel *dp_panel) +{ + int rc = 0; + struct drm_display_mode *drm_mode; + + drm_mode = &dp_panel->dp_mode.drm_mode; + + /* + * print resolution info as this is a result + * of user initiated action of cable connection + */ + DRM_DEBUG_DP("SET NEW RESOLUTION:\n"); + DRM_DEBUG_DP("%dx%d@%dfps\n", drm_mode->hdisplay, + drm_mode->vdisplay, drm_mode_vrefresh(drm_mode)); + DRM_DEBUG_DP("h_porches(back|front|width) = (%d|%d|%d)\n", + drm_mode->htotal - drm_mode->hsync_end, + drm_mode->hsync_start - drm_mode->hdisplay, + drm_mode->hsync_end - drm_mode->hsync_start); + DRM_DEBUG_DP("v_porches(back|front|width) = (%d|%d|%d)\n", + drm_mode->vtotal - drm_mode->vsync_end, + drm_mode->vsync_start - drm_mode->vdisplay, + drm_mode->vsync_end - drm_mode->vsync_start); + DRM_DEBUG_DP("pixel clock (KHz)=(%d)\n", drm_mode->clock); + DRM_DEBUG_DP("bpp = %d\n", dp_panel->dp_mode.bpp); + + dp_panel->dp_mode.bpp = max_t(u32, 18, + min_t(u32, dp_panel->dp_mode.bpp, 30)); + DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp); + + return rc; +} + +struct dp_panel *dp_panel_get(struct dp_panel_in *in) +{ + struct dp_panel_private *panel; + struct dp_panel *dp_panel; + + if (!in->dev || !in->catalog || !in->aux || !in->link) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL); + if (!panel) + return ERR_PTR(-ENOMEM); + + panel->dev = in->dev; + panel->aux = in->aux; + panel->catalog = in->catalog; + panel->link = in->link; + + dp_panel = &panel->dp_panel; + dp_panel->max_bw_code = DP_LINK_BW_8_1; + panel->aux_cfg_update_done = false; + + return dp_panel; +} + +void dp_panel_put(struct dp_panel *dp_panel) +{ + if (!dp_panel) + return; + + kfree(dp_panel->edid); +} diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h new file mode 100644 index 000000000000..9023e5bb4b8b --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_PANEL_H_ +#define _DP_PANEL_H_ + +#include <drm/msm_drm.h> + +#include "dp_aux.h" +#include "dp_link.h" +#include "dp_hpd.h" + +struct edid; + +#define DPRX_EXTENDED_DPCD_FIELD 0x2200 + +#define DP_DOWNSTREAM_PORTS 4 +#define DP_DOWNSTREAM_CAP_SIZE 4 + +struct dp_display_mode { + struct drm_display_mode drm_mode; + u32 capabilities; + u32 bpp; + u32 h_active_low; + u32 v_active_low; +}; + +struct dp_panel_in { + struct device *dev; + struct drm_dp_aux *aux; + struct dp_link *link; + struct dp_catalog *catalog; +}; + +struct dp_panel { + /* dpcd raw data */ + u8 dpcd[DP_RECEIVER_CAP_SIZE + 1]; + u8 ds_cap_info[DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE]; + u32 ds_port_cnt; + u32 dfp_present; + + struct dp_link_info link_info; + struct drm_dp_desc desc; + struct edid *edid; + struct drm_connector *connector; + struct dp_display_mode dp_mode; + bool video_test; + + u32 vic; + u32 max_pclk_khz; + u32 max_dp_lanes; + + u32 max_bw_code; +}; + +int dp_panel_init_panel_info(struct dp_panel *dp_panel); +int dp_panel_deinit(struct dp_panel *dp_panel); +int dp_panel_timing_cfg(struct dp_panel *dp_panel); +void dp_panel_dump_regs(struct dp_panel *dp_panel); +int dp_panel_read_sink_caps(struct dp_panel *dp_panel, + struct drm_connector *connector); +u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp, + u32 mode_pclk_khz); +int dp_panel_get_modes(struct dp_panel *dp_panel, + struct drm_connector *connector, struct dp_display_mode *mode); +void dp_panel_handle_sink_request(struct dp_panel *dp_panel); +void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable); + +/** + * is_link_rate_valid() - validates the link rate + * @lane_rate: link rate requested by the sink + * + * Returns true if the requested link rate is supported. + */ +static inline bool is_link_rate_valid(u32 bw_code) +{ + return (bw_code == DP_LINK_BW_1_62 || + bw_code == DP_LINK_BW_2_7 || + bw_code == DP_LINK_BW_5_4 || + bw_code == DP_LINK_BW_8_1); +} + +/** + * dp_link_is_lane_count_valid() - validates the lane count + * @lane_count: lane count requested by the sink + * + * Returns true if the requested lane count is supported. + */ +static inline bool is_lane_count_valid(u32 lane_count) +{ + return (lane_count == 1 || + lane_count == 2 || + lane_count == 4); +} + +struct dp_panel *dp_panel_get(struct dp_panel_in *in); +void dp_panel_put(struct dp_panel *dp_panel); +#endif /* _DP_PANEL_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c new file mode 100644 index 000000000000..0519dd3ac3c3 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_parser.c @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/of_gpio.h> +#include <linux/phy/phy.h> + +#include <drm/drm_print.h> + +#include "dp_parser.h" +#include "dp_reg.h" + +static const struct dp_regulator_cfg sdm845_dp_reg_cfg = { + .num = 2, + .regs = { + {"vdda-1p2", 21800, 4 }, /* 1.2 V */ + {"vdda-0p9", 36000, 32 }, /* 0.9 V */ + }, +}; + +static int msm_dss_ioremap(struct platform_device *pdev, + struct dss_io_data *io_data) +{ + struct resource *res = NULL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + DRM_ERROR("%pS->%s: msm_dss_get_res failed\n", + __builtin_return_address(0), __func__); + return -ENODEV; + } + + io_data->len = (u32)resource_size(res); + io_data->base = ioremap(res->start, io_data->len); + if (!io_data->base) { + DRM_ERROR("%pS->%s: ioremap failed\n", + __builtin_return_address(0), __func__); + return -EIO; + } + + return 0; +} + +static void msm_dss_iounmap(struct dss_io_data *io_data) +{ + if (io_data->base) { + iounmap(io_data->base); + io_data->base = NULL; + } + io_data->len = 0; +} + +static void dp_parser_unmap_io_resources(struct dp_parser *parser) +{ + struct dp_io *io = &parser->io; + + msm_dss_iounmap(&io->dp_controller); +} + +static int dp_parser_ctrl_res(struct dp_parser *parser) +{ + int rc = 0; + struct platform_device *pdev = parser->pdev; + struct dp_io *io = &parser->io; + + rc = msm_dss_ioremap(pdev, &io->dp_controller); + if (rc) { + DRM_ERROR("unable to remap dp io resources, rc=%d\n", rc); + goto err; + } + + io->phy = devm_phy_get(&pdev->dev, "dp"); + if (IS_ERR(io->phy)) { + rc = PTR_ERR(io->phy); + goto err; + } + + return 0; +err: + dp_parser_unmap_io_resources(parser); + return rc; +} + +static int dp_parser_misc(struct dp_parser *parser) +{ + struct device_node *of_node = parser->pdev->dev.of_node; + int len = 0; + const char *data_lane_property = "data-lanes"; + + len = of_property_count_elems_of_size(of_node, + data_lane_property, sizeof(u32)); + if (len < 0) { + DRM_WARN("Invalid property %s, default max DP lanes = %d\n", + data_lane_property, DP_MAX_NUM_DP_LANES); + len = DP_MAX_NUM_DP_LANES; + } + + parser->max_dp_lanes = len; + return 0; +} + +static inline bool dp_parser_check_prefix(const char *clk_prefix, + const char *clk_name) +{ + return !strncmp(clk_prefix, clk_name, strlen(clk_prefix)); +} + +static int dp_parser_init_clk_data(struct dp_parser *parser) +{ + int num_clk, i, rc; + int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0; + const char *clk_name; + struct device *dev = &parser->pdev->dev; + struct dss_module_power *core_power = &parser->mp[DP_CORE_PM]; + struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM]; + struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM]; + + num_clk = of_property_count_strings(dev->of_node, "clock-names"); + if (num_clk <= 0) { + DRM_ERROR("no clocks are defined\n"); + return -EINVAL; + } + + for (i = 0; i < num_clk; i++) { + rc = of_property_read_string_index(dev->of_node, + "clock-names", i, &clk_name); + if (rc < 0) + return rc; + + if (dp_parser_check_prefix("core", clk_name)) + core_clk_count++; + + if (dp_parser_check_prefix("ctrl", clk_name)) + ctrl_clk_count++; + + if (dp_parser_check_prefix("stream", clk_name)) + stream_clk_count++; + } + + /* Initialize the CORE power module */ + if (core_clk_count == 0) { + DRM_ERROR("no core clocks are defined\n"); + return -EINVAL; + } + + core_power->num_clk = core_clk_count; + core_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * core_power->num_clk, + GFP_KERNEL); + if 
(!core_power->clk_config) + return -EINVAL; + + /* Initialize the CTRL power module */ + if (ctrl_clk_count == 0) { + DRM_ERROR("no ctrl clocks are defined\n"); + return -EINVAL; + } + + ctrl_power->num_clk = ctrl_clk_count; + ctrl_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * ctrl_power->num_clk, + GFP_KERNEL); + if (!ctrl_power->clk_config) { + ctrl_power->num_clk = 0; + return -EINVAL; + } + + /* Initialize the STREAM power module */ + if (stream_clk_count == 0) { + DRM_ERROR("no stream (pixel) clocks are defined\n"); + return -EINVAL; + } + + stream_power->num_clk = stream_clk_count; + stream_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * stream_power->num_clk, + GFP_KERNEL); + if (!stream_power->clk_config) { + stream_power->num_clk = 0; + return -EINVAL; + } + + return 0; +} + +static int dp_parser_clock(struct dp_parser *parser) +{ + int rc = 0, i = 0; + int num_clk = 0; + int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0; + int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0; + const char *clk_name; + struct device *dev = &parser->pdev->dev; + struct dss_module_power *core_power = &parser->mp[DP_CORE_PM]; + struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM]; + struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM]; + + rc = dp_parser_init_clk_data(parser); + if (rc) { + DRM_ERROR("failed to initialize power data %d\n", rc); + return -EINVAL; + } + + core_clk_count = core_power->num_clk; + ctrl_clk_count = ctrl_power->num_clk; + stream_clk_count = stream_power->num_clk; + + num_clk = core_clk_count + ctrl_clk_count + stream_clk_count; + + for (i = 0; i < num_clk; i++) { + rc = of_property_read_string_index(dev->of_node, "clock-names", + i, &clk_name); + if (rc) { + DRM_ERROR("error reading clock-names %d\n", rc); + return rc; + } + if (dp_parser_check_prefix("core", clk_name) && + core_clk_index < core_clk_count) { + struct dss_clk *clk = + &core_power->clk_config[core_clk_index]; + strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name)); + clk->type = DSS_CLK_AHB; + core_clk_index++; + } else if (dp_parser_check_prefix("stream", clk_name) && + stream_clk_index < stream_clk_count) { + struct dss_clk *clk = + &stream_power->clk_config[stream_clk_index]; + strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name)); + clk->type = DSS_CLK_PCLK; + stream_clk_index++; + } else if (dp_parser_check_prefix("ctrl", clk_name) && + ctrl_clk_index < ctrl_clk_count) { + struct dss_clk *clk = + &ctrl_power->clk_config[ctrl_clk_index]; + strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name)); + ctrl_clk_index++; + if (dp_parser_check_prefix("ctrl_link", clk_name) || + dp_parser_check_prefix("stream_pixel", clk_name)) + clk->type = DSS_CLK_PCLK; + else + clk->type = DSS_CLK_AHB; + } + } + + DRM_DEBUG_DP("clock parsing successful\n"); + + return 0; +} + +static int dp_parser_parse(struct dp_parser *parser) +{ + int rc = 0; + + if (!parser) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + rc = dp_parser_ctrl_res(parser); + if (rc) + return rc; + + rc = dp_parser_misc(parser); + if (rc) + return rc; + + rc = dp_parser_clock(parser); + if (rc) + return rc; + + /* Map the corresponding regulator information according to + * version. Currently, since we only have one supported platform, + * mapping the regulator directly. 
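+ * Platforms other than sdm845 would add their own dp_regulator_cfg + * table above and select it here, presumably keyed off the compatible + * string.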
+ */ + parser->regulator_cfg = &sdm845_dp_reg_cfg; + + return 0; +} + +struct dp_parser *dp_parser_get(struct platform_device *pdev) +{ + struct dp_parser *parser; + + parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL); + if (!parser) + return ERR_PTR(-ENOMEM); + + parser->parse = dp_parser_parse; + parser->pdev = pdev; + + return parser; +} diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h new file mode 100644 index 000000000000..34b49628bbaf --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_parser.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_PARSER_H_ +#define _DP_PARSER_H_ + +#include <linux/platform_device.h> +#include <linux/phy/phy.h> +#include <linux/phy/phy-dp.h> + +#include "dpu_io_util.h" +#include "msm_drv.h" + +#define DP_LABEL "MDSS DP DISPLAY" +#define DP_MAX_PIXEL_CLK_KHZ 675000 +#define DP_MAX_NUM_DP_LANES 4 + +enum dp_pm_type { + DP_CORE_PM, + DP_CTRL_PM, + DP_STREAM_PM, + DP_PHY_PM, + DP_MAX_PM +}; + +struct dss_io_data { + u32 len; + void __iomem *base; +}; + +static inline const char *dp_parser_pm_name(enum dp_pm_type module) +{ + switch (module) { + case DP_CORE_PM: return "DP_CORE_PM"; + case DP_CTRL_PM: return "DP_CTRL_PM"; + case DP_STREAM_PM: return "DP_STREAM_PM"; + case DP_PHY_PM: return "DP_PHY_PM"; + default: return "???"; + } +} + +/** + * struct dp_display_data - display related device tree data. + * + * @ctrl_node: referece to controller device + * @phy_node: reference to phy device + * @is_active: is the controller currently active + * @name: name of the display + * @display_type: type of the display + */ +struct dp_display_data { + struct device_node *ctrl_node; + struct device_node *phy_node; + bool is_active; + const char *name; + const char *display_type; +}; + +/** + * struct dp_ctrl_resource - controller's IO related data + * + * @dp_controller: Display Port controller mapped memory address + * @phy_io: phy's mapped memory address + */ +struct dp_io { + struct dss_io_data dp_controller; + struct phy *phy; + union phy_configure_opts phy_opts; +}; + +/** + * struct dp_pinctrl - DP's pin control + * + * @pin: pin-controller's instance + * @state_active: active state pin control + * @state_hpd_active: hpd active state pin control + * @state_suspend: suspend state pin control + */ +struct dp_pinctrl { + struct pinctrl *pin; + struct pinctrl_state *state_active; + struct pinctrl_state *state_hpd_active; + struct pinctrl_state *state_suspend; +}; + +#define DP_DEV_REGULATOR_MAX 4 + +/* Regulators for DP devices */ +struct dp_reg_entry { + char name[32]; + int enable_load; + int disable_load; +}; + +struct dp_regulator_cfg { + int num; + struct dp_reg_entry regs[DP_DEV_REGULATOR_MAX]; +}; + +/** + * struct dp_parser - DP parser's data exposed to clients + * + * @pdev: platform data of the client + * @mp: gpio, regulator and clock related data + * @pinctrl: pin-control related data + * @disp_data: controller's display related data + * @parse: function to be called by client to parse device tree. 
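+ * @io: DP controller and PHY IO data + * @regulator_cfg: platform regulator configuration table + * @max_dp_lanes: maximum DP lanes, from the data-lanes DT property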
+ */ +struct dp_parser { + struct platform_device *pdev; + struct dss_module_power mp[DP_MAX_PM]; + struct dp_pinctrl pinctrl; + struct dp_io io; + struct dp_display_data disp_data; + const struct dp_regulator_cfg *regulator_cfg; + u32 max_dp_lanes; + + int (*parse)(struct dp_parser *parser); +}; + +/** + * dp_parser_get() - get the DP's device tree parser module + * + * @pdev: platform data of the client + * return: pointer to dp_parser structure. + * + * This function provides client capability to parse the + * device tree and populate the data structures. The data + * related to clock, regulators, pin-control and other + * can be parsed using this module. + */ +struct dp_parser *dp_parser_get(struct platform_device *pdev); + +#endif diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c new file mode 100644 index 000000000000..17c1fc6a2d44 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_power.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/regulator/consumer.h> +#include "dp_power.h" +#include "msm_drv.h" + +struct dp_power_private { + struct dp_parser *parser; + struct platform_device *pdev; + struct clk *link_clk_src; + struct clk *pixel_provider; + struct clk *link_provider; + struct regulator_bulk_data supplies[DP_DEV_REGULATOR_MAX]; + + struct dp_power dp_power; +}; + +static void dp_power_regulator_disable(struct dp_power_private *power) +{ + struct regulator_bulk_data *s = power->supplies; + const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs; + int num = power->parser->regulator_cfg->num; + int i; + + DBG(""); + for (i = num - 1; i >= 0; i--) + if (regs[i].disable_load >= 0) + regulator_set_load(s[i].consumer, + regs[i].disable_load); + + regulator_bulk_disable(num, s); +} + +static int dp_power_regulator_enable(struct dp_power_private *power) +{ + struct regulator_bulk_data *s = power->supplies; + const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs; + int num = power->parser->regulator_cfg->num; + int ret, i; + + DBG(""); + for (i = 0; i < num; i++) { + if (regs[i].enable_load >= 0) { + ret = regulator_set_load(s[i].consumer, + regs[i].enable_load); + if (ret < 0) { + pr_err("regulator %d set op mode failed, %d\n", + i, ret); + goto fail; + } + } + } + + ret = regulator_bulk_enable(num, s); + if (ret < 0) { + pr_err("regulator enable failed, %d\n", ret); + goto fail; + } + + return 0; + +fail: + for (i--; i >= 0; i--) + regulator_set_load(s[i].consumer, regs[i].disable_load); + return ret; +} + +static int dp_power_regulator_init(struct dp_power_private *power) +{ + struct regulator_bulk_data *s = power->supplies; + const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs; + struct platform_device *pdev = power->pdev; + int num = power->parser->regulator_cfg->num; + int i, ret; + + for (i = 0; i < num; i++) + s[i].supply = regs[i].name; + + ret = devm_regulator_bulk_get(&pdev->dev, num, s); + if (ret < 0) { + pr_err("%s: failed to init regulator, ret=%d\n", + __func__, ret); + return ret; + } + + return 0; +} + +static int dp_power_clk_init(struct dp_power_private *power) +{ + int rc = 0; + struct dss_module_power *core, *ctrl, *stream; + struct device *dev = &power->pdev->dev; + + core = &power->parser->mp[DP_CORE_PM]; + ctrl = &power->parser->mp[DP_CTRL_PM]; + stream = 
&power->parser->mp[DP_STREAM_PM]; + + rc = msm_dss_get_clk(dev, core->clk_config, core->num_clk); + if (rc) { + DRM_ERROR("failed to get %s clk. err=%d\n", + dp_parser_pm_name(DP_CORE_PM), rc); + return rc; + } + + rc = msm_dss_get_clk(dev, ctrl->clk_config, ctrl->num_clk); + if (rc) { + DRM_ERROR("failed to get %s clk. err=%d\n", + dp_parser_pm_name(DP_CTRL_PM), rc); + msm_dss_put_clk(core->clk_config, core->num_clk); + return -ENODEV; + } + + rc = msm_dss_get_clk(dev, stream->clk_config, stream->num_clk); + if (rc) { + DRM_ERROR("failed to get %s clk. err=%d\n", + dp_parser_pm_name(DP_STREAM_PM), rc); + msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk); + msm_dss_put_clk(core->clk_config, core->num_clk); + return -ENODEV; + } + + return 0; +} + +static int dp_power_clk_deinit(struct dp_power_private *power) +{ + struct dss_module_power *core, *ctrl, *stream; + + core = &power->parser->mp[DP_CORE_PM]; + ctrl = &power->parser->mp[DP_CTRL_PM]; + stream = &power->parser->mp[DP_STREAM_PM]; + + if (!core || !ctrl || !stream) { + DRM_ERROR("invalid power_data\n"); + return -EINVAL; + } + + msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk); + msm_dss_put_clk(core->clk_config, core->num_clk); + msm_dss_put_clk(stream->clk_config, stream->num_clk); + return 0; +} + +static int dp_power_clk_set_rate(struct dp_power_private *power, + enum dp_pm_type module, bool enable) +{ + int rc = 0; + struct dss_module_power *mp = &power->parser->mp[module]; + + if (enable) { + rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk); + if (rc) { + DRM_ERROR("failed to set clks rate.\n"); + return rc; + } + } + + rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable); + if (rc) { + DRM_ERROR("failed to %s clks, err: %d\n", + enable ? "enable" : "disable", rc); + return rc; + } + + return 0; +} + +int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type) +{ + if (pm_type == DP_CORE_PM) + return dp_power->core_clks_on; + + if (pm_type == DP_CTRL_PM) + return dp_power->link_clks_on; + + if (pm_type == DP_STREAM_PM) + return dp_power->stream_clks_on; + + return 0; +} + +int dp_power_clk_enable(struct dp_power *dp_power, + enum dp_pm_type pm_type, bool enable) +{ + int rc = 0; + struct dp_power_private *power; + + power = container_of(dp_power, struct dp_power_private, dp_power); + + if (pm_type != DP_CORE_PM && pm_type != DP_CTRL_PM && + pm_type != DP_STREAM_PM) { + DRM_ERROR("unsupported power module: %s\n", + dp_parser_pm_name(pm_type)); + return -EINVAL; + } + + if (enable) { + if (pm_type == DP_CORE_PM && dp_power->core_clks_on) { + DRM_DEBUG_DP("core clks already enabled\n"); + return 0; + } + + if (pm_type == DP_CTRL_PM && dp_power->link_clks_on) { + DRM_DEBUG_DP("link clks already enabled\n"); + return 0; + } + + if (pm_type == DP_STREAM_PM && dp_power->stream_clks_on) { + DRM_DEBUG_DP("pixel clks already enabled\n"); + return 0; + } + + if ((pm_type == DP_CTRL_PM) && (!dp_power->core_clks_on)) { + DRM_DEBUG_DP("Enable core clks before link clks\n"); + + rc = dp_power_clk_set_rate(power, DP_CORE_PM, enable); + if (rc) { + DRM_ERROR("failed to enable clks: %s. err=%d\n", + dp_parser_pm_name(DP_CORE_PM), rc); + return rc; + } + dp_power->core_clks_on = true; + } + } + + rc = dp_power_clk_set_rate(power, pm_type, enable); + if (rc) { + DRM_ERROR("failed to '%s' clks for: %s. err=%d\n", + enable ? 
"enable" : "disable", + dp_parser_pm_name(pm_type), rc); + return rc; + } + + if (pm_type == DP_CORE_PM) + dp_power->core_clks_on = enable; + else if (pm_type == DP_STREAM_PM) + dp_power->stream_clks_on = enable; + else + dp_power->link_clks_on = enable; + + DRM_DEBUG_DP("%s clocks for %s\n", + enable ? "enable" : "disable", + dp_parser_pm_name(pm_type)); + DRM_DEBUG_DP("strem_clks:%s link_clks:%s core_clks:%s\n", + dp_power->stream_clks_on ? "on" : "off", + dp_power->link_clks_on ? "on" : "off", + dp_power->core_clks_on ? "on" : "off"); + + return 0; +} + +int dp_power_client_init(struct dp_power *dp_power) +{ + int rc = 0; + struct dp_power_private *power; + + if (!dp_power) { + DRM_ERROR("invalid power data\n"); + return -EINVAL; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + pm_runtime_enable(&power->pdev->dev); + + rc = dp_power_regulator_init(power); + if (rc) { + DRM_ERROR("failed to init regulators %d\n", rc); + goto error; + } + + rc = dp_power_clk_init(power); + if (rc) { + DRM_ERROR("failed to init clocks %d\n", rc); + goto error; + } + return 0; + +error: + pm_runtime_disable(&power->pdev->dev); + return rc; +} + +void dp_power_client_deinit(struct dp_power *dp_power) +{ + struct dp_power_private *power; + + if (!dp_power) { + DRM_ERROR("invalid power data\n"); + return; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + dp_power_clk_deinit(power); + pm_runtime_disable(&power->pdev->dev); + +} + +int dp_power_init(struct dp_power *dp_power, bool flip) +{ + int rc = 0; + struct dp_power_private *power = NULL; + + if (!dp_power) { + DRM_ERROR("invalid power data\n"); + return -EINVAL; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + pm_runtime_get_sync(&power->pdev->dev); + rc = dp_power_regulator_enable(power); + if (rc) { + DRM_ERROR("failed to enable regulators, %d\n", rc); + goto exit; + } + + rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true); + if (rc) { + DRM_ERROR("failed to enable DP core clocks, %d\n", rc); + goto err_clk; + } + + return 0; + +err_clk: + dp_power_regulator_disable(power); +exit: + pm_runtime_put_sync(&power->pdev->dev); + return rc; +} + +int dp_power_deinit(struct dp_power *dp_power) +{ + struct dp_power_private *power; + + power = container_of(dp_power, struct dp_power_private, dp_power); + + dp_power_clk_enable(dp_power, DP_CORE_PM, false); + dp_power_regulator_disable(power); + pm_runtime_put_sync(&power->pdev->dev); + return 0; +} + +struct dp_power *dp_power_get(struct dp_parser *parser) +{ + struct dp_power_private *power; + struct dp_power *dp_power; + + if (!parser) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL); + if (!power) + return ERR_PTR(-ENOMEM); + + power->parser = parser; + power->pdev = parser->pdev; + + dp_power = &power->dp_power; + + return dp_power; +} diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h new file mode 100644 index 000000000000..76743d755833 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_power.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
new file mode 100644
index 000000000000..76743d755833
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_power.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_POWER_H_
+#define _DP_POWER_H_
+
+#include "dp_parser.h"
+
+/**
+ * struct dp_power - DisplayPort's power related data
+ *
+ * @core_clks_on: status of the DP core clocks
+ * @link_clks_on: status of the DP link clocks
+ * @stream_clks_on: status of the DP stream (pixel) clocks
+ */
+struct dp_power {
+	bool core_clks_on;
+	bool link_clks_on;
+	bool stream_clks_on;
+};
+
+/**
+ * dp_power_init() - enable power supplies for display controller
+ *
+ * @power: instance of power module
+ * @flip: bool for flipping gpio direction
+ * return: 0 if success or error if failure.
+ *
+ * This API will turn on the regulators and configure the aux/hpd GPIOs.
+ */
+int dp_power_init(struct dp_power *power, bool flip);
+
+/**
+ * dp_power_deinit() - turn off regulators and gpios.
+ *
+ * @power: instance of power module
+ * return: 0 for success
+ *
+ * This API turns off power and regulators.
+ */
+int dp_power_deinit(struct dp_power *power);
+
+/**
+ * dp_power_clk_status() - display controller clocks status
+ *
+ * @dp_power: instance of power module
+ * @pm_type: type of pm, core/ctrl/phy
+ * return: status of power clocks
+ *
+ * This API returns the status of the DP clocks.
+ */
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type);
+
+/**
+ * dp_power_clk_enable() - enable display controller clocks
+ *
+ * @power: instance of power module
+ * @pm_type: type of pm, core/ctrl/phy
+ * @enable: enables or disables
+ * return: 0 for success, error for failure.
+ *
+ * This API will set the rate of and enable the DP clocks.
+ */
+int dp_power_clk_enable(struct dp_power *power, enum dp_pm_type pm_type,
+				bool enable);
+
+/**
+ * dp_power_client_init() - initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ * return: 0 for success, error for failure.
+ *
+ * This API will configure the DisplayPort's clocks and regulator
+ * modules.
+ */
+int dp_power_client_init(struct dp_power *power);
+
+/**
+ * dp_power_client_deinit() - de-initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ * return: 0 for success, error for failure.
+ *
+ * This API will de-initialize the DisplayPort's clocks and regulator
+ * modules.
+ */
+void dp_power_client_deinit(struct dp_power *power);
+
+/**
+ * dp_power_get() - configure and get the DisplayPort power module data
+ *
+ * @parser: instance of parser module
+ * return: pointer to allocated power module data
+ *
+ * This API will configure the DisplayPort's power module and provide
+ * methods to be called by the client to configure the power related
+ * modules.
+ */
+struct dp_power *dp_power_get(struct dp_parser *parser);
+
+#endif /* _DP_POWER_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
new file mode 100644
index 000000000000..43042ff90a19
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */ + +#ifndef _DP_REG_H_ +#define _DP_REG_H_ + +/* DP_TX Registers */ +#define REG_DP_HW_VERSION (0x00000000) + +#define REG_DP_SW_RESET (0x00000010) +#define DP_SW_RESET (0x00000001) + +#define REG_DP_PHY_CTRL (0x00000014) +#define DP_PHY_CTRL_SW_RESET_PLL (0x00000001) +#define DP_PHY_CTRL_SW_RESET (0x00000004) + +#define REG_DP_CLK_CTRL (0x00000018) +#define REG_DP_CLK_ACTIVE (0x0000001C) +#define REG_DP_INTR_STATUS (0x00000020) +#define REG_DP_INTR_STATUS2 (0x00000024) +#define REG_DP_INTR_STATUS3 (0x00000028) + +#define REG_DP_DP_HPD_CTRL (0x00000000) +#define DP_DP_HPD_CTRL_HPD_EN (0x00000001) + +#define REG_DP_DP_HPD_INT_STATUS (0x00000004) + +#define REG_DP_DP_HPD_INT_ACK (0x00000008) +#define DP_DP_HPD_PLUG_INT_ACK (0x00000001) +#define DP_DP_IRQ_HPD_INT_ACK (0x00000002) +#define DP_DP_HPD_REPLUG_INT_ACK (0x00000004) +#define DP_DP_HPD_UNPLUG_INT_ACK (0x00000008) + +#define REG_DP_DP_HPD_INT_MASK (0x0000000C) +#define DP_DP_HPD_PLUG_INT_MASK (0x00000001) +#define DP_DP_IRQ_HPD_INT_MASK (0x00000002) +#define DP_DP_HPD_REPLUG_INT_MASK (0x00000004) +#define DP_DP_HPD_UNPLUG_INT_MASK (0x00000008) +#define DP_DP_HPD_INT_MASK (DP_DP_HPD_PLUG_INT_MASK | \ + DP_DP_IRQ_HPD_INT_MASK | \ + DP_DP_HPD_REPLUG_INT_MASK | \ + DP_DP_HPD_UNPLUG_INT_MASK) +#define DP_DP_HPD_STATE_STATUS_CONNECTED (0x40000000) +#define DP_DP_HPD_STATE_STATUS_PENDING (0x20000000) +#define DP_DP_HPD_STATE_STATUS_DISCONNECTED (0x00000000) +#define DP_DP_HPD_STATE_STATUS_MASK (0xE0000000) + +#define REG_DP_DP_HPD_REFTIMER (0x00000018) +#define DP_DP_HPD_REFTIMER_ENABLE (1 << 16) + +#define REG_DP_DP_HPD_EVENT_TIME_0 (0x0000001C) +#define REG_DP_DP_HPD_EVENT_TIME_1 (0x00000020) +#define DP_DP_HPD_EVENT_TIME_0_VAL (0x3E800FA) +#define DP_DP_HPD_EVENT_TIME_1_VAL (0x1F407D0) + +#define REG_DP_AUX_CTRL (0x00000030) +#define DP_AUX_CTRL_ENABLE (0x00000001) +#define DP_AUX_CTRL_RESET (0x00000002) + +#define REG_DP_AUX_DATA (0x00000034) +#define DP_AUX_DATA_READ (0x00000001) +#define DP_AUX_DATA_WRITE (0x00000000) +#define DP_AUX_DATA_OFFSET (0x00000008) +#define DP_AUX_DATA_INDEX_OFFSET (0x00000010) +#define DP_AUX_DATA_MASK (0x0000ff00) +#define DP_AUX_DATA_INDEX_WRITE (0x80000000) + +#define REG_DP_AUX_TRANS_CTRL (0x00000038) +#define DP_AUX_TRANS_CTRL_I2C (0x00000100) +#define DP_AUX_TRANS_CTRL_GO (0x00000200) +#define DP_AUX_TRANS_CTRL_NO_SEND_ADDR (0x00000400) +#define DP_AUX_TRANS_CTRL_NO_SEND_STOP (0x00000800) + +#define REG_DP_TIMEOUT_COUNT (0x0000003C) +#define REG_DP_AUX_LIMITS (0x00000040) +#define REG_DP_AUX_STATUS (0x00000044) + +#define DP_DPCD_CP_IRQ (0x201) +#define DP_DPCD_RXSTATUS (0x69493) + +#define DP_INTERRUPT_TRANS_NUM (0x000000A0) + +#define REG_DP_MAINLINK_CTRL (0x00000000) +#define DP_MAINLINK_CTRL_ENABLE (0x00000001) +#define DP_MAINLINK_CTRL_RESET (0x00000002) +#define DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER (0x00000010) +#define DP_MAINLINK_FB_BOUNDARY_SEL (0x02000000) + +#define REG_DP_STATE_CTRL (0x00000004) +#define DP_STATE_CTRL_LINK_TRAINING_PATTERN1 (0x00000001) +#define DP_STATE_CTRL_LINK_TRAINING_PATTERN2 (0x00000002) +#define DP_STATE_CTRL_LINK_TRAINING_PATTERN3 (0x00000004) +#define DP_STATE_CTRL_LINK_TRAINING_PATTERN4 (0x00000008) +#define DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE (0x00000010) +#define DP_STATE_CTRL_LINK_PRBS7 (0x00000020) +#define DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN (0x00000040) +#define DP_STATE_CTRL_SEND_VIDEO (0x00000080) +#define DP_STATE_CTRL_PUSH_IDLE (0x00000100) + +#define REG_DP_CONFIGURATION_CTRL (0x00000008) +#define DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK 
(0x00000001) +#define DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN (0x00000002) +#define DP_CONFIGURATION_CTRL_P_INTERLACED (0x00000004) +#define DP_CONFIGURATION_CTRL_INTERLACED_BTF (0x00000008) +#define DP_CONFIGURATION_CTRL_NUM_OF_LANES (0x00000010) +#define DP_CONFIGURATION_CTRL_ENHANCED_FRAMING (0x00000040) +#define DP_CONFIGURATION_CTRL_SEND_VSC (0x00000080) +#define DP_CONFIGURATION_CTRL_BPC (0x00000100) +#define DP_CONFIGURATION_CTRL_ASSR (0x00000400) +#define DP_CONFIGURATION_CTRL_RGB_YUV (0x00000800) +#define DP_CONFIGURATION_CTRL_LSCLK_DIV (0x00002000) +#define DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT (0x04) +#define DP_CONFIGURATION_CTRL_BPC_SHIFT (0x08) +#define DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT (0x0D) + +#define REG_DP_SOFTWARE_MVID (0x00000010) +#define REG_DP_SOFTWARE_NVID (0x00000018) +#define REG_DP_TOTAL_HOR_VER (0x0000001C) +#define REG_DP_START_HOR_VER_FROM_SYNC (0x00000020) +#define REG_DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000024) +#define REG_DP_ACTIVE_HOR_VER (0x00000028) + +#define REG_DP_MISC1_MISC0 (0x0000002C) +#define DP_MISC0_SYNCHRONOUS_CLK (0x00000001) +#define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001) +#define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005) + +#define REG_DP_VALID_BOUNDARY (0x00000030) +#define REG_DP_VALID_BOUNDARY_2 (0x00000034) + +#define REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING (0x00000038) +#define LANE0_MAPPING_SHIFT (0x00000000) +#define LANE1_MAPPING_SHIFT (0x00000002) +#define LANE2_MAPPING_SHIFT (0x00000004) +#define LANE3_MAPPING_SHIFT (0x00000006) + +#define REG_DP_MAINLINK_READY (0x00000040) +#define DP_MAINLINK_READY_FOR_VIDEO (0x00000001) +#define DP_MAINLINK_READY_LINK_TRAINING_SHIFT (0x00000003) + +#define REG_DP_MAINLINK_LEVELS (0x00000044) +#define DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2 (0x00000002) + + +#define REG_DP_TU (0x0000004C) + +#define REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000054) +#define DP_HBR2_ERM_PATTERN (0x00010000) + +#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000000C0) +#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000000C4) +#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000000C8) + +#define MMSS_DP_MISC1_MISC0 (0x0000002C) +#define MMSS_DP_AUDIO_TIMING_GEN (0x00000080) +#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000084) +#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000088) +#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000008C) +#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000090) +#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000094) +#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000098) + +#define MMSS_DP_PSR_CRC_RG (0x00000154) +#define MMSS_DP_PSR_CRC_B (0x00000158) + +#define REG_DP_COMPRESSION_MODE_CTRL (0x00000180) + +#define MMSS_DP_AUDIO_CFG (0x00000200) +#define MMSS_DP_AUDIO_STATUS (0x00000204) +#define MMSS_DP_AUDIO_PKT_CTRL (0x00000208) +#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000020C) +#define MMSS_DP_AUDIO_ACR_CTRL (0x00000210) +#define MMSS_DP_AUDIO_CTRL_RESET (0x00000214) + +#define MMSS_DP_SDP_CFG (0x00000228) +#define MMSS_DP_SDP_CFG2 (0x0000022C) +#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000230) +#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000234) + +#define MMSS_DP_AUDIO_STREAM_0 (0x00000240) +#define MMSS_DP_AUDIO_STREAM_1 (0x00000244) + +#define MMSS_DP_EXTENSION_0 (0x00000250) +#define MMSS_DP_EXTENSION_1 (0x00000254) +#define MMSS_DP_EXTENSION_2 (0x00000258) +#define MMSS_DP_EXTENSION_3 (0x0000025C) +#define MMSS_DP_EXTENSION_4 (0x00000260) +#define MMSS_DP_EXTENSION_5 (0x00000264) +#define MMSS_DP_EXTENSION_6 (0x00000268) +#define MMSS_DP_EXTENSION_7 (0x0000026C) +#define MMSS_DP_EXTENSION_8 (0x00000270) 
+#define MMSS_DP_EXTENSION_9 (0x00000274) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000278) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000027C) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000280) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000284) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000288) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000028C) +#define MMSS_DP_AUDIO_ISRC_0 (0x00000290) +#define MMSS_DP_AUDIO_ISRC_1 (0x00000294) +#define MMSS_DP_AUDIO_ISRC_2 (0x00000298) +#define MMSS_DP_AUDIO_ISRC_3 (0x0000029C) +#define MMSS_DP_AUDIO_ISRC_4 (0x000002A0) +#define MMSS_DP_AUDIO_ISRC_5 (0x000002A4) +#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000002A8) +#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000002AC) +#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000002B0) + +#define MMSS_DP_GENERIC0_0 (0x00000300) +#define MMSS_DP_GENERIC0_1 (0x00000304) +#define MMSS_DP_GENERIC0_2 (0x00000308) +#define MMSS_DP_GENERIC0_3 (0x0000030C) +#define MMSS_DP_GENERIC0_4 (0x00000310) +#define MMSS_DP_GENERIC0_5 (0x00000314) +#define MMSS_DP_GENERIC0_6 (0x00000318) +#define MMSS_DP_GENERIC0_7 (0x0000031C) +#define MMSS_DP_GENERIC0_8 (0x00000320) +#define MMSS_DP_GENERIC0_9 (0x00000324) +#define MMSS_DP_GENERIC1_0 (0x00000328) +#define MMSS_DP_GENERIC1_1 (0x0000032C) +#define MMSS_DP_GENERIC1_2 (0x00000330) +#define MMSS_DP_GENERIC1_3 (0x00000334) +#define MMSS_DP_GENERIC1_4 (0x00000338) +#define MMSS_DP_GENERIC1_5 (0x0000033C) +#define MMSS_DP_GENERIC1_6 (0x00000340) +#define MMSS_DP_GENERIC1_7 (0x00000344) +#define MMSS_DP_GENERIC1_8 (0x00000348) +#define MMSS_DP_GENERIC1_9 (0x0000034C) + +#define MMSS_DP_VSCEXT_0 (0x000002D0) +#define MMSS_DP_VSCEXT_1 (0x000002D4) +#define MMSS_DP_VSCEXT_2 (0x000002D8) +#define MMSS_DP_VSCEXT_3 (0x000002DC) +#define MMSS_DP_VSCEXT_4 (0x000002E0) +#define MMSS_DP_VSCEXT_5 (0x000002E4) +#define MMSS_DP_VSCEXT_6 (0x000002E8) +#define MMSS_DP_VSCEXT_7 (0x000002EC) +#define MMSS_DP_VSCEXT_8 (0x000002F0) +#define MMSS_DP_VSCEXT_9 (0x000002F4) + +#define MMSS_DP_BIST_ENABLE (0x00000000) +#define DP_BIST_ENABLE_DPBIST_EN (0x00000001) + +#define MMSS_DP_TIMING_ENGINE_EN (0x00000010) +#define DP_TIMING_ENGINE_EN_EN (0x00000001) + +#define MMSS_DP_INTF_CONFIG (0x00000014) +#define MMSS_DP_INTF_HSYNC_CTL (0x00000018) +#define MMSS_DP_INTF_VSYNC_PERIOD_F0 (0x0000001C) +#define MMSS_DP_INTF_VSYNC_PERIOD_F1 (0x00000020) +#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024) +#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028) +#define MMSS_INTF_DISPLAY_V_START_F0 (0x0000002C) +#define MMSS_INTF_DISPLAY_V_START_F1 (0x00000030) +#define MMSS_DP_INTF_DISPLAY_V_END_F0 (0x00000034) +#define MMSS_DP_INTF_DISPLAY_V_END_F1 (0x00000038) +#define MMSS_DP_INTF_ACTIVE_V_START_F0 (0x0000003C) +#define MMSS_DP_INTF_ACTIVE_V_START_F1 (0x00000040) +#define MMSS_DP_INTF_ACTIVE_V_END_F0 (0x00000044) +#define MMSS_DP_INTF_ACTIVE_V_END_F1 (0x00000048) +#define MMSS_DP_INTF_DISPLAY_HCTL (0x0000004C) +#define MMSS_DP_INTF_ACTIVE_HCTL (0x00000050) +#define MMSS_DP_INTF_POLARITY_CTL (0x00000058) + +#define MMSS_DP_TPG_MAIN_CONTROL (0x00000060) +#define MMSS_DP_DSC_DTO (0x0000007C) +#define DP_TPG_CHECKERED_RECT_PATTERN (0x00000100) + +#define MMSS_DP_TPG_VIDEO_CONFIG (0x00000064) +#define DP_TPG_VIDEO_CONFIG_BPP_8BIT (0x00000001) +#define DP_TPG_VIDEO_CONFIG_RGB (0x00000004) + +#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000088) + +#define REG_DP_PHY_AUX_INTERRUPT_CLEAR (0x0000004C) +#define REG_DP_PHY_AUX_BIST_CFG (0x00000050) +#define REG_DP_PHY_AUX_INTERRUPT_STATUS (0x000000BC) + +/* DP HDCP 1.3 registers */ +#define 
DP_HDCP_CTRL (0x0A0) +#define DP_HDCP_STATUS (0x0A4) +#define DP_HDCP_SW_UPPER_AKSV (0x098) +#define DP_HDCP_SW_LOWER_AKSV (0x09C) +#define DP_HDCP_ENTROPY_CTRL0 (0x350) +#define DP_HDCP_ENTROPY_CTRL1 (0x35C) +#define DP_HDCP_SHA_STATUS (0x0C8) +#define DP_HDCP_RCVPORT_DATA2_0 (0x0B0) +#define DP_HDCP_RCVPORT_DATA3 (0x0A4) +#define DP_HDCP_RCVPORT_DATA4 (0x0A8) +#define DP_HDCP_RCVPORT_DATA5 (0x0C0) +#define DP_HDCP_RCVPORT_DATA6 (0x0C4) + +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL (0x024) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA (0x028) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x004) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x008) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x00C) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x010) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x014) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x018) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x01C) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x020) + +#endif /* _DP_REG_H_ */ diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 4de771d6f0be..78ef5d4ed922 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -30,6 +30,8 @@ enum msm_dsi_phy_type { MSM_DSI_PHY_28NM_8960, MSM_DSI_PHY_14NM, MSM_DSI_PHY_10NM, + MSM_DSI_PHY_7NM, + MSM_DSI_PHY_7NM_V4_1, MSM_DSI_PHY_MAX }; diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 8e536e060070..50eb4d1b8fdd 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -1886,5 +1886,428 @@ static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000 #define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0 +#define REG_DSI_7nm_PHY_CMN_REVISION_ID0 0x00000000 + +#define REG_DSI_7nm_PHY_CMN_REVISION_ID1 0x00000004 + +#define REG_DSI_7nm_PHY_CMN_REVISION_ID2 0x00000008 + +#define REG_DSI_7nm_PHY_CMN_REVISION_ID3 0x0000000c + +#define REG_DSI_7nm_PHY_CMN_CLK_CFG0 0x00000010 + +#define REG_DSI_7nm_PHY_CMN_CLK_CFG1 0x00000014 + +#define REG_DSI_7nm_PHY_CMN_GLBL_CTRL 0x00000018 + +#define REG_DSI_7nm_PHY_CMN_RBUF_CTRL 0x0000001c + +#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_0 0x00000020 + +#define REG_DSI_7nm_PHY_CMN_CTRL_0 0x00000024 + +#define REG_DSI_7nm_PHY_CMN_CTRL_1 0x00000028 + +#define REG_DSI_7nm_PHY_CMN_CTRL_2 0x0000002c + +#define REG_DSI_7nm_PHY_CMN_CTRL_3 0x00000030 + +#define REG_DSI_7nm_PHY_CMN_LANE_CFG0 0x00000034 + +#define REG_DSI_7nm_PHY_CMN_LANE_CFG1 0x00000038 + +#define REG_DSI_7nm_PHY_CMN_PLL_CNTRL 0x0000003c + +#define REG_DSI_7nm_PHY_CMN_DPHY_SOT 0x00000040 + +#define REG_DSI_7nm_PHY_CMN_LANE_CTRL0 0x000000a0 + +#define REG_DSI_7nm_PHY_CMN_LANE_CTRL1 0x000000a4 + +#define REG_DSI_7nm_PHY_CMN_LANE_CTRL2 0x000000a8 + +#define REG_DSI_7nm_PHY_CMN_LANE_CTRL3 0x000000ac + +#define REG_DSI_7nm_PHY_CMN_LANE_CTRL4 0x000000b0 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0 0x000000b4 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1 0x000000b8 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2 0x000000bc + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3 0x000000c0 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4 0x000000c4 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5 0x000000c8 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6 0x000000cc + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7 0x000000d0 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8 0x000000d4 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9 0x000000d8 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10 0x000000dc + +#define 
REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11 0x000000e0 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12 0x000000e4 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13 0x000000e8 + +#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0 0x000000ec + +#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_1 0x000000f0 + +#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL 0x000000f4 + +#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL 0x000000f8 + +#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL 0x000000fc + +#define REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL 0x00000100 + +#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0 0x00000104 + +#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1 0x00000108 + +#define REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL 0x0000010c + +#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_1 0x00000110 + +#define REG_DSI_7nm_PHY_CMN_CTRL_4 0x00000114 + +#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4 0x00000128 + +#define REG_DSI_7nm_PHY_CMN_PHY_STATUS 0x00000140 + +#define REG_DSI_7nm_PHY_CMN_LANE_STATUS0 0x00000148 + +#define REG_DSI_7nm_PHY_CMN_LANE_STATUS1 0x0000014c + +static inline uint32_t REG_DSI_7nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000010 + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000014 + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; } + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000 + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004 + +#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS 0x00000008 + +#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS_TWO 0x0000000c + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010 + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FOUR 0x00000014 + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE 0x00000018 + +#define REG_DSI_7nm_PHY_PLL_INT_LOOP_CONTROLS 0x0000001c + +#define REG_DSI_7nm_PHY_PLL_DSM_DIVIDER 0x00000020 + +#define REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000024 + +#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES 0x00000028 + +#define REG_DSI_7nm_PHY_PLL_FREQ_UPDATE_CONTROL_OVERRIDES 0x0000002c + +#define REG_DSI_7nm_PHY_PLL_CMODE 0x00000030 + +#define REG_DSI_7nm_PHY_PLL_PSM_CTRL 0x00000034 + +#define REG_DSI_7nm_PHY_PLL_RSM_CTRL 0x00000038 + +#define REG_DSI_7nm_PHY_PLL_VCO_TUNE_MAP 0x0000003c + +#define REG_DSI_7nm_PHY_PLL_PLL_CNTRL 0x00000040 + +#define REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000044 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_LOW 0x00000048 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_HIGH 0x0000004c + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS 0x00000050 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MIN 0x00000054 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MAX 0x00000058 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_PFILT 0x0000005c + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_IFILT 0x00000060 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_TWO 0x00000064 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000068 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_FOUR 0x0000006c + +#define 
REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_HIGH 0x00000070 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_LOW 0x00000074 + +#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000078 + +#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_THRESH 0x0000007c + +#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_HIGH 0x00000080 + +#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_LOW 0x00000084 + +#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_HIGH 0x00000088 + +#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_LOW 0x0000008c + +#define REG_DSI_7nm_PHY_PLL_PFILT 0x00000090 + +#define REG_DSI_7nm_PHY_PLL_IFILT 0x00000094 + +#define REG_DSI_7nm_PHY_PLL_PLL_GAIN 0x00000098 + +#define REG_DSI_7nm_PHY_PLL_ICODE_LOW 0x0000009c + +#define REG_DSI_7nm_PHY_PLL_ICODE_HIGH 0x000000a0 + +#define REG_DSI_7nm_PHY_PLL_LOCKDET 0x000000a4 + +#define REG_DSI_7nm_PHY_PLL_OUTDIV 0x000000a8 + +#define REG_DSI_7nm_PHY_PLL_FASTLOCK_CONTROL 0x000000ac + +#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_ONE 0x000000b0 + +#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_TWO 0x000000b4 + +#define REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE 0x000000b8 + +#define REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000bc + +#define REG_DSI_7nm_PHY_PLL_RATE_CHANGE 0x000000c0 + +#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS 0x000000c4 + +#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000c8 + +#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START 0x000000cc + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW 0x000000d0 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID 0x000000d4 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH 0x000000d8 + +#define REG_DSI_7nm_PHY_PLL_DEC_FRAC_MUXES 0x000000dc + +#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000e0 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000e4 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000e8 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000ec + +#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_2 0x000000f0 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_2 0x000000f4 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_2 0x000000f8 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_2 0x000000fc + +#define REG_DSI_7nm_PHY_PLL_MASH_CONTROL 0x00000100 + +#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW 0x00000104 + +#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH 0x00000108 + +#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW 0x0000010c + +#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH 0x00000110 + +#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW 0x00000114 + +#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH 0x00000118 + +#define REG_DSI_7nm_PHY_PLL_SSC_MUX_CONTROL 0x0000011c + +#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x00000120 + +#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000124 + +#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000128 + +#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x0000012c + +#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1 0x00000130 + +#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1 0x00000134 + +#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_2 0x00000138 + +#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_2 0x0000013c + +#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_2 0x00000140 + +#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_2 0x00000144 + +#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_2 0x00000148 + +#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_2 0x0000014c + +#define REG_DSI_7nm_PHY_PLL_SSC_CONTROL 0x00000150 + +#define REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000154 + +#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000158 + +#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_2 0x0000015c 
+ +#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x00000160 + +#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_2 0x00000164 + +#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1 0x00000168 + +#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_2 0x0000016c + +#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x00000170 + +#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_2 0x00000174 + +#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000178 + +#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_2 0x0000017c + +#define REG_DSI_7nm_PHY_PLL_PLL_FASTLOCK_EN_BAND 0x00000180 + +#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MID 0x00000184 + +#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_HIGH 0x00000188 + +#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MUX 0x0000018c + +#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000190 + +#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY 0x00000194 + +#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_MIN_DELAY 0x00000198 + +#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS 0x0000019c + +#define REG_DSI_7nm_PHY_PLL_SPARE_AND_JPC_OVERRIDES 0x000001a0 + +#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_1 0x000001a4 + +#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_2 0x000001a8 + +#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_CTRL_1 0x000001ac + +#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE 0x000001b0 + +#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_TWO 0x000001b4 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL 0x000001b8 + +#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_LOW 0x000001bc + +#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_HIGH 0x000001c0 + +#define REG_DSI_7nm_PHY_PLL_FD_OUT_LOW 0x000001c4 + +#define REG_DSI_7nm_PHY_PLL_FD_OUT_HIGH 0x000001c8 + +#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_STATUS_1 0x000001cc + +#define REG_DSI_7nm_PHY_PLL_PLL_MISC_CONFIG 0x000001d0 + +#define REG_DSI_7nm_PHY_PLL_FLL_CONFIG 0x000001d4 + +#define REG_DSI_7nm_PHY_PLL_FLL_FREQ_ACQ_TIME 0x000001d8 + +#define REG_DSI_7nm_PHY_PLL_FLL_CODE0 0x000001dc + +#define REG_DSI_7nm_PHY_PLL_FLL_CODE1 0x000001e0 + +#define REG_DSI_7nm_PHY_PLL_FLL_GAIN0 0x000001e4 + +#define REG_DSI_7nm_PHY_PLL_FLL_GAIN1 0x000001e8 + +#define REG_DSI_7nm_PHY_PLL_SW_RESET 0x000001ec + +#define REG_DSI_7nm_PHY_PLL_FAST_PWRUP 0x000001f0 + +#define REG_DSI_7nm_PHY_PLL_LOCKTIME0 0x000001f4 + +#define REG_DSI_7nm_PHY_PLL_LOCKTIME1 0x000001f8 + +#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS_SEL 0x000001fc + +#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS0 0x00000200 + +#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS1 0x00000204 + +#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS2 0x00000208 + +#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS3 0x0000020c + +#define REG_DSI_7nm_PHY_PLL_ANALOG_FLL_CONTROL_OVERRIDES 0x00000210 + +#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG 0x00000214 + +#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE0_STATUS 0x00000218 + +#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE1_STATUS 0x0000021c + +#define REG_DSI_7nm_PHY_PLL_RESET_SM_STATUS 0x00000220 + +#define REG_DSI_7nm_PHY_PLL_TDC_OFFSET 0x00000224 + +#define REG_DSI_7nm_PHY_PLL_PS3_PWRDOWN_CONTROLS 0x00000228 + +#define REG_DSI_7nm_PHY_PLL_PS4_PWRDOWN_CONTROLS 0x0000022c + +#define REG_DSI_7nm_PHY_PLL_PLL_RST_CONTROLS 0x00000230 + +#define REG_DSI_7nm_PHY_PLL_GEAR_BAND_SELECT_CONTROLS 0x00000234 + +#define REG_DSI_7nm_PHY_PLL_PSM_CLK_CONTROLS 0x00000238 + +#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES_2 0x0000023c + +#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1 0x00000240 + +#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_2 0x00000244 + +#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1 0x00000248 + +#define 
REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_2 0x0000024c + +#define REG_DSI_7nm_PHY_PLL_CMODE_1 0x00000250 + +#define REG_DSI_7nm_PHY_PLL_CMODE_2 0x00000254 + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1 0x00000258 + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_2 0x0000025c + +#define REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE 0x00000260 #endif /* DSI_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index f892f2cbe8bb..b2ff68a15791 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -265,9 +265,12 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1, &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops}, - }; const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index efd469d1db45..ade9b609c7d9 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -21,6 +21,8 @@ #define MSM_DSI_6G_VER_MINOR_V2_1_0 0x20010000 #define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000 #define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 +#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000 +#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000 #define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001 #define MSM_DSI_V2_VER_MINOR_8064 0x0 diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 009f5b843dd1..e8c1a727179c 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -364,6 +364,102 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, return 0; } +int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; + s32 ui, ui_x8; + s32 tmax, tmin; + s32 pcnt_clk_prep = 50; + s32 pcnt_clk_zero = 2; + s32 pcnt_clk_trail = 30; + s32 pcnt_hs_prep = 50; + s32 pcnt_hs_zero = 10; + s32 pcnt_hs_trail = 30; + s32 pcnt_hs_exit = 10; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 hb_en; + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + hb_en = 0; + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + ui_x8 = ui << 3; + + /* TODO: verify these calculations against latest downstream driver + * everything except clk_post/clk_pre uses calculations from v3 based + * on the downstream driver having the same calculations for v3 and v4 + */ + + temp = S_DIV_ROUND_UP(38 * coeff, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (95 * coeff) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false); + + temp = 300 * coeff - (timing->clk_prepare << 3) * ui; + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = (tmin > 255) ? 
511 : 255; + timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8); + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp + 3 * ui) / ui_x8; + timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false); + + temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (85 * coeff + 6 * ui) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false); + + temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui; + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 255; + timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1; + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp / ui_x8) - 1; + timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false); + + temp = 50 * coeff + ((hb_en << 2) - 8) * ui; + timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8); + + tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1; + tmax = 255; + timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false); + + /* recommended min + * = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1 + */ + temp = 60 * coeff + 52 * ui + + (timing->hs_trail + 1) * ui_x8; + tmin = DIV_ROUND_UP(temp, 16 * ui) - 1; + tmax = 255; + timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false); + + /* recommended min + * val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns) + * val2 = (16 * bit_clk_ns) + * final = roundup(val1/val2, 0) - 1 + */ + temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff; + tmin = DIV_ROUND_UP(temp, 16 * ui) - 1; + tmax = 255; + timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin; + + DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit, + timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst); + + return 0; +} + void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, u32 bit_mask) { @@ -508,6 +604,12 @@ static const struct of_device_id dsi_phy_dt_match[] = { { .compatible = "qcom,dsi-phy-10nm-8998", .data = &dsi_phy_10nm_8998_cfgs }, #endif +#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY + { .compatible = "qcom,dsi-phy-7nm", + .data = &dsi_phy_7nm_cfgs }, + { .compatible = "qcom,dsi-phy-7nm-8150", + .data = &dsi_phy_7nm_8150_cfgs }, +#endif {} }; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index ef8672d7b123..d2bd74b6f357 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -48,10 +48,10 @@ extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs; struct msm_dsi_dphy_timing { - u32 clk_pre; - u32 clk_post; u32 clk_zero; u32 clk_trail; u32 clk_prepare; @@ -102,6 +102,8 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, struct msm_dsi_phy_clk_request *clk_req); int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, struct msm_dsi_phy_clk_request *clk_req); +int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing 
*timing,
+	struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
 	u32 bit_mask);
 int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
new file mode 100644
index 000000000000..255b5f5ab2ce
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -0,0 +1,255 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+	void __iomem *base = phy->base;
+	u32 data = 0;
+
+	data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
+	mb(); /* make sure read happened */
+
+	return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *lane_base = phy->lane_base;
+	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */
+
+	/*
+	 * LPRX and CDRX need to be enabled only for the physical data lane
+	 * corresponding to the logical data lane 0
+	 */
+	if (enable)
+		dsi_phy_write(lane_base +
+			      REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+	else
+		dsi_phy_write(lane_base +
+			      REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
+{
+	int i;
+	const u8 tx_dctrl_0[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+	const u8 tx_dctrl_1[] = { 0x40, 0x40, 0x40, 0x46, 0x41 };
+	const u8 *tx_dctrl = tx_dctrl_0;
+	void __iomem *lane_base = phy->lane_base;
+
+	if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1)
+		tx_dctrl = tx_dctrl_1;
+
+	/* Strength ctrl settings */
+	for (i = 0; i < 5; i++) {
+		/*
+		 * Disable LPRX and CDRX for all lanes. Later on, they will
+		 * be enabled only for the physical data lane corresponding
+		 * to the logical data lane 0
+		 */
+		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i), 0);
+		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i), 0x0);
+	}
+
+	dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
+
+	/* other settings */
+	for (i = 0; i < 5; i++) {
+		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG0(i), 0x0);
+		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG1(i), 0x0);
+		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG2(i), i == 4 ? 0x8a : 0xa);
+		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i), tx_dctrl[i]);
+	}
+}
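+
+/*
+ * Enable sequence implemented below (D-PHY only for now), summarised
+ * for readability; the authoritative order is the code itself:
+ *
+ *  1. compute D-PHY timings for the requested bit clock
+ *  2. poll PHY_STATUS until the refgen is ready
+ *  3. pick vreg/strength/rescode values, split at a 1.5 GHz bit rate
+ *  4. power up the blocks and program lane mapping and the LDO
+ *  5. hand the PLL its use case via msm_dsi_pll_set_usecase()
+ *  6. program the TIMING_CTRL registers and per-lane settings
+ */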
+
+static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+			      struct msm_dsi_phy_clk_request *clk_req)
+{
+	int ret;
+	u32 status;
+	u32 const delay_us = 5;
+	u32 const timeout_us = 1000;
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	void __iomem *base = phy->base;
+	bool less_than_1500_mhz;
+	u32 vreg_ctrl_0, glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
+	u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
+	u32 data;
+
+	DBG("");
+
+	if (msm_dsi_dphy_timing_calc_v4(timing, clk_req)) {
+		DRM_DEV_ERROR(&phy->pdev->dev,
+			      "%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dsi_phy_hw_v4_0_is_pll_on(phy))
+		pr_warn("PLL turned on before configuring PHY\n");
+
+	/* wait for REFGEN READY */
+	ret = readl_poll_timeout_atomic(base + REG_DSI_7nm_PHY_CMN_PHY_STATUS,
+					status, (status & BIT(0)),
+					delay_us, timeout_us);
+	if (ret) {
+		pr_err("Ref gen not ready. Aborting\n");
+		return -EINVAL;
+	}
+
+	/* TODO: CPHY enable path (this is for DPHY only) */
+
+	/* Alter PHY configurations if the data rate is less than 1.5 GHz */
+	less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
+
+	if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1) {
+		vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
+		glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
+		glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
+		glbl_str_swi_cal_sel_ctrl = 0x00;
+		glbl_hstx_str_ctrl_0 = 0x88;
+	} else {
+		vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
+		glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
+		glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
+		glbl_rescode_top_ctrl = 0x03;
+		glbl_rescode_bot_ctrl = 0x3c;
+	}
+
+	/* de-assert digital and pll power down */
+	data = BIT(6) | BIT(5);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
+
+	/* Assert PLL core reset */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+	/* turn off resync FIFO */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+	/* program CMN_CTRL_4 for minor_ver 2 chipsets */
+	data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0);
+	data = data & (0xf0);
+	if (data == 0x20)
+		dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_4, 0x04);
+
+	/* Configure PHY lane swap (TODO: we need to calculate this) */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);
+
+	/* Enable LDO */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, 0x5c);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
+		      glbl_str_swi_cal_sel_ctrl);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
+		      glbl_hstx_str_ctrl_0);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, 0x00);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
+		      glbl_rescode_top_ctrl);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
+		      glbl_rescode_bot_ctrl);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
+
+	/* Remove power down from all blocks */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);
+
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0x1f);
+
+	/* Select full-rate mode */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
+
+	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+	if (ret) {
+		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+			      __func__, ret);
+		return ret;
+	}
+
+	/* DSI PHY timings */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11,
0x00); + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12, + timing->shared_timings.clk_pre); + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13, + timing->shared_timings.clk_post); + + /* DSI lane settings */ + dsi_phy_hw_v4_0_lane_settings(phy); + + DBG("DSI%d PHY enabled", phy->id); + + return 0; +} + +static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy) +{ + /* TODO */ +} + +static int dsi_7nm_phy_init(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + + phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", + "DSI_PHY_LANE"); + if (IS_ERR(phy->lane_base)) { + DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", + __func__); + return -ENOMEM; + } + + return 0; +} + +const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = { + .type = MSM_DSI_PHY_7NM_V4_1, + .src_pll_truthtable = { {false, false}, {true, false} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vdds", 36000, 32}, + }, + }, + .ops = { + .enable = dsi_7nm_phy_enable, + .disable = dsi_7nm_phy_disable, + .init = dsi_7nm_phy_init, + }, + .io_start = { 0xae94400, 0xae96400 }, + .num_dsi_phy = 2, +}; + +const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = { + .type = MSM_DSI_PHY_7NM, + .src_pll_truthtable = { {false, false}, {true, false} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vdds", 36000, 32}, + }, + }, + .ops = { + .enable = dsi_7nm_phy_enable, + .disable = dsi_7nm_phy_disable, + .init = dsi_7nm_phy_init, + }, + .io_start = { 0xae94400, 0xae96400 }, + .num_dsi_phy = 2, +}; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c index 4a4aa3c61d71..a45fe95aff49 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c @@ -161,6 +161,10 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, case MSM_DSI_PHY_10NM: pll = msm_dsi_pll_10nm_init(pdev, id); break; + case MSM_DSI_PHY_7NM: + case MSM_DSI_PHY_7NM_V4_1: + pll = msm_dsi_pll_7nm_init(pdev, id); + break; default: pll = ERR_PTR(-ENXIO); break; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h index c6a3623f905d..3405982a092c 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h @@ -116,5 +116,15 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id) return ERR_PTR(-ENODEV); } #endif +#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY +struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id); +#else +static inline struct msm_dsi_pll * +msm_dsi_pll_7nm_init(struct platform_device *pdev, int id) +{ + return ERR_PTR(-ENODEV); +} +#endif + #endif /* __DSI_PLL_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c new file mode 100644 index 000000000000..de0dfb815125 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c @@ -0,0 +1,904 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2018, The Linux Foundation + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/iopoll.h> + +#include "dsi_pll.h" +#include "dsi.xml.h" + +/* + * DSI PLL 7nm - clock diagram (eg: DSI0): TODO: updated CPHY diagram + * + * dsi0_pll_out_div_clk dsi0_pll_bit_clk + * | | + * | | + * +---------+ | +----------+ | +----+ + * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk + * +---------+ | +----------+ | +----+ + * | | + * | | dsi0_pll_by_2_bit_clk + * | | | + * | | +----+ | |\ dsi0_pclk_mux + * | |--| /2 |--o--| \ | + * | | +----+ | \ | +---------+ + * | 
--------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk + * |------------------------------| / +---------+ + * | +-----+ | / + * -----------| /4? |--o----------|/ + * +-----+ | | + * | |dsiclk_sel + * | + * dsi0_pll_post_out_div_clk + */ + +#define DSI_BYTE_PLL_CLK 0 +#define DSI_PIXEL_PLL_CLK 1 +#define NUM_PROVIDED_CLKS 2 + +#define VCO_REF_CLK_RATE 19200000 + +struct dsi_pll_regs { + u32 pll_prop_gain_rate; + u32 pll_lockdet_rate; + u32 decimal_div_start; + u32 frac_div_start_low; + u32 frac_div_start_mid; + u32 frac_div_start_high; + u32 pll_clock_inverters; + u32 ssc_stepsize_low; + u32 ssc_stepsize_high; + u32 ssc_div_per_low; + u32 ssc_div_per_high; + u32 ssc_adjper_low; + u32 ssc_adjper_high; + u32 ssc_control; +}; + +struct dsi_pll_config { + u32 ref_freq; + bool div_override; + u32 output_div; + bool ignore_frac; + bool disable_prescaler; + bool enable_ssc; + bool ssc_center; + u32 dec_bits; + u32 frac_bits; + u32 lock_timer; + u32 ssc_freq; + u32 ssc_offset; + u32 ssc_adj_per; + u32 thresh_cycles; + u32 refclk_cycles; +}; + +struct pll_7nm_cached_state { + unsigned long vco_rate; + u8 bit_clk_div; + u8 pix_clk_div; + u8 pll_out_div; + u8 pll_mux; +}; + +struct dsi_pll_7nm { + struct msm_dsi_pll base; + + int id; + struct platform_device *pdev; + + void __iomem *phy_cmn_mmio; + void __iomem *mmio; + + u64 vco_ref_clk_rate; + u64 vco_current_rate; + + /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */ + spinlock_t postdiv_lock; + + int vco_delay; + struct dsi_pll_config pll_configuration; + struct dsi_pll_regs reg_setup; + + /* private clocks: */ + struct clk_hw *out_div_clk_hw; + struct clk_hw *bit_clk_hw; + struct clk_hw *byte_clk_hw; + struct clk_hw *by_2_bit_clk_hw; + struct clk_hw *post_out_div_clk_hw; + struct clk_hw *pclk_mux_hw; + struct clk_hw *out_dsiclk_hw; + + /* clock-provider: */ + struct clk_hw_onecell_data *hw_data; + + struct pll_7nm_cached_state cached_state; + + enum msm_dsi_phy_usecase uc; + struct dsi_pll_7nm *slave; +}; + +#define to_pll_7nm(x) container_of(x, struct dsi_pll_7nm, base) + +/* + * Global list of private DSI PLL struct pointers. 
We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_7nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+
+	config->ref_freq = pll->vco_ref_clk_rate;
+	config->output_div = 1;
+	config->dec_bits = 8;
+	config->frac_bits = 18;
+	config->lock_timer = 64;
+	config->ssc_freq = 31500;
+	config->ssc_offset = 4800;
+	config->ssc_adj_per = 2;
+	config->thresh_cycles = 32;
+	config->refclk_cycles = 256;
+
+	config->div_override = false;
+	config->ignore_frac = false;
+	config->disable_prescaler = false;
+
+	/* TODO: ssc enable */
+	config->enable_ssc = false;
+	config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u64 fref = pll->vco_ref_clk_rate;
+	u64 pll_freq;
+	u64 divider;
+	u64 dec, dec_multiple;
+	u32 frac;
+	u64 multiplier;
+
+	pll_freq = pll->vco_current_rate;
+
+	if (config->disable_prescaler)
+		divider = fref;
+	else
+		divider = fref * 2;
+
+	multiplier = 1 << config->frac_bits;
+	dec_multiple = div_u64(pll_freq * multiplier, divider);
+	div_u64_rem(dec_multiple, multiplier, &frac);
+
+	dec = div_u64(dec_multiple, multiplier);
+
+	if (pll->base.type != MSM_DSI_PHY_7NM_V4_1)
+		regs->pll_clock_inverters = 0x28;
+	else if (pll_freq <= 1000000000ULL)
+		regs->pll_clock_inverters = 0xa0;
+	else if (pll_freq <= 2500000000ULL)
+		regs->pll_clock_inverters = 0x20;
+	else if (pll_freq <= 3020000000ULL)
+		regs->pll_clock_inverters = 0x00;
+	else
+		regs->pll_clock_inverters = 0x40;
+
+	regs->pll_lockdet_rate = config->lock_timer;
+	regs->decimal_div_start = dec;
+	regs->frac_div_start_low = (frac & 0xff);
+	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+	regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
+
+#define SSC_CENTER	BIT(0)
+#define SSC_EN		BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u32 ssc_per;
+	u32 ssc_mod;
+	u64 ssc_step_size;
+	u64 frac;
+
+	if (!config->enable_ssc) {
+		DBG("SSC not enabled");
+		return;
+	}
+
+	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+	ssc_per -= ssc_mod;
+
+	frac = regs->frac_div_start_low |
+			(regs->frac_div_start_mid << 8) |
+			(regs->frac_div_start_high << 16);
+	ssc_step_size = regs->decimal_div_start;
+	ssc_step_size *= (1 << config->frac_bits);
+	ssc_step_size += frac;
+	ssc_step_size *= config->ssc_offset;
+	ssc_step_size *= (config->ssc_adj_per + 1);
+	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+	regs->ssc_div_per_low = ssc_per & 0xFF;
+	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+	pr_debug("SSC: Dec:%d, frac:%llu, frac_bits:%d\n",
+		 regs->decimal_div_start, frac, config->frac_bits);
+	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
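+
+/*
+ * Worked example (illustrative only, using the defaults set in
+ * dsi_pll_setup_config() and a 19.2 MHz reference): for a requested VCO
+ * rate of 1.5 GHz, divider = 2 * 19200000 = 38400000, so
+ * dec_multiple = 1500000000 * 2^18 / 38400000 = 10240000, giving
+ * dec = 10240000 / 2^18 = 39 and frac = 10240000 - 39 * 2^18 = 16384
+ * (0x4000). DECIMAL_DIV_START_1 is then written with 39 and the
+ * FRAC_DIV_START_{LOW,MID,HIGH}_1 registers with 0x00/0x40/0x00.
+ */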
+
+static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+
+	if (pll->pll_configuration.enable_ssc) {
+		pr_debug("SSC is enabled\n");
+
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+			  regs->ssc_stepsize_low);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+			  regs->ssc_stepsize_high);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+			  regs->ssc_div_per_low);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+			  regs->ssc_div_per_high);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
+			  regs->ssc_adjper_low);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
+			  regs->ssc_adjper_high);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
+			  SSC_EN | regs->ssc_control);
+	}
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
+
+	if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
+		if (pll->vco_current_rate >= 3100000000ULL)
+			analog_controls_five_1 = 0x03;
+
+		if (pll->vco_current_rate < 1520000000ULL)
+			vco_config_1 = 0x08;
+		else if (pll->vco_current_rate < 2990000000ULL)
+			vco_config_1 = 0x01;
+	}
+
+	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
+		  analog_controls_five_1);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
+		  pll->base.type == MSM_DSI_PHY_7NM_V4_1 ?
0x3f : 0x22); + + if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) { + pll_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22); + if (pll->slave) + pll_write(pll->slave->mmio + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22); + } +} + +static void dsi_pll_commit(struct dsi_pll_7nm *pll) +{ + void __iomem *base = pll->mmio; + struct dsi_pll_regs *reg = &pll->reg_setup; + + pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12); + pll_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, reg->decimal_div_start); + pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low); + pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid); + pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high); + pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40); + pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06); + pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */ + pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters); +} + +static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + + DBG("DSI PLL%d rate=%lu, parent's rate=%lu", pll_7nm->id, rate, + parent_rate); + + pll_7nm->vco_current_rate = rate; + pll_7nm->vco_ref_clk_rate = VCO_REF_CLK_RATE; + + dsi_pll_setup_config(pll_7nm); + + dsi_pll_calc_dec_frac(pll_7nm); + + dsi_pll_calc_ssc(pll_7nm); + + dsi_pll_commit(pll_7nm); + + dsi_pll_config_hzindep_reg(pll_7nm); + + dsi_pll_ssc_commit(pll_7nm); + + /* flush, ensure all register writes are done */ + wmb(); + + return 0; +} + +static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll) +{ + int rc; + u32 status = 0; + u32 const delay_us = 100; + u32 const timeout_us = 5000; + + rc = readl_poll_timeout_atomic(pll->mmio + + REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE, + status, + ((status & BIT(0)) > 0), + delay_us, + timeout_us); + if (rc) + pr_err("DSI PLL(%d) lock failed, status=0x%08x\n", + pll->id, status); + + return rc; +} + +static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll) +{ + u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0); + + pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0); + pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5)); + ndelay(250); +} + +static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll) +{ + u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0); + + pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5)); + pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0); + ndelay(250); +} + +static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll) +{ + u32 data; + + data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5)); +} + +static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll) +{ + u32 data; + + pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04); + + data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1, + data | BIT(5) | BIT(4)); +} + +static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll) +{ + /* + * Reset the PHY digital domain. 
This would be needed when + * coming out of a CX or analog rail power collapse while + * ensuring that the pads maintain LP00 or LP11 state + */ + pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0)); + wmb(); /* Ensure that the reset is asserted */ + pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0); + wmb(); /* Ensure that the reset is deasserted */ +} + +static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + int rc; + + dsi_pll_enable_pll_bias(pll_7nm); + if (pll_7nm->slave) + dsi_pll_enable_pll_bias(pll_7nm->slave); + + /* Start PLL */ + pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01); + + /* + * ensure all PLL configurations are written prior to checking + * for PLL lock. + */ + wmb(); + + /* Check for PLL lock */ + rc = dsi_pll_7nm_lock_status(pll_7nm); + if (rc) { + pr_err("PLL(%d) lock failed\n", pll_7nm->id); + goto error; + } + + pll->pll_on = true; + + /* + * assert power on reset for PHY digital in case the PLL is + * enabled after CX or analog domain power collapse. This needs + * to be done before enabling the global clk. + */ + dsi_pll_phy_dig_reset(pll_7nm); + if (pll_7nm->slave) + dsi_pll_phy_dig_reset(pll_7nm->slave); + + dsi_pll_enable_global_clk(pll_7nm); + if (pll_7nm->slave) + dsi_pll_enable_global_clk(pll_7nm->slave); + +error: + return rc; +} + +static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll) +{ + pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0); + dsi_pll_disable_pll_bias(pll); +} + +static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + + /* + * To avoid any stray glitches while abruptly powering down the PLL + * make sure to gate the clock using the clock enable bit before + * powering down the PLL + */ + dsi_pll_disable_global_clk(pll_7nm); + pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0); + dsi_pll_disable_sub(pll_7nm); + if (pll_7nm->slave) { + dsi_pll_disable_global_clk(pll_7nm->slave); + dsi_pll_disable_sub(pll_7nm->slave); + } + /* flush, ensure all register writes are done */ + wmb(); + pll->pll_on = false; +} + +static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + void __iomem *base = pll_7nm->mmio; + u64 ref_clk = pll_7nm->vco_ref_clk_rate; + u64 vco_rate = 0x0; + u64 multiplier; + u32 frac; + u32 dec; + u64 pll_freq, tmp64; + + dec = pll_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1); + dec &= 0xff; + + frac = pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1); + frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) & + 0xff) << 8); + frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) & + 0x3) << 16); + + /* + * TODO: + * 1. Assumes prescaler is disabled + * 2. Multiplier is 2^18. 
It should be 2^(num_of_frac_bits) + */ + multiplier = 1 << 18; + pll_freq = dec * (ref_clk * 2); + tmp64 = (ref_clk * 2 * frac); + pll_freq += div_u64(tmp64, multiplier); + + vco_rate = pll_freq; + + DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x", + pll_7nm->id, (unsigned long)vco_rate, dec, frac); + + return (unsigned long)vco_rate; +} + +static const struct clk_ops clk_ops_dsi_pll_7nm_vco = { + .round_rate = msm_dsi_pll_helper_clk_round_rate, + .set_rate = dsi_pll_7nm_vco_set_rate, + .recalc_rate = dsi_pll_7nm_vco_recalc_rate, + .prepare = dsi_pll_7nm_vco_prepare, + .unprepare = dsi_pll_7nm_vco_unprepare, +}; + +/* + * PLL Callbacks + */ + +static void dsi_pll_7nm_save_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + struct pll_7nm_cached_state *cached = &pll_7nm->cached_state; + void __iomem *phy_base = pll_7nm->phy_cmn_mmio; + u32 cmn_clk_cfg0, cmn_clk_cfg1; + + cached->pll_out_div = pll_read(pll_7nm->mmio + + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); + cached->pll_out_div &= 0x3; + + cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0); + cached->bit_clk_div = cmn_clk_cfg0 & 0xf; + cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4; + + cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + cached->pll_mux = cmn_clk_cfg1 & 0x3; + + DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x", + pll_7nm->id, cached->pll_out_div, cached->bit_clk_div, + cached->pix_clk_div, cached->pll_mux); +} + +static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + struct pll_7nm_cached_state *cached = &pll_7nm->cached_state; + void __iomem *phy_base = pll_7nm->phy_cmn_mmio; + u32 val; + + val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); + val &= ~0x3; + val |= cached->pll_out_div; + pll_write(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val); + + pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0, + cached->bit_clk_div | (cached->pix_clk_div << 4)); + + val = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + val &= ~0x3; + val |= cached->pll_mux; + pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val); + + DBG("DSI PLL%d", pll_7nm->id); + + return 0; +} + +static int dsi_pll_7nm_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + void __iomem *base = pll_7nm->phy_cmn_mmio; + u32 data = 0x0; /* internal PLL */ + + DBG("DSI PLL%d", pll_7nm->id); + + switch (uc) { + case MSM_DSI_PHY_STANDALONE: + break; + case MSM_DSI_PHY_MASTER: + pll_7nm->slave = pll_7nm_list[(pll_7nm->id + 1) % DSI_MAX]; + break; + case MSM_DSI_PHY_SLAVE: + data = 0x1; /* external PLL */ + break; + default: + return -EINVAL; + } + + /* set PLL src */ + pll_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2)); + + pll_7nm->uc = uc; + + return 0; +} + +static int dsi_pll_7nm_get_provider(struct msm_dsi_pll *pll, + struct clk **byte_clk_provider, + struct clk **pixel_clk_provider) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + struct clk_hw_onecell_data *hw_data = pll_7nm->hw_data; + + DBG("DSI PLL%d", pll_7nm->id); + + if (byte_clk_provider) + *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk; + if (pixel_clk_provider) + *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk; + + return 0; +} + +static void dsi_pll_7nm_destroy(struct msm_dsi_pll *pll) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + struct device *dev = &pll_7nm->pdev->dev; + + DBG("DSI PLL%d", pll_7nm->id); 
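+ /* Editorial note (not in the original patch): teardown below mirrors + * pll_7nm_register() in reverse; the clk provider is removed first so + * consumers can no longer look up a clk_hw that is about to go away. */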
+ of_clk_del_provider(dev->of_node); + + clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw); + clk_hw_unregister_mux(pll_7nm->pclk_mux_hw); + clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw); + clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw); + clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw); + clk_hw_unregister_divider(pll_7nm->bit_clk_hw); + clk_hw_unregister_divider(pll_7nm->out_div_clk_hw); + clk_hw_unregister(&pll_7nm->base.clk_hw); +} + +/* + * The post dividers and mux clocks are created using the standard divider and + * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux + * state to follow the master PLL's divider/mux state. Therefore, we don't + * require special clock ops that also configure the slave PLL registers + */ +static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm) +{ + char clk_name[32], parent[32], vco_name[32]; + char parent2[32], parent3[32], parent4[32]; + struct clk_init_data vco_init = { + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .name = vco_name, + .flags = CLK_IGNORE_UNUSED, + .ops = &clk_ops_dsi_pll_7nm_vco, + }; + struct device *dev = &pll_7nm->pdev->dev; + struct clk_hw_onecell_data *hw_data; + struct clk_hw *hw; + int ret; + + DBG("DSI%d", pll_7nm->id); + + hw_data = devm_kzalloc(dev, sizeof(*hw_data) + + NUM_PROVIDED_CLKS * sizeof(struct clk_hw *), + GFP_KERNEL); + if (!hw_data) + return -ENOMEM; + + snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->id); + pll_7nm->base.clk_hw.init = &vco_init; + + ret = clk_hw_register(dev, &pll_7nm->base.clk_hw); + if (ret) + return ret; + + snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->id); + snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->id); + + hw = clk_hw_register_divider(dev, clk_name, + parent, CLK_SET_RATE_PARENT, + pll_7nm->mmio + + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, + 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_base_clk_hw; + } + + pll_7nm->out_div_clk_hw = hw; + + snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->id); + snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id); + + /* BIT CLK: DIV_CTRL_3_0 */ + hw = clk_hw_register_divider(dev, clk_name, parent, + CLK_SET_RATE_PARENT, + pll_7nm->phy_cmn_mmio + + REG_DSI_7nm_PHY_CMN_CLK_CFG0, + 0, 4, CLK_DIVIDER_ONE_BASED, + &pll_7nm->postdiv_lock); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_out_div_clk_hw; + } + + pll_7nm->bit_clk_hw = hw; + + snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->id); + snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id); + + /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */ + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + CLK_SET_RATE_PARENT, 1, 8); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_bit_clk_hw; + } + + pll_7nm->byte_clk_hw = hw; + hw_data->hws[DSI_BYTE_PLL_CLK] = hw; + + snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id); + snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id); + + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + 0, 1, 2); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_byte_clk_hw; + } + + pll_7nm->by_2_bit_clk_hw = hw; + + snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id); + snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id); + + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + 0, 1, 4); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_by_2_bit_clk_hw; + } + + pll_7nm->post_out_div_clk_hw = hw; + + snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->id); + 
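/* Editorial note (not in the original patch): CLK_CFG1[1:0] selects among + * the four pclk mux parents registered below; the parent names must match + * the clk names created above. */ +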
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id); + snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id); + snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->id); + snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id); + + hw = clk_hw_register_mux(dev, clk_name, + ((const char *[]){ + parent, parent2, parent3, parent4 + }), 4, 0, pll_7nm->phy_cmn_mmio + + REG_DSI_7nm_PHY_CMN_CLK_CFG1, + 0, 2, 0, NULL); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_post_out_div_clk_hw; + } + + pll_7nm->pclk_mux_hw = hw; + + snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->id); + snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->id); + + /* PIX CLK DIV : DIV_CTRL_7_4 */ + hw = clk_hw_register_divider(dev, clk_name, parent, + 0, pll_7nm->phy_cmn_mmio + + REG_DSI_7nm_PHY_CMN_CLK_CFG0, + 4, 4, CLK_DIVIDER_ONE_BASED, + &pll_7nm->postdiv_lock); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_pclk_mux_hw; + } + + pll_7nm->out_dsiclk_hw = hw; + hw_data->hws[DSI_PIXEL_PLL_CLK] = hw; + + hw_data->num = NUM_PROVIDED_CLKS; + pll_7nm->hw_data = hw_data; + + ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, + pll_7nm->hw_data); + if (ret) { + DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret); + goto err_dsiclk_hw; + } + + return 0; + +err_dsiclk_hw: + clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw); +err_pclk_mux_hw: + clk_hw_unregister_mux(pll_7nm->pclk_mux_hw); +err_post_out_div_clk_hw: + clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw); +err_by_2_bit_clk_hw: + clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw); +err_byte_clk_hw: + clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw); +err_bit_clk_hw: + clk_hw_unregister_divider(pll_7nm->bit_clk_hw); +err_out_div_clk_hw: + clk_hw_unregister_divider(pll_7nm->out_div_clk_hw); +err_base_clk_hw: + clk_hw_unregister(&pll_7nm->base.clk_hw); + + return ret; +} + +struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id) +{ + struct dsi_pll_7nm *pll_7nm; + struct msm_dsi_pll *pll; + int ret; + + pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL); + if (!pll_7nm) + return ERR_PTR(-ENOMEM); + + DBG("DSI PLL%d", id); + + pll_7nm->pdev = pdev; + pll_7nm->id = id; + pll_7nm_list[id] = pll_7nm; + + pll_7nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); + if (IS_ERR_OR_NULL(pll_7nm->phy_cmn_mmio)) { + DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n"); + return ERR_PTR(-ENOMEM); + } + + pll_7nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); + if (IS_ERR_OR_NULL(pll_7nm->mmio)) { + DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n"); + return ERR_PTR(-ENOMEM); + } + + spin_lock_init(&pll_7nm->postdiv_lock); + + pll = &pll_7nm->base; + pll->min_rate = 1000000000UL; + pll->max_rate = 3500000000UL; + if (pll->type == MSM_DSI_PHY_7NM_V4_1) { + pll->min_rate = 600000000UL; + pll->max_rate = (unsigned long)5000000000ULL; + /* workaround for max rate overflowing on 32-bit builds: */ + pll->max_rate = max(pll->max_rate, 0xffffffffUL); + } + pll->get_provider = dsi_pll_7nm_get_provider; + pll->destroy = dsi_pll_7nm_destroy; + pll->save_state = dsi_pll_7nm_save_state; + pll->restore_state = dsi_pll_7nm_restore_state; + pll->set_usecase = dsi_pll_7nm_set_usecase; + + pll_7nm->vco_delay = 1; + + ret = pll_7nm_register(pll_7nm); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret); + return ERR_PTR(ret); + } + + /* TODO: Remove this when we have proper display handover support */ + msm_dsi_pll_save_state(pll); + + 
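/* Editorial note (not in the original patch): at this point the VCO and + * all post-dividers/muxes are registered and the divider/mux state + * programmed by the bootloader has been cached, so a later + * restore_state() can reapply it before the first modeset. */ +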
return pll; +} diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 79333842f70a..49685571dc0e 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -453,15 +453,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (ret) goto err_msm_uninit; - if (!dev->dma_parms) { - dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), - GFP_KERNEL); - if (!dev->dma_parms) { - ret = -ENOMEM; - goto err_msm_uninit; - } - } - dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_set_max_seg_size(dev, UINT_MAX); msm_gem_shrinker_init(ddev); @@ -594,9 +586,10 @@ static int context_init(struct drm_device *dev, struct drm_file *file) if (!ctx) return -ENOMEM; + kref_init(&ctx->ref); msm_submitqueue_init(dev, ctx); - ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL; + ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current); file->driver_priv = ctx; return 0; @@ -615,7 +608,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file) static void context_close(struct msm_file_private *ctx) { msm_submitqueue_close(ctx); - kfree(ctx); + msm_file_private_put(ctx); } static void msm_postclose(struct drm_device *dev, struct drm_file *file) @@ -779,18 +772,19 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data, } static int msm_ioctl_gem_info_iova(struct drm_device *dev, - struct drm_gem_object *obj, uint64_t *iova) + struct drm_file *file, struct drm_gem_object *obj, + uint64_t *iova) { - struct msm_drm_private *priv = dev->dev_private; + struct msm_file_private *ctx = file->driver_priv; - if (!priv->gpu) + if (!ctx->aspace) return -EINVAL; /* * Don't pin the memory here - just get an address so that userspace can * be productive */ - return msm_gem_get_iova(obj, priv->gpu->aspace, iova); + return msm_gem_get_iova(obj, ctx->aspace, iova); } static int msm_ioctl_gem_info(struct drm_device *dev, void *data, @@ -829,7 +823,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, args->value = msm_gem_mmap_offset(obj); break; case MSM_INFO_GET_IOVA: - ret = msm_ioctl_gem_info_iova(dev, obj, &args->value); + ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value); break; case MSM_INFO_SET_NAME: /* length check should leave room for terminating null: */ @@ -1358,6 +1352,7 @@ static int __init msm_drm_register(void) msm_dsi_register(); msm_edp_register(); msm_hdmi_register(); + msm_dp_register(); adreno_register(); return platform_driver_register(&msm_platform_driver); } @@ -1366,6 +1361,7 @@ static void __exit msm_drm_unregister(void) { DBG("fini"); platform_driver_unregister(&msm_platform_driver); + msm_dp_unregister(); msm_hdmi_unregister(); adreno_unregister(); msm_edp_unregister(); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index af259b0573ea..b9dd8f8f4887 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -57,6 +57,7 @@ struct msm_file_private { struct list_head submitqueues; int queueid; struct msm_gem_address_space *aspace; + struct kref ref; }; enum msm_mdp_plane_property { @@ -159,6 +160,8 @@ struct msm_drm_private { /* DSI is shared by mdp4 and mdp5 */ struct msm_dsi *dsi[2]; + struct msm_dp *dp; + /* when we have more than one 'msm_gpu' these need to be an array: */ struct msm_gpu *gpu; struct msm_file_private *lastctx; @@ -248,6 +251,10 @@ int msm_gem_map_vma(struct msm_gem_address_space *aspace, void msm_gem_close_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma); + +struct 
msm_gem_address_space * +msm_gem_address_space_get(struct msm_gem_address_space *aspace); + void msm_gem_address_space_put(struct msm_gem_address_space *aspace); struct msm_gem_address_space * @@ -302,9 +309,8 @@ void msm_gem_put_vaddr(struct drm_gem_object *obj); int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); int msm_gem_sync_object(struct drm_gem_object *obj, struct msm_fence_context *fctx, bool exclusive); -void msm_gem_move_to_active(struct drm_gem_object *obj, - struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence); -void msm_gem_move_to_inactive(struct drm_gem_object *obj); +void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu); +void msm_gem_active_put(struct drm_gem_object *obj); int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); int msm_gem_cpu_fini(struct drm_gem_object *obj); void msm_gem_free_object(struct drm_gem_object *obj); @@ -378,6 +384,63 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, } #endif +#ifdef CONFIG_DRM_MSM_DP +int __init msm_dp_register(void); +void __exit msm_dp_unregister(void); +int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, + struct drm_encoder *encoder); +int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder); +int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder); +int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder); +void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void msm_dp_irq_postinstall(struct msm_dp *dp_display); + +void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor); + +#else +static inline int __init msm_dp_register(void) +{ + return -EINVAL; +} +static inline void __exit msm_dp_unregister(void) +{ +} +static inline int msm_dp_modeset_init(struct msm_dp *dp_display, + struct drm_device *dev, + struct drm_encoder *encoder) +{ + return -EINVAL; +} +static inline int msm_dp_display_enable(struct msm_dp *dp, + struct drm_encoder *encoder) +{ + return -EINVAL; +} +static inline int msm_dp_display_disable(struct msm_dp *dp, + struct drm_encoder *encoder) +{ + return -EINVAL; +} +static inline int msm_dp_display_pre_disable(struct msm_dp *dp, + struct drm_encoder *encoder) +{ + return -EINVAL; +} +static inline void msm_dp_display_mode_set(struct msm_dp *dp, + struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} + +static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display) +{ +} + +static inline void msm_dp_debugfs_init(struct msm_dp *dp_display, + struct drm_minor *minor) +{ +} + +#endif + void __init msm_mdp_register(void); void __exit msm_mdp_unregister(void); void __init msm_dpu_register(void); @@ -398,8 +461,9 @@ void msm_perf_debugfs_cleanup(struct msm_drm_private *priv); #else static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } __printf(3, 4) -static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, - const char *fmt, ...) {} +static inline void msm_rd_dump_submit(struct msm_rd_state *rd, + struct msm_gem_submit *submit, + const char *fmt, ...) 
{} static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {} static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {} #endif @@ -419,7 +483,8 @@ struct msm_gpu_submitqueue; int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx); struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx, u32 id); -int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, +int msm_submitqueue_create(struct drm_device *drm, + struct msm_file_private *ctx, u32 prio, u32 flags, u32 *id); int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx, struct drm_msm_submitqueue_query *args); @@ -428,6 +493,26 @@ void msm_submitqueue_close(struct msm_file_private *ctx); void msm_submitqueue_destroy(struct kref *kref); +static inline void __msm_file_private_destroy(struct kref *kref) +{ + struct msm_file_private *ctx = container_of(kref, + struct msm_file_private, ref); + + msm_gem_address_space_put(ctx->aspace); + kfree(ctx); +} + +static inline void msm_file_private_put(struct msm_file_private *ctx) +{ + kref_put(&ctx->ref, __msm_file_private_destroy); +} + +static inline struct msm_file_private *msm_file_private_get( + struct msm_file_private *ctx) +{ + kref_get(&ctx->ref); + return ctx; +} #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index b2f49152b4d4..ec602113be78 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -52,26 +52,14 @@ static void sync_for_device(struct msm_gem_object *msm_obj) { struct device *dev = msm_obj->base.dev->dev; - if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) { - dma_sync_sg_for_device(dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); - } else { - dma_map_sg(dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); - } + dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); } static void sync_for_cpu(struct msm_gem_object *msm_obj) { struct device *dev = msm_obj->base.dev->dev; - if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) { - dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); - } else { - dma_unmap_sg(dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); - } + dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); } /* allocate pages from VRAM carveout, used when no IOMMU: */ @@ -126,7 +114,7 @@ static struct page **get_pages(struct drm_gem_object *obj) msm_obj->pages = p; - msm_obj->sgt = drm_prime_pages_to_sg(p, npages); + msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages); if (IS_ERR(msm_obj->sgt)) { void *ptr = ERR_CAST(msm_obj->sgt); @@ -753,31 +741,31 @@ int msm_gem_sync_object(struct drm_gem_object *obj, return 0; } -void msm_gem_move_to_active(struct drm_gem_object *obj, - struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence) +void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu) { struct msm_gem_object *msm_obj = to_msm_bo(obj); + WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); - msm_obj->gpu = gpu; - if (exclusive) - dma_resv_add_excl_fence(obj->resv, fence); - else - dma_resv_add_shared_fence(obj->resv, fence); - list_del_init(&msm_obj->mm_list); - list_add_tail(&msm_obj->mm_list, &gpu->active_list); + + if (!atomic_fetch_inc(&msm_obj->active_count)) { + msm_obj->gpu = gpu; + 
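/* Editorial note (not in the original patch): 0 -> 1 transition, so pull + * the BO off the inactive list and onto the gpu's active list; nested + * gets only bump the counter. */ +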
list_del_init(&msm_obj->mm_list); + list_add_tail(&msm_obj->mm_list, &gpu->active_list); + } } -void msm_gem_move_to_inactive(struct drm_gem_object *obj) +void msm_gem_active_put(struct drm_gem_object *obj) { - struct drm_device *dev = obj->dev; - struct msm_drm_private *priv = dev->dev_private; struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct msm_drm_private *priv = obj->dev->dev_private; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); - msm_obj->gpu = NULL; - list_del_init(&msm_obj->mm_list); - list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + if (!atomic_dec_return(&msm_obj->active_count)) { + msm_obj->gpu = NULL; + list_del_init(&msm_obj->mm_list); + list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + } } int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) @@ -852,11 +840,28 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) seq_puts(m, " vmas:"); - list_for_each_entry(vma, &msm_obj->vmas, list) - seq_printf(m, " [%s: %08llx,%s,inuse=%d]", - vma->aspace != NULL ? vma->aspace->name : NULL, - vma->iova, vma->mapped ? "mapped" : "unmapped", + list_for_each_entry(vma, &msm_obj->vmas, list) { + const char *name, *comm; + if (vma->aspace) { + struct msm_gem_address_space *aspace = vma->aspace; + struct task_struct *task = + get_pid_task(aspace->pid, PIDTYPE_PID); + if (task) { + comm = kstrdup(task->comm, GFP_KERNEL); + put_task_struct(task); /* get_pid_task() took a reference */ + } else { + comm = NULL; + } + name = aspace->name; + } else { + name = comm = NULL; + } + seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]", + name, comm ? ":" : "", comm ? comm : "", + vma->aspace, vma->iova, + vma->mapped ? "mapped" : "unmapped", vma->inuse); + kfree(comm); + } seq_puts(m, "\n"); } diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 972490b14ba5..a1bf741b9b89 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -24,6 +24,11 @@ struct msm_gem_address_space { spinlock_t lock; /* Protects drm_mm node allocation/removal */ struct msm_mmu *mmu; struct kref kref; + + /* For address spaces associated with a specific process, this + * will be non-NULL: + */ + struct pid *pid; }; struct msm_gem_vma { @@ -83,12 +88,14 @@ struct msm_gem_object { struct mutex lock; /* Protects resources associated with bo */ char name[32]; /* Identifier to print for the debugfs files */ + + atomic_t active_count; }; #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) static inline bool is_active(struct msm_gem_object *msm_obj) { - return msm_obj->gpu != NULL; + return atomic_read(&msm_obj->active_count); } static inline bool is_purgeable(struct msm_gem_object *msm_obj) @@ -142,6 +149,7 @@ struct msm_gem_submit { bool valid; /* true if no cmdstream patching needed */ bool in_rb; /* "sudo" mode, copy cmds into RB */ struct msm_ringbuffer *ring; + struct msm_file_private *ctx; unsigned int nr_cmds; unsigned int nr_bos; u32 ident; /* A "identifier" for the submit for logging */ diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index d7c8948427fe..515ef80816a0 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -19,7 +19,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) if (WARN_ON(!msm_obj->pages)) /* should have already pinned! 
*/ return NULL; - return drm_prime_pages_to_sg(msm_obj->pages, npages); + return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages); } void *msm_gem_prime_vmap(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 722d61668a97..482576d7a39a 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -6,6 +6,7 @@ #include "msm_drv.h" #include "msm_gem.h" +#include "msm_gpu_trace.h" static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock) { @@ -87,7 +88,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) mutex_unlock(&dev->struct_mutex); if (freed > 0) - pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT); + trace_msm_gem_purge(freed << PAGE_SHIFT); return freed; } @@ -123,7 +124,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) *(unsigned long *)ptr += unmapped; if (unmapped > 0) - pr_info_ratelimited("Purging %u vmaps\n", unmapped); + trace_msm_gem_purge_vmaps(unmapped); return NOTIFY_DONE; } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 8cb9aa15ff90..aa5c60a7132d 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -27,7 +27,7 @@ #define BO_PINNED 0x2000 static struct msm_gem_submit *submit_create(struct drm_device *dev, - struct msm_gpu *gpu, struct msm_gem_address_space *aspace, + struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue, uint32_t nr_bos, uint32_t nr_cmds) { @@ -43,7 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, return NULL; submit->dev = dev; - submit->aspace = aspace; + submit->aspace = queue->ctx->aspace; submit->gpu = gpu; submit->fence = NULL; submit->cmd = (void *)&submit->bos[nr_bos]; @@ -677,7 +677,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } } - submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos, + submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds); if (!submit) { ret = -ENOMEM; @@ -785,7 +785,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } } - msm_gpu_submit(gpu, submit, ctx); + msm_gpu_submit(gpu, submit); args->fence = submit->fence->seqno; diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 5f6a11211b64..f914ddbaea89 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -17,6 +17,7 @@ msm_gem_address_space_destroy(struct kref *kref) drm_mm_takedown(&aspace->mm); if (aspace->mmu) aspace->mmu->funcs->destroy(aspace->mmu); + put_pid(aspace->pid); kfree(aspace); } @@ -27,6 +28,15 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace) kref_put(&aspace->kref, msm_gem_address_space_destroy); } +struct msm_gem_address_space * +msm_gem_address_space_get(struct msm_gem_address_space *aspace) +{ + if (!IS_ERR_OR_NULL(aspace)) + kref_get(&aspace->kref); + + return aspace; +} + /* Actually unmap memory for the vma */ void msm_gem_purge_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma) @@ -78,8 +88,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace, ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot); - if (ret) + if (ret) { vma->mapped = false; + vma->inuse--; + } return ret; } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 57ddc9438351..55d16489d0f3 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ 
b/drivers/gpu/drm/msm/msm_gpu.c @@ -24,7 +24,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { - struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); + struct msm_gpu *gpu = dev_to_gpu(dev); struct dev_pm_opp *opp; opp = devfreq_recommended_opp(dev, freq, flags); @@ -32,6 +32,8 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, if (IS_ERR(opp)) return PTR_ERR(opp); + trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp)); + if (gpu->funcs->gpu_set_freq) gpu->funcs->gpu_set_freq(gpu, opp); else @@ -45,7 +47,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, static int msm_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *status) { - struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); + struct msm_gpu *gpu = dev_to_gpu(dev); ktime_t time; if (gpu->funcs->gpu_get_freq) @@ -64,7 +66,7 @@ static int msm_devfreq_get_dev_status(struct device *dev, static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) { - struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); + struct msm_gpu *gpu = dev_to_gpu(dev); if (gpu->funcs->gpu_get_freq) *freq = gpu->funcs->gpu_get_freq(gpu); @@ -200,6 +202,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu) int ret; DBG("%s", gpu->name); + trace_msm_gpu_resume(0); ret = enable_pwrrail(gpu); if (ret) @@ -225,6 +228,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu) int ret; DBG("%s", gpu->name); + trace_msm_gpu_suspend(0); devfreq_suspend_device(gpu->devfreq.devfreq); @@ -520,7 +524,7 @@ static void recover_worker(struct work_struct *work) struct msm_ringbuffer *ring = gpu->rb[i]; list_for_each_entry(submit, &ring->submits, node) - gpu->funcs->submit(gpu, submit, NULL); + gpu->funcs->submit(gpu, submit); } } @@ -694,8 +698,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; - /* move to inactive: */ - msm_gem_move_to_inactive(&msm_obj->base); + + msm_gem_active_put(&msm_obj->base); msm_gem_unpin_iova(&msm_obj->base, submit->aspace); drm_gem_object_put_locked(&msm_obj->base); } @@ -747,8 +751,7 @@ void msm_gpu_retire(struct msm_gpu *gpu) } /* add bo's to gpu's ring, and kick gpu: */ -void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx) +void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { struct drm_device *dev = gpu->dev; struct msm_drm_private *priv = dev->dev_private; @@ -771,6 +774,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; + struct drm_gem_object *drm_obj = &msm_obj->base; uint64_t iova; /* can't happen yet.. 
but when we add 2d support we'll have @@ -783,13 +787,15 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova); if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) - msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); + dma_resv_add_excl_fence(drm_obj->resv, submit->fence); else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) - msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence); + dma_resv_add_shared_fence(drm_obj->resv, submit->fence); + + msm_gem_active_get(drm_obj, gpu); } - gpu->funcs->submit(gpu, submit, ctx); - priv->lastctx = ctx; + gpu->funcs->submit(gpu, submit); + priv->lastctx = submit->queue->ctx; hangcheck_timer_reset(gpu); } @@ -824,6 +830,30 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) return 0; } +/* Return a new address space for a msm_file_private instance */ +struct msm_gem_address_space * +msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task) +{ + struct msm_gem_address_space *aspace = NULL; + if (!gpu) + return NULL; + + /* + * If the target doesn't support private address spaces then return + * the global one + */ + if (gpu->funcs->create_private_address_space) { + aspace = gpu->funcs->create_private_address_space(gpu); + if (!IS_ERR(aspace)) + aspace->pid = get_pid(task_pid(task)); + } + + if (IS_ERR_OR_NULL(aspace)) + aspace = msm_gem_address_space_get(gpu->aspace); + + return aspace; +} + int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, const char *name, struct msm_gpu_config *config) @@ -892,7 +922,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, gpu->gpu_cx = NULL; gpu->pdev = pdev; - platform_set_drvdata(pdev, gpu); + platform_set_drvdata(pdev, &gpu->adreno_smmu); msm_devfreq_init(gpu); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 37cffac4cbe3..6c9e1fdc1a76 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -7,6 +7,7 @@ #ifndef __MSM_GPU_H__ #define __MSM_GPU_H__ +#include <linux/adreno-smmu-priv.h> #include <linux/clk.h> #include <linux/interconnect.h> #include <linux/pm_opp.h> @@ -45,8 +46,7 @@ struct msm_gpu_funcs { int (*hw_init)(struct msm_gpu *gpu); int (*pm_suspend)(struct msm_gpu *gpu); int (*pm_resume)(struct msm_gpu *gpu); - void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx); + void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit); void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring); irqreturn_t (*irq)(struct msm_gpu *irq); struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); @@ -66,6 +66,9 @@ struct msm_gpu_funcs { void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp); struct msm_gem_address_space *(*create_address_space) (struct msm_gpu *gpu, struct platform_device *pdev); + struct msm_gem_address_space *(*create_private_address_space) + (struct msm_gpu *gpu); + uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring); }; struct msm_gpu { @@ -74,6 +77,8 @@ struct msm_gpu { struct platform_device *pdev; const struct msm_gpu_funcs *funcs; + struct adreno_smmu_priv adreno_smmu; + /* performance counters (hw & sw): */ spinlock_t perf_lock; bool perfcntr_active; @@ -144,6 +149,12 @@ struct msm_gpu { bool hw_apriv; }; +static inline struct msm_gpu *dev_to_gpu(struct device *dev) +{ + struct adreno_smmu_priv 
*adreno_smmu = dev_get_drvdata(dev); + return container_of(adreno_smmu, struct msm_gpu, adreno_smmu); +} + /* It turns out that all targets use the same ringbuffer size */ #define MSM_GPU_RINGBUFFER_SZ SZ_32K #define MSM_GPU_RINGBUFFER_BLKSIZE 32 @@ -184,6 +195,7 @@ struct msm_gpu_submitqueue { u32 flags; u32 prio; int faults; + struct msm_file_private *ctx; struct list_head node; struct kref ref; }; @@ -283,13 +295,15 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime, uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs); void msm_gpu_retire(struct msm_gpu *gpu); -void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx); +void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit); int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, const char *name, struct msm_gpu_config *config); +struct msm_gem_address_space * +msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task); + void msm_gpu_cleanup(struct msm_gpu *gpu); struct msm_gpu *adreno_load_gpu(struct drm_device *dev); diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h index 122b84789238..03e0c2536b94 100644 --- a/drivers/gpu/drm/msm/msm_gpu_trace.h +++ b/drivers/gpu/drm/msm/msm_gpu_trace.h @@ -83,6 +83,89 @@ TRACE_EVENT(msm_gpu_submit_retired, __entry->start_ticks, __entry->end_ticks) ); + +TRACE_EVENT(msm_gpu_freq_change, + TP_PROTO(u32 freq), + TP_ARGS(freq), + TP_STRUCT__entry( + __field(u32, freq) + ), + TP_fast_assign( + /* trace freq in MHz to match intel_gpu_freq_change, to make life easier + * for userspace + */ + __entry->freq = DIV_ROUND_UP(freq, 1000000); + ), + TP_printk("new_freq=%u", __entry->freq) +); + + +TRACE_EVENT(msm_gmu_freq_change, + TP_PROTO(u32 freq, u32 perf_index), + TP_ARGS(freq, perf_index), + TP_STRUCT__entry( + __field(u32, freq) + __field(u32, perf_index) + ), + TP_fast_assign( + __entry->freq = freq; + __entry->perf_index = perf_index; + ), + TP_printk("freq=%u, perf_index=%u", __entry->freq, __entry->perf_index) +); + + +TRACE_EVENT(msm_gem_purge, + TP_PROTO(u32 bytes), + TP_ARGS(bytes), + TP_STRUCT__entry( + __field(u32, bytes) + ), + TP_fast_assign( + __entry->bytes = bytes; + ), + TP_printk("Purging %u bytes", __entry->bytes) +); + + +TRACE_EVENT(msm_gem_purge_vmaps, + TP_PROTO(u32 unmapped), + TP_ARGS(unmapped), + TP_STRUCT__entry( + __field(u32, unmapped) + ), + TP_fast_assign( + __entry->unmapped = unmapped; + ), + TP_printk("Purging %u vmaps", __entry->unmapped) +); + + +TRACE_EVENT(msm_gpu_suspend, + TP_PROTO(int dummy), + TP_ARGS(dummy), + TP_STRUCT__entry( + __field(u32, dummy) + ), + TP_fast_assign( + __entry->dummy = dummy; + ), + TP_printk("%u", __entry->dummy) +); + + +TRACE_EVENT(msm_gpu_resume, + TP_PROTO(int dummy), + TP_ARGS(dummy), + TP_STRUCT__entry( + __field(u32, dummy) + ), + TP_fast_assign( + __entry->dummy = dummy; + ), + TP_printk("%u", __entry->dummy) +); + #endif #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c index 310a31b05faa..379496186c7f 100644 --- a/drivers/gpu/drm/msm/msm_gpummu.c +++ b/drivers/gpu/drm/msm/msm_gpummu.c @@ -30,21 +30,20 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova, { struct msm_gpummu *gpummu = to_msm_gpummu(mmu); unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE; - struct scatterlist *sg; + struct sg_dma_page_iter dma_iter; unsigned prot_bits = 0; - 
unsigned i, j; if (prot & IOMMU_WRITE) prot_bits |= 1; if (prot & IOMMU_READ) prot_bits |= 2; - for_each_sg(sgt->sgl, sg, sgt->nents, i) { - dma_addr_t addr = sg->dma_address; - for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) { - gpummu->table[idx] = addr | prot_bits; - addr += GPUMMU_PAGE_SIZE; - } + for_each_sgtable_dma_page(sgt, &dma_iter, 0) { + dma_addr_t addr = sg_page_iter_dma_address(&dma_iter); + int i; + + for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE) + gpummu->table[idx++] = (addr + i) | prot_bits; } /* we can improve by deferring flush for multiple map() */ @@ -102,7 +101,7 @@ struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu) } gpummu->gpu = gpu; - msm_mmu_init(&gpummu->base, dev, &funcs); + msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU); return &gpummu->base; } diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 3a381a9674c9..3a83ffdb3b90 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -4,15 +4,210 @@ * Author: Rob Clark <robdclark@gmail.com> */ +#include <linux/adreno-smmu-priv.h> +#include <linux/io-pgtable.h> #include "msm_drv.h" #include "msm_mmu.h" struct msm_iommu { struct msm_mmu base; struct iommu_domain *domain; + atomic_t pagetables; }; + #define to_msm_iommu(x) container_of(x, struct msm_iommu, base) +struct msm_iommu_pagetable { + struct msm_mmu base; + struct msm_mmu *parent; + struct io_pgtable_ops *pgtbl_ops; + phys_addr_t ttbr; + u32 asid; +}; +static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu) +{ + return container_of(mmu, struct msm_iommu_pagetable, base); +} + +static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova, + size_t size) +{ + struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); + struct io_pgtable_ops *ops = pagetable->pgtbl_ops; + size_t unmapped = 0; + size_t left = size; + + /* Unmap the block one page at a time; iterate on a copy of 'size' so + * the final check compares 'unmapped' against the requested length + * rather than zero. */ + while (left) { + unmapped += ops->unmap(ops, iova, 4096, NULL); + iova += 4096; + left -= 4096; + } + + iommu_flush_tlb_all(to_msm_iommu(pagetable->parent)->domain); + + return (unmapped == size) ? 
0 : -EINVAL; +} + +static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, + struct sg_table *sgt, size_t len, int prot) +{ + struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); + struct io_pgtable_ops *ops = pagetable->pgtbl_ops; + struct scatterlist *sg; + size_t mapped = 0; + u64 addr = iova; + unsigned int i; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t size = sg->length; + phys_addr_t phys = sg_phys(sg); + + /* Map the block one page at a time */ + while (size) { + if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) { + msm_iommu_pagetable_unmap(mmu, iova, mapped); + return -EINVAL; + } + + phys += 4096; + addr += 4096; + size -= 4096; + mapped += 4096; + } + } + + return 0; +} + +static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu) +{ + struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); + struct msm_iommu *iommu = to_msm_iommu(pagetable->parent); + struct adreno_smmu_priv *adreno_smmu = + dev_get_drvdata(pagetable->parent->dev); + + /* + * If this is the last attached pagetable for the parent, + * disable TTBR0 in the arm-smmu driver + */ + if (atomic_dec_return(&iommu->pagetables) == 0) + adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL); + + free_io_pgtable_ops(pagetable->pgtbl_ops); + kfree(pagetable); +} + +int msm_iommu_pagetable_params(struct msm_mmu *mmu, + phys_addr_t *ttbr, int *asid) +{ + struct msm_iommu_pagetable *pagetable; + + if (mmu->type != MSM_MMU_IOMMU_PAGETABLE) + return -EINVAL; + + pagetable = to_pagetable(mmu); + + if (ttbr) + *ttbr = pagetable->ttbr; + + if (asid) + *asid = pagetable->asid; + + return 0; +} + +static const struct msm_mmu_funcs pagetable_funcs = { + .map = msm_iommu_pagetable_map, + .unmap = msm_iommu_pagetable_unmap, + .destroy = msm_iommu_pagetable_destroy, +}; + +static void msm_iommu_tlb_flush_all(void *cookie) +{ +} + +static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ +} + +static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie) +{ +} + +static const struct iommu_flush_ops null_tlb_ops = { + .tlb_flush_all = msm_iommu_tlb_flush_all, + .tlb_flush_walk = msm_iommu_tlb_flush_walk, + .tlb_flush_leaf = msm_iommu_tlb_flush_walk, + .tlb_add_page = msm_iommu_tlb_add_page, +}; + +struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) +{ + struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev); + struct msm_iommu *iommu = to_msm_iommu(parent); + struct msm_iommu_pagetable *pagetable; + const struct io_pgtable_cfg *ttbr1_cfg = NULL; + struct io_pgtable_cfg ttbr0_cfg; + int ret; + + /* Get the pagetable configuration from the domain */ + if (adreno_smmu->cookie) + ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie); + if (!ttbr1_cfg) + return ERR_PTR(-ENODEV); + + pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL); + if (!pagetable) + return ERR_PTR(-ENOMEM); + + msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs, + MSM_MMU_IOMMU_PAGETABLE); + + /* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */ + ttbr0_cfg = *ttbr1_cfg; + + /* The incoming cfg will have the TTBR1 quirk enabled */ + ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1; + ttbr0_cfg.tlb = &null_tlb_ops; + + pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, + &ttbr0_cfg, iommu->domain); + + if (!pagetable->pgtbl_ops) { + kfree(pagetable); + return ERR_PTR(-ENOMEM); + } + + /* + * If this is the first pagetable that we've allocated, send it back to + * the 
arm-smmu driver as a trigger to set up TTBR0 + */ + if (atomic_inc_return(&iommu->pagetables) == 1) { + ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg); + if (ret) { + free_io_pgtable_ops(pagetable->pgtbl_ops); + kfree(pagetable); + return ERR_PTR(ret); + } + } + + /* Needed later for TLB flush */ + pagetable->parent = parent; + pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr; + + /* + * TODO we would like each set of page tables to have a unique ASID + * to optimize TLB invalidation. But iommu_flush_tlb_all() will + * end up flushing the ASID used for TTBR1 pagetables, which is not + * what we want. So for now just use the same ASID as TTBR1. + */ + pagetable->asid = 0; + + return &pagetable->base; +} + static int msm_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *arg) { @@ -36,7 +231,11 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, struct msm_iommu *iommu = to_msm_iommu(mmu); size_t ret; - ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot); + /* The arm-smmu driver expects the addresses to be sign extended */ + if (iova & BIT_ULL(48)) + iova |= GENMASK_ULL(63, 49); + + ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot); WARN_ON(!ret); return (ret == len) ? 0 : -EINVAL; @@ -46,6 +245,9 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len) { struct msm_iommu *iommu = to_msm_iommu(mmu); + if (iova & BIT_ULL(48)) + iova |= GENMASK_ULL(63, 49); + iommu_unmap(iommu->domain, iova, len); return 0; @@ -78,9 +280,11 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain) return ERR_PTR(-ENOMEM); iommu->domain = domain; - msm_mmu_init(&iommu->base, dev, &funcs); + msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU); iommu_set_fault_handler(domain, msm_fault_handler, iommu); + atomic_set(&iommu->pagetables, 0); + ret = iommu_attach_device(iommu->domain, dev); if (ret) { kfree(iommu); diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 3a534ee59bf6..61ade89d9e48 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -17,18 +17,26 @@ struct msm_mmu_funcs { void (*destroy)(struct msm_mmu *mmu); }; +enum msm_mmu_type { + MSM_MMU_GPUMMU, + MSM_MMU_IOMMU, + MSM_MMU_IOMMU_PAGETABLE, +}; + struct msm_mmu { const struct msm_mmu_funcs *funcs; struct device *dev; int (*handler)(void *arg, unsigned long iova, int flags); void *arg; + enum msm_mmu_type type; }; static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, - const struct msm_mmu_funcs *funcs) + const struct msm_mmu_funcs *funcs, enum msm_mmu_type type) { mmu->dev = dev; mmu->funcs = funcs; + mmu->type = type; } struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); @@ -41,7 +49,13 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, mmu->handler = handler; } +struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent); + void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base, dma_addr_t *tran_error); + +int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr, + int *asid); + #endif /* __MSM_MMU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h index 7764373d0ed2..0987d6bf848c 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.h +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -31,6 +31,7 @@ struct msm_rbmemptrs { volatile uint32_t fence; volatile struct msm_gpu_submit_stats 
stats[MSM_GPU_SUBMIT_STATS_COUNT]; + volatile u64 ttbr0; }; struct msm_ringbuffer { diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index a1d94be7883a..c3d206105d28 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -12,6 +12,8 @@ void msm_submitqueue_destroy(struct kref *kref) struct msm_gpu_submitqueue *queue = container_of(kref, struct msm_gpu_submitqueue, ref); + msm_file_private_put(queue->ctx); + kfree(queue); } @@ -49,8 +51,10 @@ void msm_submitqueue_close(struct msm_file_private *ctx) * No lock needed in close and there won't * be any more user ioctls coming our way */ - list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) + list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) { + list_del(&entry->node); msm_submitqueue_put(entry); + } } int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, @@ -81,6 +85,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, write_lock(&ctx->queuelock); + queue->ctx = msm_file_private_get(ctx); queue->id = ctx->queueid++; if (id) diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 8c549c3931af..35122aef037b 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -21,6 +21,7 @@ #include <drm/drm_connector.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_irq.h> @@ -81,8 +82,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb) clk_disable_unprepare(mxsfb->clk_axi); } +static struct drm_framebuffer * +mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + const struct drm_format_info *info; + + info = drm_get_format_info(dev, mode_cmd); + if (!info) + return ERR_PTR(-EINVAL); + + if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) { + dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n"); + return ERR_PTR(-EINVAL); + } + + return drm_gem_fb_create(dev, file_priv, mode_cmd); +} + static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = { - .fb_create = drm_gem_fb_create, + .fb_create = mxsfb_fb_create, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 6416b6907aeb..f9e962fd94d0 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -615,7 +615,7 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); int ret; - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (ret == 0) { if (disp->image[nv_crtc->index]) nouveau_bo_unpin(disp->image[nv_crtc->index]); @@ -1172,7 +1172,7 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, return -ENOMEM; if (new_bo != old_bo) { - ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true); + ret = nouveau_bo_pin(new_bo, NOUVEAU_GEM_DOMAIN_VRAM, true); if (ret) goto fail_free; } @@ -1336,10 +1336,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num) drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100, - TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL, + 
NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo); if (!ret) { - ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, + NOUVEAU_GEM_DOMAIN_VRAM, false); if (!ret) { ret = nouveau_bo_map(nv_crtc->cursor.nvbo); if (ret) diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 3ee836dc5058..7739f46470d3 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -134,7 +134,7 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime) if (!fb || !fb->obj[0]) continue; nvbo = nouveau_gem_object(fb->obj[0]); - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true); if (ret) NV_ERROR(drm, "Could not pin framebuffer\n"); } @@ -144,7 +144,8 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime) if (!nv_crtc->cursor.nvbo) continue; - ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true); + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, + NOUVEAU_GEM_DOMAIN_VRAM, true); if (!ret && nv_crtc->cursor.set_offset) ret = nouveau_bo_map(nv_crtc->cursor.nvbo); if (ret) diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c index 193ba9498f3d..37e63e98cd08 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c +++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c @@ -142,7 +142,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, return ret; nvbo = nouveau_gem_object(fb->obj[0]); - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (ret) return ret; @@ -387,7 +387,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, return ret; nvbo = nouveau_gem_object(fb->obj[0]); - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 7799530e07c1..b111fe24a06b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2069,6 +2069,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_wait_for_fences(dev, state, false); drm_atomic_helper_wait_for_dependencies(state); drm_atomic_helper_update_legacy_modeset_state(dev, state); + drm_atomic_helper_calc_timestamping_constants(state); if (atom->lock_core) mutex_lock(&disp->mutex); @@ -2622,10 +2623,11 @@ nv50_display_create(struct drm_device *dev) dev->mode_config.normalize_zpos = true; /* small shared memory area we use for notifiers and semaphores */ - ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, + ret = nouveau_bo_new(&drm->client, 4096, 0x1000, + NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL, &disp->sync); if (!ret) { - ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true); + ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true); if (!ret) { ret = nouveau_bo_map(disp->sync); if (ret) diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 447ecc9fec42..0356474ad6f6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -542,7 +542,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) return 0; nvbo = nouveau_gem_object(fb->obj[0]); - ret = nouveau_bo_pin(nvbo, 
TTM_PL_FLAG_VRAM, true); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 21537ca1dd39..9a5be6f32424 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -328,7 +328,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART, 0, 0, &chan->ntfy); if (ret == 0) - ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false); + ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART, + false); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 9140387f30dc..2ee75646ad6f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -44,6 +44,9 @@ #include <nvif/if500b.h> #include <nvif/if900b.h> +static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, + struct ttm_resource *reg); + /* * NV10-NV40 tiling helpers */ @@ -137,6 +140,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) struct nouveau_bo *nvbo = nouveau_bo(bo); WARN_ON(nvbo->pin_refcnt > 0); + nouveau_bo_del_io_reserve_lru(bo); nv10_bo_put_tile_region(dev, nvbo->tile, NULL); /* @@ -158,8 +162,7 @@ roundup_64(u64 x, u32 y) } static void -nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, - int *align, u64 *size) +nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct nvif_device *device = &drm->client.device; @@ -192,7 +195,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, } struct nouveau_bo * -nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags, +nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain, u32 tile_mode, u32 tile_flags) { struct nouveau_drm *drm = cli->drm; @@ -218,7 +221,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags, * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated * into in nouveau_gem_new(). */ - if (flags & TTM_PL_FLAG_UNCACHED) { + if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) { /* Determine if we can get a cache-coherent map, forcing * uncached mapping if we can't. */ @@ -258,9 +261,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags, * Skip page sizes that can't support needed domains. */ if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE && - (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram) + (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram) continue; - if ((flags & TTM_PL_FLAG_TT) && + if ((domain & NOUVEAU_GEM_DOMAIN_GART) && (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) continue; @@ -287,13 +290,13 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags, } nvbo->page = vmm->page[pi].shift; - nouveau_bo_fixup_align(nvbo, flags, align, size); + nouveau_bo_fixup_align(nvbo, align, size); return nvbo; } int -nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags, +nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain, struct sg_table *sg, struct dma_resv *robj) { int type = sg ? 
ttm_bo_type_sg : ttm_bo_type_device; @@ -303,7 +306,8 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags, acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo)); nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; - nouveau_bo_placement_set(nvbo, flags, 0); + nouveau_bo_placement_set(nvbo, domain, 0); + INIT_LIST_HEAD(&nvbo->io_reserve_lru); ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type, &nvbo->placement, align >> PAGE_SHIFT, false, @@ -318,19 +322,19 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags, int nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, - uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, + uint32_t domain, uint32_t tile_mode, uint32_t tile_flags, struct sg_table *sg, struct dma_resv *robj, struct nouveau_bo **pnvbo) { struct nouveau_bo *nvbo; int ret; - nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode, + nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode, tile_flags); if (IS_ERR(nvbo)) return PTR_ERR(nvbo); - ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj); + ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj); if (ret) return ret; @@ -339,27 +343,49 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, } static void -set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags) +set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n, + uint32_t domain, uint32_t flags) { *n = 0; - if (type & TTM_PL_FLAG_VRAM) - pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags; - if (type & TTM_PL_FLAG_TT) - pl[(*n)++].flags = TTM_PL_FLAG_TT | flags; - if (type & TTM_PL_FLAG_SYSTEM) - pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags; + if (domain & NOUVEAU_GEM_DOMAIN_VRAM) { + struct nvif_mmu *mmu = &drm->client.mmu; + const u8 type = mmu->type[drm->ttm.type_vram].type; + + pl[*n].mem_type = TTM_PL_VRAM; + pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED; + + /* Some BARs do not support being ioremapped WC */ + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && + type & NVIF_MEM_UNCACHED) + pl[*n].flags &= ~TTM_PL_FLAG_WC; + + (*n)++; + } + if (domain & NOUVEAU_GEM_DOMAIN_GART) { + pl[*n].mem_type = TTM_PL_TT; + pl[*n].flags = flags; + + if (drm->agp.bridge) + pl[*n].flags &= ~TTM_PL_FLAG_CACHED; + + (*n)++; + } + if (domain & NOUVEAU_GEM_DOMAIN_CPU) { + pl[*n].mem_type = TTM_PL_SYSTEM; + pl[(*n)++].flags = flags; + } } static void -set_placement_range(struct nouveau_bo *nvbo, uint32_t type) +set_placement_range(struct nouveau_bo *nvbo, uint32_t domain) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT; unsigned i, fpfn, lpfn; if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && - nvbo->mode && (type & TTM_PL_FLAG_VRAM) && + nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) && nvbo->bo.mem.num_pages < vram_pages / 4) { /* * Make sure that the color and depth buffers are handled @@ -386,26 +412,28 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type) } void -nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) +nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain, + uint32_t busy) { + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_placement *pl = &nvbo->placement; uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED : TTM_PL_MASK_CACHING) | (nvbo->pin_refcnt ? 
TTM_PL_FLAG_NO_EVICT : 0); pl->placement = nvbo->placements; - set_placement_list(nvbo->placements, &pl->num_placement, - type, flags); + set_placement_list(drm, nvbo->placements, &pl->num_placement, + domain, flags); pl->busy_placement = nvbo->busy_placements; - set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, - type | busy, flags); + set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement, + domain | busy, flags); - set_placement_range(nvbo, type); + set_placement_range(nvbo, domain); } int -nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) +nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; @@ -417,7 +445,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) return ret; if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && - memtype == TTM_PL_FLAG_VRAM && contig) { + domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) { if (!nvbo->contig) { nvbo->contig = true; force = true; @@ -426,10 +454,22 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) } if (nvbo->pin_refcnt) { - if (!(memtype & (1 << bo->mem.mem_type)) || evict) { + bool error = evict; + + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: + error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM); + break; + case TTM_PL_TT: + error |= !(domain & NOUVEAU_GEM_DOMAIN_GART); + break; + default: + break; + } + + if (error) { NV_ERROR(drm, "bo %p pinned elsewhere: " "0x%08x vs 0x%08x\n", bo, - 1 << bo->mem.mem_type, memtype); + bo->mem.mem_type, domain); ret = -EBUSY; } nvbo->pin_refcnt++; @@ -437,14 +477,14 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) } if (evict) { - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0); + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0); ret = nouveau_bo_validate(nvbo, false, false); if (ret) goto out; } nvbo->pin_refcnt++; - nouveau_bo_placement_set(nvbo, memtype, 0); + nouveau_bo_placement_set(nvbo, domain, 0); /* drop pin_refcnt temporarily, so we don't trip the assertion * in nouveau_bo_move() that makes sure we're not trying to @@ -490,7 +530,16 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) if (ref) goto out; - nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0); + break; + case TTM_PL_TT: + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0); + break; + default: + break; + } ret = nouveau_bo_validate(nvbo, false, false); if (ret == 0) { @@ -574,6 +623,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) PAGE_SIZE, DMA_FROM_DEVICE); } +void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + + mutex_lock(&drm->ttm.io_reserve_mutex); + list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru); + mutex_unlock(&drm->ttm.io_reserve_mutex); +} + +void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + + mutex_lock(&drm->ttm.io_reserve_mutex); + list_del_init(&nvbo->io_reserve_lru); + mutex_unlock(&drm->ttm.io_reserve_mutex); +} + int nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, bool no_wait_gpu) @@ -646,6 +715,36 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) return
nouveau_sgdma_create_ttm(bo, page_flags); } +static int +nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, + struct ttm_resource *reg) +{ +#if IS_ENABLED(CONFIG_AGP) + struct nouveau_drm *drm = nouveau_bdev(bdev); +#endif + if (!reg) + return -EINVAL; +#if IS_ENABLED(CONFIG_AGP) + if (drm->agp.bridge) + return ttm_agp_bind(ttm, reg); +#endif + return nouveau_sgdma_bind(bdev, ttm, reg); +} + +static void +nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +{ +#if IS_ENABLED(CONFIG_AGP) + struct nouveau_drm *drm = nouveau_bdev(bdev); + + if (drm->agp.bridge) { + ttm_agp_unbind(ttm); + return; + } +#endif + nouveau_sgdma_unbind(bdev, ttm); +} + static void nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) { @@ -653,11 +752,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) switch (bo->mem.mem_type) { case TTM_PL_VRAM: - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, - TTM_PL_FLAG_SYSTEM); + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, + NOUVEAU_GEM_DOMAIN_CPU); break; default: - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0); break; } @@ -725,7 +824,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, if (ret == 0) { ret = ttm_bo_move_accel_cleanup(bo, &fence->base, - evict, + evict, false, new_reg); nouveau_fence_unref(&fence); } @@ -811,7 +910,8 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, struct ttm_place placement_memtype = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING + .mem_type = TTM_PL_TT, + .flags = TTM_PL_MASK_CACHING }; struct ttm_placement placement; struct ttm_resource tmp_reg; @@ -826,7 +926,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) return ret; - ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx); + ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx); + if (ret) + goto out; + + ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg); if (ret) goto out; @@ -848,7 +952,8 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, struct ttm_place placement_memtype = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING + .mem_type = TTM_PL_TT, + .flags = TTM_PL_MASK_CACHING }; struct ttm_placement placement; struct ttm_resource tmp_reg; @@ -888,6 +993,8 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, if (bo->destroy != nouveau_bo_del_ttm) return; + nouveau_bo_del_io_reserve_lru(bo); + if (mem && new_reg->mem_type != TTM_PL_SYSTEM && mem->mem.page == nvbo->page) { list_for_each_entry(vma, &nvbo->vma_list, head) { @@ -969,9 +1076,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, /* Fake bo copy. 
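 * There is nothing to copy for a system-placement BO that has no ttm_tt populated yet; ttm_bo_move_null() simply retargets the BO at the new resource.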
*/ if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) { - BUG_ON(bo->mem.mm_node != NULL); - bo->mem = *new_reg; - new_reg->mm_node = NULL; + ttm_bo_move_null(bo, new_reg); goto out; } @@ -1018,32 +1123,60 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) filp->private_data); } +static void +nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm, + struct ttm_resource *reg) +{ + struct nouveau_mem *mem = nouveau_mem(reg); + + if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { + switch (reg->mem_type) { + case TTM_PL_TT: + if (mem->kind) + nvif_object_unmap_handle(&mem->mem.object); + break; + case TTM_PL_VRAM: + nvif_object_unmap_handle(&mem->mem.object); + break; + default: + break; + } + } +} + static int nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) { struct nouveau_drm *drm = nouveau_bdev(bdev); struct nvkm_device *device = nvxx_device(&drm->client.device); struct nouveau_mem *mem = nouveau_mem(reg); + int ret; + mutex_lock(&drm->ttm.io_reserve_mutex); +retry: switch (reg->mem_type) { case TTM_PL_SYSTEM: /* System memory */ - return 0; + ret = 0; + goto out; case TTM_PL_TT: #if IS_ENABLED(CONFIG_AGP) if (drm->agp.bridge) { - reg->bus.offset = reg->start << PAGE_SHIFT; - reg->bus.base = drm->agp.base; + reg->bus.offset = (reg->start << PAGE_SHIFT) + + drm->agp.base; reg->bus.is_iomem = !drm->agp.cma; } #endif - if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind) + if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || + !mem->kind) { /* untiled */ + ret = 0; break; + } fallthrough; /* tiled memory */ case TTM_PL_VRAM: - reg->bus.offset = reg->start << PAGE_SHIFT; - reg->bus.base = device->func->resource_addr(device, 1); + reg->bus.offset = (reg->start << PAGE_SHIFT) + + device->func->resource_addr(device, 1); reg->bus.is_iomem = true; if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { union { @@ -1052,7 +1185,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) } args; u64 handle, length; u32 argc = 0; - int ret; switch (mem->mem.object.oclass) { case NVIF_CLASS_MEM_NV50: @@ -1078,39 +1210,46 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) &handle, &length); if (ret != 1) { if (WARN_ON(ret == 0)) - return -EINVAL; - return ret; + ret = -EINVAL; + goto out; } - reg->bus.base = 0; reg->bus.offset = handle; + ret = 0; } break; default: - return -EINVAL; + ret = -EINVAL; } - return 0; + +out: + if (ret == -ENOSPC) { + struct nouveau_bo *nvbo; + + nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru, + typeof(*nvbo), + io_reserve_lru); + if (nvbo) { + list_del_init(&nvbo->io_reserve_lru); + drm_vma_node_unmap(&nvbo->bo.base.vma_node, + bdev->dev_mapping); + nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem); + goto retry; + } + + } + mutex_unlock(&drm->ttm.io_reserve_mutex); + return ret; } static void nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg) { struct nouveau_drm *drm = nouveau_bdev(bdev); - struct nouveau_mem *mem = nouveau_mem(reg); - if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { - switch (reg->mem_type) { - case TTM_PL_TT: - if (mem->kind) - nvif_object_unmap_handle(&mem->mem.object); - break; - case TTM_PL_VRAM: - nvif_object_unmap_handle(&mem->mem.object); - break; - default: - break; - } - } + mutex_lock(&drm->ttm.io_reserve_mutex); + nouveau_ttm_io_mem_free_locked(drm, reg); + mutex_unlock(&drm->ttm.io_reserve_mutex); } static int @@ -1131,7 +1270,8 @@ 
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) return 0; if (bo->mem.mem_type == TTM_PL_SYSTEM) { - nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0); + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, + 0); ret = nouveau_bo_validate(nvbo, false, false); if (ret) @@ -1155,35 +1295,36 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) nvbo->busy_placements[i].lpfn = mappable; } - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0); return nouveau_bo_validate(nvbo, false, false); } static int -nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) +nouveau_ttm_tt_populate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { struct ttm_dma_tt *ttm_dma = (void *)ttm; struct nouveau_drm *drm; struct device *dev; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); - if (ttm->state != tt_unpopulated) + if (ttm_tt_is_populated(ttm)) return 0; if (slave && ttm->sg) { /* make userspace faulting work */ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, ttm_dma->dma_address, ttm->num_pages); - ttm->state = tt_unbound; + ttm_tt_set_populated(ttm); return 0; } - drm = nouveau_bdev(ttm->bdev); + drm = nouveau_bdev(bdev); dev = drm->dev->dev; #if IS_ENABLED(CONFIG_AGP) if (drm->agp.bridge) { - return ttm_agp_tt_populate(ttm, ctx); + return ttm_pool_populate(ttm, ctx); } #endif @@ -1196,7 +1337,8 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) } static void -nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) +nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { struct ttm_dma_tt *ttm_dma = (void *)ttm; struct nouveau_drm *drm; @@ -1206,12 +1348,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) if (slave) return; - drm = nouveau_bdev(ttm->bdev); + drm = nouveau_bdev(bdev); dev = drm->dev->dev; #if IS_ENABLED(CONFIG_AGP) if (drm->agp.bridge) { - ttm_agp_tt_unpopulate(ttm); + ttm_pool_unpopulate(ttm); return; } #endif @@ -1226,6 +1368,22 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) ttm_unmap_and_unpopulate_pages(dev, ttm_dma); } +static void +nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) +{ +#if IS_ENABLED(CONFIG_AGP) + struct nouveau_drm *drm = nouveau_bdev(bdev); + if (drm->agp.bridge) { + ttm_agp_unbind(ttm); + ttm_tt_destroy_common(bdev, ttm); + ttm_agp_destroy(ttm); + return; + } +#endif + nouveau_sgdma_destroy(bdev, ttm); +} + void nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) { @@ -1241,6 +1399,9 @@ struct ttm_bo_driver nouveau_bo_driver = { .ttm_tt_create = &nouveau_ttm_tt_create, .ttm_tt_populate = &nouveau_ttm_tt_populate, .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, + .ttm_tt_bind = &nouveau_ttm_tt_bind, + .ttm_tt_unbind = &nouveau_ttm_tt_unbind, + .ttm_tt_destroy = &nouveau_ttm_tt_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = nouveau_bo_evict_flags, .move_notify = nouveau_bo_move_ntfy, diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index aecb7481df0d..2a23c8207436 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -18,6 +18,7 @@ struct nouveau_bo { bool force_coherent; struct ttm_bo_kmap_obj kmap; struct list_head head; + struct list_head io_reserve_lru; /* protected by ttm_bo_reserve() */ struct drm_file *reserved_by; @@ -76,10 +77,10 @@ extern struct ttm_bo_driver nouveau_bo_driver; void 
nouveau_bo_move_init(struct nouveau_drm *); struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align, - u32 flags, u32 tile_mode, u32 tile_flags); -int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 flags, + u32 domain, u32 tile_mode, u32 tile_flags); +int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain, struct sg_table *sg, struct dma_resv *robj); -int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags, +int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain, u32 tile_mode, u32 tile_flags, struct sg_table *sg, struct dma_resv *robj, struct nouveau_bo **); @@ -96,6 +97,8 @@ int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, bool no_wait_gpu); void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo); void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo); +void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo); +void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo); /* TODO: submit equivalent to TTM generic API upstream? */ static inline void __iomem * @@ -119,13 +122,13 @@ nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo) } static inline int -nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags, +nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 domain, struct nouveau_bo **pnvbo) { - int ret = nouveau_bo_new(cli, size, align, flags, + int ret = nouveau_bo_new(cli, size, align, domain, 0, 0, NULL, NULL, pnvbo); if (ret == 0) { - ret = nouveau_bo_pin(*pnvbo, flags, true); + ret = nouveau_bo_pin(*pnvbo, domain, true); if (ret == 0) { ret = nouveau_bo_map(*pnvbo); if (ret == 0) diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index b80e4ebf14a6..8f099601d2f2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -163,9 +163,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device, atomic_set(&chan->killed, 0); /* allocate memory for dma push buffer */ - target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; + target = NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT; if (nouveau_vram_pushbuf) - target = TTM_PL_FLAG_VRAM; + target = NOUVEAU_GEM_DOMAIN_VRAM; ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL, &chan->push.buffer); diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 4e8112fde3e6..af70ef8e5471 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -254,12 +254,12 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage) chunk->pagemap.owner = drm->dev; ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0, - TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL, + NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL, &chunk->bo); if (ret) goto out_release; - ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (ret) goto out_bo_free; @@ -346,7 +346,7 @@ nouveau_dmem_resume(struct nouveau_drm *drm) mutex_lock(&drm->dmem->mutex); list_for_each_entry(chunk, &drm->dmem->chunks, list) { - ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); /* FIXME handle pin failure */ WARN_ON(ret); } diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 810bf6956568..7b640e05bd4c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ 
b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -239,8 +239,8 @@ nv50_dp_mode_valid(struct drm_connector *connector, return MODE_NO_INTERLACE; max_clock = outp->dp.link_nr * outp->dp.link_bw; - ds_clock = drm_dp_downstream_max_clock(outp->dp.dpcd, - outp->dp.downstream_ports); + ds_clock = drm_dp_downstream_max_dotclock(outp->dp.dpcd, + outp->dp.downstream_ports); if (ds_clock) max_clock = min(max_clock, ds_clock); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 73ebf5fba2fc..b8025507a9e4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -164,6 +164,8 @@ struct nouveau_drm { int type_vram; int type_host[2]; int type_ncoh[2]; + struct mutex io_reserve_mutex; + struct list_head io_reserve_lru; } ttm; /* GEM interface support */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index fad8030ec1f8..24ec5339efb4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -341,7 +341,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, if (ret) goto out_unref; - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (ret) { NV_ERROR(drm, "failed to pin fb: %d\n", ret); goto out_unref; @@ -378,8 +378,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; info->fbops = &nouveau_fbcon_sw_ops; - info->fix.smem_start = nvbo->bo.mem.bus.base + - nvbo->bo.mem.bus.offset; + info->fix.smem_start = nvbo->bo.mem.bus.offset; info->fix.smem_len = nvbo->bo.mem.num_pages << PAGE_SHIFT; info->screen_base = nvbo_kmap_obj_iovirtual(nvbo); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 81f111ad3f4f..89adadf4706b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -176,20 +176,12 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, { struct nouveau_drm *drm = cli->drm; struct nouveau_bo *nvbo; - u32 flags = 0; int ret; - if (domain & NOUVEAU_GEM_DOMAIN_VRAM) - flags |= TTM_PL_FLAG_VRAM; - if (domain & NOUVEAU_GEM_DOMAIN_GART) - flags |= TTM_PL_FLAG_TT; - if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) - flags |= TTM_PL_FLAG_SYSTEM; + if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART))) + domain |= NOUVEAU_GEM_DOMAIN_CPU; - if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) - flags |= TTM_PL_FLAG_UNCACHED; - - nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode, + nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode, tile_flags); if (IS_ERR(nvbo)) return PTR_ERR(nvbo); @@ -202,7 +194,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, return ret; } - ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL); + ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL); if (ret) { nouveau_bo_ref(NULL, &nvbo); return ret; @@ -296,32 +288,28 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, struct ttm_buffer_object *bo = &nvbo->bo; uint32_t domains = valid_domains & nvbo->valid_domains & (write_domains ? 
write_domains : read_domains); - uint32_t pref_flags = 0, valid_flags = 0; + uint32_t pref_domains = 0; if (!domains) return -EINVAL; - if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) - valid_flags |= TTM_PL_FLAG_VRAM; - - if (valid_domains & NOUVEAU_GEM_DOMAIN_GART) - valid_flags |= TTM_PL_FLAG_TT; + valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART); if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && bo->mem.mem_type == TTM_PL_VRAM) - pref_flags |= TTM_PL_FLAG_VRAM; + pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM; else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && bo->mem.mem_type == TTM_PL_TT) - pref_flags |= TTM_PL_FLAG_TT; + pref_domains |= NOUVEAU_GEM_DOMAIN_GART; else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) - pref_flags |= TTM_PL_FLAG_VRAM; + pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM; else - pref_flags |= TTM_PL_FLAG_TT; + pref_domains |= NOUVEAU_GEM_DOMAIN_GART; - nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); + nouveau_bo_placement_set(nvbo, pref_domains, valid_domains); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index bae6a3eccee0..b2ecb91f8ddc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -32,7 +32,7 @@ struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj) struct nouveau_bo *nvbo = nouveau_gem_object(obj); int npages = nvbo->bo.num_pages; - return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages); + return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages); } void *nouveau_gem_prime_vmap(struct drm_gem_object *obj) @@ -64,14 +64,12 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, struct nouveau_bo *nvbo; struct dma_resv *robj = attach->dmabuf->resv; u64 size = attach->dmabuf->size; - u32 flags = 0; int align = 0; int ret; - flags = TTM_PL_FLAG_TT; - dma_resv_lock(robj, NULL); - nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0); + nvbo = nouveau_bo_alloc(&drm->client, &size, &align, + NOUVEAU_GEM_DOMAIN_GART, 0, 0); if (IS_ERR(nvbo)) { obj = ERR_CAST(nvbo); goto unlock; } @@ -88,7 +86,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, goto unlock; } - ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj); + ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART, + sg, robj); if (ret) { nouveau_bo_ref(NULL, &nvbo); obj = ERR_PTR(ret); @@ -108,7 +107,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj) int ret; /* pin buffer into GTT */ - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false); if (ret) return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index eef75c53a197..806d9ec310f5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -14,87 +14,65 @@ struct nouveau_sgdma_be { struct nouveau_mem *mem; }; -static void -nouveau_sgdma_destroy(struct ttm_tt *ttm) +void +nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; if (ttm) { + nouveau_sgdma_unbind(bdev, ttm); + ttm_tt_destroy_common(bdev, ttm); ttm_dma_tt_fini(&nvbe->ttm); kfree(nvbe); } } -static int -nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg) +int +nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be
*)ttm; + struct nouveau_drm *drm = nouveau_bdev(bdev); struct nouveau_mem *mem = nouveau_mem(reg); int ret; + if (nvbe->mem) + return 0; + ret = nouveau_mem_host(reg, &nvbe->ttm); if (ret) return ret; - ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]); - if (ret) { - nouveau_mem_fini(mem); - return ret; + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { + ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]); + if (ret) { + nouveau_mem_fini(mem); + return ret; + } } nvbe->mem = mem; return 0; } -static void -nv04_sgdma_unbind(struct ttm_tt *ttm) +void +nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; - nouveau_mem_fini(nvbe->mem); -} - -static struct ttm_backend_func nv04_sgdma_backend = { - .bind = nv04_sgdma_bind, - .unbind = nv04_sgdma_unbind, - .destroy = nouveau_sgdma_destroy -}; - -static int -nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg) -{ - struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; - struct nouveau_mem *mem = nouveau_mem(reg); - int ret; - - ret = nouveau_mem_host(reg, &nvbe->ttm); - if (ret) - return ret; - - nvbe->mem = mem; - return 0; + if (nvbe->mem) { + nouveau_mem_fini(nvbe->mem); + nvbe->mem = NULL; + } } -static struct ttm_backend_func nv50_sgdma_backend = { - .bind = nv50_sgdma_bind, - .unbind = nv04_sgdma_unbind, - .destroy = nouveau_sgdma_destroy -}; - struct ttm_tt * nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags) { - struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_sgdma_be *nvbe; nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); if (!nvbe) return NULL; - if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) - nvbe->ttm.ttm.func = &nv04_sgdma_backend; - else - nvbe->ttm.ttm.func = &nv50_sgdma_backend; - if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) { kfree(nvbe); return NULL; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 53c6f8827322..427341753441 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -123,13 +123,51 @@ const struct ttm_resource_manager_func nv04_gart_manager = { .free = nouveau_manager_del, }; +static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct ttm_buffer_object *bo = vma->vm_private_data; + pgprot_t prot; + vm_fault_t ret; + + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + return ret; + + nouveau_bo_del_io_reserve_lru(bo); + + prot = vm_get_page_prot(vma->vm_flags); + ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1); + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + return ret; + + nouveau_bo_add_io_reserve_lru(bo); + + dma_resv_unlock(bo->base.resv); + + return ret; +} + +static struct vm_operations_struct nouveau_ttm_vm_ops = { + .fault = nouveau_ttm_fault, + .open = ttm_bo_vm_open, + .close = ttm_bo_vm_close, + .access = ttm_bo_vm_access +}; + int nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *file_priv = filp->private_data; struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); + int ret; - return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); + ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev); + if (ret) + return ret; + + vma->vm_ops = &nouveau_ttm_vm_ops; + return 0; } static int @@ -156,24 +194,13 @@ nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind) static int nouveau_ttm_init_vram(struct nouveau_drm *drm) { 
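/* TTM dropped the per-manager available_caching/default_caching setup, so the WC-versus-uncached decision, including the quirk for BARs that cannot be ioremapped WC, now happens per placement in set_placement_list() in nouveau_bo.c. */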
- struct nvif_mmu *mmu = &drm->client.mmu; if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { - /* Some BARs do not support being ioremapped WC */ - const u8 type = mmu->type[drm->ttm.type_vram].type; struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL); + if (!man) return -ENOMEM; - man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; - - if (type & NVIF_MEM_UNCACHED) { - man->available_caching = TTM_PL_FLAG_UNCACHED; - man->default_caching = TTM_PL_FLAG_UNCACHED; - } - man->func = &nouveau_vram_manager; - man->use_io_reserve_lru = true; ttm_resource_manager_init(man, drm->gem.vram_available >> PAGE_SHIFT); @@ -181,9 +208,7 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) ttm_resource_manager_set_used(man, true); return 0; } else { - return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, - TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC, - TTM_PL_FLAG_WC, false, + return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false, drm->gem.vram_available >> PAGE_SHIFT); } } @@ -208,25 +233,14 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm) { struct ttm_resource_manager *man; unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT; - unsigned available_caching, default_caching; const struct ttm_resource_manager_func *func = NULL; - if (drm->agp.bridge) { - available_caching = TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_WC; - default_caching = TTM_PL_FLAG_WC; - } else { - available_caching = TTM_PL_MASK_CACHING; - default_caching = TTM_PL_FLAG_CACHED; - } if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) func = &nouveau_gart_manager; else if (!drm->agp.bridge) func = &nv04_gart_manager; else - return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, - available_caching, default_caching, - true, + return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true, size_pages); man = kzalloc(sizeof(*man), GFP_KERNEL); @@ -234,8 +248,6 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm) return -ENOMEM; man->func = func; - man->available_caching = available_caching; - man->default_caching = default_caching; man->use_tt = true; ttm_resource_manager_init(man, size_pages); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man); @@ -339,6 +351,9 @@ nouveau_ttm_init(struct nouveau_drm *drm) return ret; } + mutex_init(&drm->ttm.io_reserve_mutex); + INIT_LIST_HEAD(&drm->ttm.io_reserve_lru); + NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20)); NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20)); return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h index eaf25461cd91..69552049bb96 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.h +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h @@ -22,4 +22,7 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *); int nouveau_ttm_global_init(struct nouveau_drm *); void nouveau_ttm_global_release(struct nouveau_drm *); +int nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg); +void nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +void nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm); #endif diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 6b697ee6bc0e..1253fdec712d 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c @@ -130,10 +130,11 @@ nv17_fence_create(struct nouveau_drm *drm) priv->base.context_del = nv10_fence_context_del; 
spin_lock_init(&priv->lock); - ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, + ret = nouveau_bo_new(&drm->client, 4096, 0x1000, + NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL, &priv->bo); if (!ret) { - ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (!ret) { ret = nouveau_bo_map(priv->bo); if (ret) diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index 49b46f51073c..447238e3cbe7 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c @@ -81,10 +81,11 @@ nv50_fence_create(struct nouveau_drm *drm) priv->base.context_del = nv10_fence_context_del; spin_lock_init(&priv->lock); - ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, + ret = nouveau_bo_new(&drm->client, 4096, 0x1000, + NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL, &priv->bo); if (!ret) { - ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (!ret) { ret = nouveau_bo_map(priv->bo); if (ret) diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 7ed36b3a6b7d..7c9c928c3196 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -209,12 +209,13 @@ nv84_fence_create(struct nouveau_drm *drm) mutex_init(&priv->mutex); /* Use VRAM if there is any ; otherwise fallback to system memory */ - domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : - /* - * fences created in sysmem must be non-cached or we - * will lose CPU/GPU coherency! - */ - TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; + domain = drm->client.device.info.ram_size != 0 ? + NOUVEAU_GEM_DOMAIN_VRAM : + /* + * fences created in sysmem must be non-cached or we + * will lose CPU/GPU coherency! 
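+ * (the GPU writes fence seqnos into this BO and the CPU polls them directly, so a cacheable CPU mapping could keep returning stale values)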
+ */ + NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT; ret = nouveau_bo_new(&drm->client, 16 * drm->chan.nr, 0, domain, 0, 0, NULL, NULL, &priv->bo); if (ret == 0) { diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index d0d12d5dd76c..f67f223c6479 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -1297,10 +1297,9 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, omap_obj->dma_addr = sg_dma_address(sgt->sgl); } else { /* Create pages list from sgt */ - struct sg_page_iter iter; struct page **pages; unsigned int npages; - unsigned int i = 0; + unsigned int ret; npages = DIV_ROUND_UP(size, PAGE_SIZE); pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); @@ -1311,14 +1310,9 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, } omap_obj->pages = pages; - - for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) { - pages[i++] = sg_page_iter_page(&iter); - if (i > npages) - break; - } - - if (WARN_ON(i != npages)) { + ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, + npages); + if (ret) { omap_gem_free_object(obj); obj = ERR_PTR(-ENOMEM); goto done; diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 8d97d07c5871..b9dbedf8f15e 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -324,13 +324,30 @@ config DRM_PANEL_SAMSUNG_S6E63J0X03 select VIDEOMODE_HELPERS config DRM_PANEL_SAMSUNG_S6E63M0 - tristate "Samsung S6E63M0 RGB/SPI panel" + tristate "Samsung S6E63M0 RGB panel" depends on OF - depends on SPI depends on BACKLIGHT_CLASS_DEVICE help Say Y here if you want to enable support for Samsung S6E63M0 - AMOLED LCD panel. + AMOLED LCD panel. This panel can be accessed using SPI or + DSI. + +config DRM_PANEL_SAMSUNG_S6E63M0_SPI + tristate "Samsung S6E63M0 RGB SPI interface" + depends on SPI + depends on DRM_PANEL_SAMSUNG_S6E63M0 + default DRM_PANEL_SAMSUNG_S6E63M0 + help + Say Y here if you want to be able to access the Samsung + S6E63M0 panel using SPI. + +config DRM_PANEL_SAMSUNG_S6E63M0_DSI + tristate "Samsung S6E63M0 RGB DSI interface" + depends on DRM_MIPI_DSI + depends on DRM_PANEL_SAMSUNG_S6E63M0 + help + Say Y here if you want to be able to access the Samsung + S6E63M0 panel using DSI. 
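Both interface modules feed the same transport-agnostic core: each bus driver probes on the bus it owns and hands the core a pair of DCS read/write callbacks plus a flag selecting the connector type, as declared in the new panel-samsung-s6e63m0.h further down. A minimal sketch of the contract a transport has to satisfy; the example_* names are hypothetical, only s6e63m0_probe() and s6e63m0_remove() are the driver's real entry points:

#include <linux/types.h>
#include <linux/device.h>

#include "panel-samsung-s6e63m0.h"

static int example_dcs_read(struct device *dev, const u8 cmd, u8 *val)
{
	/* issue the DCS read for 'cmd' on the bus and return one byte */
	*val = 0;
	return 0;
}

static int example_dcs_write(struct device *dev, const u8 *data, size_t len)
{
	/* data[0] is the DCS command, data[1..len-1] are its parameters */
	return 0;
}

static int example_transport_probe(struct device *dev)
{
	/* the last argument selects DRM_MODE_CONNECTOR_DSI (true) or DPI (false) */
	return s6e63m0_probe(dev, example_dcs_read, example_dcs_write, false);
}

The two real transports differ only inside these callbacks: the SPI module clocks out 9-bit words, bit 8 clear for the command byte and set (DATA_MASK, 0x100) for parameter bytes, while the DSI module sends ordinary DCS packets but splits payloads longer than S6E63M0_DSI_MAX_CHUNK (15) bytes, re-issuing the command with MCS_GLOBAL_PARAM (0xb0) set to the current offset before each continuation. A 23-byte gamma sequence, for instance, goes out as the command plus 15 bytes, then offset 15, then the remaining 7 bytes.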
config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller" diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 15a4e7752951..2ba560bca61d 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -34,6 +34,8 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_SPI) += panel-samsung-s6e63m0-spi.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c new file mode 100644 index 000000000000..eec74c10ddda --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DSI interface to the Samsung S6E63M0 panel. + * (C) 2019 Linus Walleij + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/of_device.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_print.h> + +#include "panel-samsung-s6e63m0.h" + +#define MCS_GLOBAL_PARAM 0xb0 +#define S6E63M0_DSI_MAX_CHUNK 15 /* CMD + 15 bytes max */ + +static int s6e63m0_dsi_dcs_read(struct device *dev, const u8 cmd, u8 *data) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); + int ret; + + ret = mipi_dsi_dcs_read(dsi, cmd, data, 1); + if (ret < 0) { + dev_err(dev, "could not read DCS CMD %02x\n", cmd); + return ret; + } + + dev_info(dev, "DSI read CMD %02x = %02x\n", cmd, *data); + + return 0; +} + +static int s6e63m0_dsi_dcs_write(struct device *dev, const u8 *data, size_t len) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); + const u8 *seqp = data; + u8 cmd; + u8 cmdwritten; + int remain; + int chunk; + int ret; + + dev_info(dev, "DSI writing dcs seq: %*ph\n", (int)len, data); + + /* Pick out and skip past the DCS command */ + cmd = *seqp; + seqp++; + cmdwritten = 0; + remain = len - 1; + chunk = remain; + + /* Send max S6E63M0_DSI_MAX_CHUNK bytes at a time */ + if (chunk > S6E63M0_DSI_MAX_CHUNK) + chunk = S6E63M0_DSI_MAX_CHUNK; + ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk); + if (ret < 0) { + dev_err(dev, "error sending DCS command seq cmd %02x\n", cmd); + return ret; + } + cmdwritten += chunk; + seqp += chunk; + + while (cmdwritten < remain) { + chunk = remain - cmdwritten; + if (chunk > S6E63M0_DSI_MAX_CHUNK) + chunk = S6E63M0_DSI_MAX_CHUNK; + ret = mipi_dsi_dcs_write(dsi, MCS_GLOBAL_PARAM, &cmdwritten, 1); + if (ret < 0) { + dev_err(dev, "error sending CMD %02x global param %02x\n", + cmd, cmdwritten); + return ret; + } + ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk); + if (ret < 0) { + dev_err(dev, "error sending CMD %02x chunk\n", cmd); + return ret; + } + cmdwritten += chunk; + seqp += chunk; + } + dev_info(dev, "sent command %02x %02x bytes\n", cmd, cmdwritten); + + usleep_range(8000, 9000); + + return 0; +} + +static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + int ret; + + dsi->lanes = 2; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->hs_rate = 349440000; + 
dsi->lp_rate = 9600000; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | + MIPI_DSI_MODE_EOT_PACKET | + MIPI_DSI_MODE_VIDEO_BURST; + + ret = s6e63m0_probe(dev, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write, + true); + if (ret) + return ret; + + ret = mipi_dsi_attach(dsi); + if (ret < 0) + s6e63m0_remove(dev); + + return ret; +} + +static int s6e63m0_dsi_remove(struct mipi_dsi_device *dsi) +{ + mipi_dsi_detach(dsi); + return s6e63m0_remove(&dsi->dev); +} + +static const struct of_device_id s6e63m0_dsi_of_match[] = { + { .compatible = "samsung,s6e63m0" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, s6e63m0_dsi_of_match); + +static struct mipi_dsi_driver s6e63m0_dsi_driver = { + .probe = s6e63m0_dsi_probe, + .remove = s6e63m0_dsi_remove, + .driver = { + .name = "panel-samsung-s6e63m0", + .of_match_table = s6e63m0_dsi_of_match, + }, +}; +module_mipi_dsi_driver(s6e63m0_dsi_driver); + +MODULE_AUTHOR("Linus Walleij <linusw@kernel.org>"); +MODULE_DESCRIPTION("s6e63m0 LCD DSI Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c new file mode 100644 index 000000000000..d298d780220d --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/spi/spi.h> +#include <linux/delay.h> + +#include <drm/drm_print.h> + +#include "panel-samsung-s6e63m0.h" + +#define DATA_MASK 0x100 + +static int s6e63m0_spi_dcs_read(struct device *dev, const u8 cmd, u8 *data) +{ + /* + * FIXME: implement reading DCS commands over SPI so we can + * properly identify which physical panel is connected. + */ + *data = 0; + + return 0; +} + +static int s6e63m0_spi_write_word(struct device *dev, u16 data) +{ + struct spi_device *spi = to_spi_device(dev); + struct spi_transfer xfer = { + .len = 2, + .tx_buf = &data, + }; + struct spi_message msg; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + return spi_sync(spi, &msg); +} + +static int s6e63m0_spi_dcs_write(struct device *dev, const u8 *data, size_t len) +{ + int ret = 0; + + dev_dbg(dev, "SPI writing dcs seq: %*ph\n", (int)len, data); + ret = s6e63m0_spi_write_word(dev, *data); + + while (!ret && --len) { + ++data; + ret = s6e63m0_spi_write_word(dev, *data | DATA_MASK); + } + + if (ret) { + dev_err(dev, "SPI error %d writing dcs seq: %*ph\n", ret, + (int)len, data); + } + + usleep_range(300, 310); + + return ret; +} + +static int s6e63m0_spi_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + int ret; + + spi->bits_per_word = 9; + spi->mode = SPI_MODE_3; + ret = spi_setup(spi); + if (ret < 0) { + dev_err(dev, "spi setup failed.\n"); + return ret; + } + return s6e63m0_probe(dev, s6e63m0_spi_dcs_read, s6e63m0_spi_dcs_write, + false); +} + +static int s6e63m0_spi_remove(struct spi_device *spi) +{ + return s6e63m0_remove(&spi->dev); +} + +static const struct of_device_id s6e63m0_spi_of_match[] = { + { .compatible = "samsung,s6e63m0" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, s6e63m0_spi_of_match); + +static struct spi_driver s6e63m0_spi_driver = { + .probe = s6e63m0_spi_probe, + .remove = s6e63m0_spi_remove, + .driver = { + .name = "panel-samsung-s6e63m0", + .of_match_table = s6e63m0_spi_of_match, + }, +}; +module_spi_driver(s6e63m0_spi_driver); + +MODULE_AUTHOR("Paweł Chmiel <pawel.mikolaj.chmiel@gmail.com>"); +MODULE_DESCRIPTION("s6e63m0 LCD SPI Driver"); +MODULE_LICENSE("GPL v2"); diff --git
a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c index 2cc772fdc456..3eee67e2d86a 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c @@ -16,25 +16,34 @@ #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/regulator/consumer.h> -#include <linux/spi/spi.h> #include <video/mipi_display.h> +#include "panel-samsung-s6e63m0.h" + /* Manufacturer Command Set */ #define MCS_ELVSS_ON 0xb1 #define MCS_MIECTL1 0xc0 #define MCS_BCMODE 0xc1 +#define MCS_ERROR_CHECK 0xd5 +#define MCS_READ_ID1 0xda +#define MCS_READ_ID2 0xdb +#define MCS_READ_ID3 0xdc +#define MCS_LEVEL_2_KEY 0xf0 +#define MCS_MTP_KEY 0xf1 #define MCS_DISCTL 0xf2 #define MCS_SRCCTL 0xf6 #define MCS_IFCTL 0xf7 #define MCS_PANELCTL 0xF8 #define MCS_PGAMMACTL 0xfa +#define S6E63M0_LCD_ID_VALUE_M2 0xA4 +#define S6E63M0_LCD_ID_VALUE_SM2 0xB4 +#define S6E63M0_LCD_ID_VALUE_SM2_1 0xB6 + #define NUM_GAMMA_LEVELS 11 #define GAMMA_TABLE_COUNT 23 -#define DATA_MASK 0x100 - #define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1) /* array of gamma tables for gamma value 2.2 */ @@ -87,8 +96,11 @@ static u8 const s6e63m0_gamma_22[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = { struct s6e63m0 { struct device *dev; + int (*dcs_read)(struct device *dev, const u8 cmd, u8 *val); + int (*dcs_write)(struct device *dev, const u8 *data, size_t len); struct drm_panel panel; struct backlight_device *bl_dev; + u8 lcd_type; struct regulator_bulk_data supplies[2]; struct gpio_desc *reset_gpio; @@ -134,42 +146,20 @@ static int s6e63m0_clear_error(struct s6e63m0 *ctx) return ret; } -static int s6e63m0_spi_write_word(struct s6e63m0 *ctx, u16 data) +static void s6e63m0_dcs_read(struct s6e63m0 *ctx, const u8 cmd, u8 *data) { - struct spi_device *spi = to_spi_device(ctx->dev); - struct spi_transfer xfer = { - .len = 2, - .tx_buf = &data, - }; - struct spi_message msg; - - spi_message_init(&msg); - spi_message_add_tail(&xfer, &msg); + if (ctx->error < 0) + return; - return spi_sync(spi, &msg); + ctx->error = ctx->dcs_read(ctx->dev, cmd, data); } static void s6e63m0_dcs_write(struct s6e63m0 *ctx, const u8 *data, size_t len) { - int ret = 0; - if (ctx->error < 0 || len == 0) return; - dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data); - ret = s6e63m0_spi_write_word(ctx, *data); - - while (!ret && --len) { - ++data; - ret = s6e63m0_spi_write_word(ctx, *data | DATA_MASK); - } - - if (ret) { - dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, (int)len, data); - ctx->error = ret; - } - - usleep_range(300, 310); + ctx->error = ctx->dcs_write(ctx->dev, data, len); } #define s6e63m0_dcs_write_seq_static(ctx, seq ...) 
\ @@ -178,6 +168,43 @@ static void s6e63m0_dcs_write(struct s6e63m0 *ctx, const u8 *data, size_t len) s6e63m0_dcs_write(ctx, d, ARRAY_SIZE(d)); \ }) +static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx) +{ + u8 id1, id2, id3; + int ret; + + s6e63m0_dcs_read(ctx, MCS_READ_ID1, &id1); + s6e63m0_dcs_read(ctx, MCS_READ_ID2, &id2); + s6e63m0_dcs_read(ctx, MCS_READ_ID3, &id3); + + ret = s6e63m0_clear_error(ctx); + if (ret) { + dev_err(ctx->dev, "error checking LCD type (%d)\n", ret); + ctx->lcd_type = 0x00; + return ret; + } + + dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3); + + /* We attempt to detect what panel is mounted on the controller */ + switch (id2) { + case S6E63M0_LCD_ID_VALUE_M2: + dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI M2\n"); + break; + case S6E63M0_LCD_ID_VALUE_SM2: + case S6E63M0_LCD_ID_VALUE_SM2_1: + dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI SM2\n"); + break; + default: + dev_info(ctx->dev, "unknown LCD panel type %02x\n", id2); + break; + } + + ctx->lcd_type = id2; + + return 0; +} + static void s6e63m0_init(struct s6e63m0 *ctx) { s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL, @@ -249,8 +276,6 @@ static void s6e63m0_init(struct s6e63m0 *ctx) s6e63m0_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0b); - - s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE); } static int s6e63m0_power_on(struct s6e63m0 *ctx) @@ -263,6 +288,9 @@ static int s6e63m0_power_on(struct s6e63m0 *ctx) msleep(25); + /* Be sure to send a reset pulse */ + gpiod_set_value(ctx->reset_gpio, 1); + msleep(5); gpiod_set_value(ctx->reset_gpio, 0); msleep(120); @@ -292,8 +320,10 @@ static int s6e63m0_disable(struct drm_panel *panel) backlight_disable(ctx->bl_dev); + s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF); + msleep(10); s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE); - msleep(200); + msleep(120); ctx->enabled = false; @@ -331,6 +361,15 @@ static int s6e63m0_prepare(struct drm_panel *panel) if (ret < 0) return ret; + /* Magic to unlock level 2 control of the display */ + s6e63m0_dcs_write_seq_static(ctx, MCS_LEVEL_2_KEY, 0x5a, 0x5a); + /* Magic to unlock MTP reading */ + s6e63m0_dcs_write_seq_static(ctx, MCS_MTP_KEY, 0x5a, 0x5a); + + ret = s6e63m0_check_lcd_type(ctx); + if (ret < 0) + return ret; + s6e63m0_init(ctx); ret = s6e63m0_clear_error(ctx); @@ -350,7 +389,15 @@ static int s6e63m0_enable(struct drm_panel *panel) if (ctx->enabled) return 0; + s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE); + msleep(120); s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON); + msleep(10); + + s6e63m0_dcs_write_seq_static(ctx, MCS_ERROR_CHECK, + 0xE7, 0x14, 0x60, 0x17, 0x0A, 0x49, 0xC3, + 0x8F, 0x19, 0x64, 0x91, 0x84, 0x76, 0x20, + 0x0F, 0x00); backlight_enable(ctx->bl_dev); @@ -429,9 +476,11 @@ static int s6e63m0_backlight_register(struct s6e63m0 *ctx) return ret; } -static int s6e63m0_probe(struct spi_device *spi) +int s6e63m0_probe(struct device *dev, + int (*dcs_read)(struct device *dev, const u8 cmd, u8 *val), + int (*dcs_write)(struct device *dev, const u8 *data, size_t len), + bool dsi_mode) { - struct device *dev = &spi->dev; struct s6e63m0 *ctx; int ret; @@ -439,7 +488,9 @@ static int s6e63m0_probe(struct spi_device *spi) if (!ctx) return -ENOMEM; - spi_set_drvdata(spi, ctx); + ctx->dcs_read = dcs_read; + ctx->dcs_write = dcs_write; + dev_set_drvdata(dev, ctx); ctx->dev = dev; ctx->enabled = false; @@ -460,15 +511,8 @@ static int s6e63m0_probe(struct spi_device *spi) return PTR_ERR(ctx->reset_gpio); } - spi->bits_per_word 
= 9; - spi->mode = SPI_MODE_3; - ret = spi_setup(spi); - if (ret < 0) { - dev_err(dev, "spi setup failed.\n"); - return ret; - } - drm_panel_init(&ctx->panel, dev, &s6e63m0_drm_funcs, + dsi_mode ? DRM_MODE_CONNECTOR_DSI : DRM_MODE_CONNECTOR_DPI); ret = s6e63m0_backlight_register(ctx); @@ -479,31 +523,17 @@ static int s6e63m0_probe(struct spi_device *spi) return 0; } +EXPORT_SYMBOL_GPL(s6e63m0_probe); -static int s6e63m0_remove(struct spi_device *spi) +int s6e63m0_remove(struct device *dev) { - struct s6e63m0 *ctx = spi_get_drvdata(spi); + struct s6e63m0 *ctx = dev_get_drvdata(dev); drm_panel_remove(&ctx->panel); return 0; } - -static const struct of_device_id s6e63m0_of_match[] = { - { .compatible = "samsung,s6e63m0" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, s6e63m0_of_match); - -static struct spi_driver s6e63m0_driver = { - .probe = s6e63m0_probe, - .remove = s6e63m0_remove, - .driver = { - .name = "panel-samsung-s6e63m0", - .of_match_table = s6e63m0_of_match, - }, -}; -module_spi_driver(s6e63m0_driver); +EXPORT_SYMBOL_GPL(s6e63m0_remove); MODULE_AUTHOR("Paweł Chmiel <pawel.mikolaj.chmiel@gmail.com>"); MODULE_DESCRIPTION("s6e63m0 LCD Driver"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h new file mode 100644 index 000000000000..c669fec91763 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _PANEL_SAMSUNG_S6E63M0_H +#define _PANEL_SAMSUNG_S6E63M0_H + +int s6e63m0_probe(struct device *dev, + int (*dcs_read)(struct device *dev, const u8 cmd, u8 *val), + int (*dcs_write)(struct device *dev, const u8 *data, + size_t len), + bool dsi_mode); +int s6e63m0_remove(struct device *dev); + +#endif /* _PANEL_SAMSUNG_S6E63M0_H */ diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 953f7536a773..2e9cbd1c4a58 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -70,6 +70,9 @@ struct panfrost_compatible { int num_pm_domains; /* Only required if num_pm_domains > 1.
*/ const char * const *pm_domain_names; + + /* Vendor implementation quirks callback */ + void (*vendor_quirk)(struct panfrost_device *pfdev); }; struct panfrost_device { diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 36463c89e966..37d4cb7a5491 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -656,7 +656,18 @@ static const struct panfrost_compatible default_data = { .pm_domain_names = NULL, }; +static const struct panfrost_compatible amlogic_data = { + .num_supplies = ARRAY_SIZE(default_supplies), + .supply_names = default_supplies, + .vendor_quirk = panfrost_gpu_amlogic_quirk, +}; + static const struct of_device_id dt_match[] = { + /* Set first to probe before the generic compatibles */ + { .compatible = "amlogic,meson-gxm-mali", + .data = &amlogic_data, }, + { .compatible = "amlogic,meson-g12a-mali", + .data = &amlogic_data, }, { .compatible = "arm,mali-t604", .data = &default_data, }, { .compatible = "arm,mali-t624", .data = &default_data, }, { .compatible = "arm,mali-t628", .data = &default_data, }, diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 33355dd302f1..1a6cea0e0bd7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -41,8 +41,8 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) for (i = 0; i < n_sgt; i++) { if (bo->sgts[i].sgl) { - dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl, - bo->sgts[i].nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(pfdev->dev, &bo->sgts[i], + DMA_BIDIRECTIONAL, 0); sg_free_table(&bo->sgts[i]); } } diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index e0f190e43813..e1b2a3376624 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -76,6 +76,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) return 0; } +void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev) +{ + /* + * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need + * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order + * to operate correctly.
+ */ + gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK); + gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16)); +} + static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev) { u32 quirks = 0; @@ -136,6 +147,10 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev) if (quirks) gpu_write(pfdev, GPU_JM_CONFIG, quirks); + + /* Platform-specific quirks go here */ + if (pfdev->comp->vendor_quirk) + pfdev->comp->vendor_quirk(pfdev); } #define MAX_HW_REVS 6 @@ -305,6 +320,8 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev) int ret; u32 val; + panfrost_gpu_init_quirks(pfdev); + /* Just turn on everything for now */ gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO, @@ -344,6 +361,7 @@ int panfrost_gpu_init(struct panfrost_device *pfdev) dma_set_mask_and_coherent(pfdev->dev, DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features))); + dma_set_max_seg_size(pfdev->dev, UINT_MAX); irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); if (irq <= 0) @@ -356,7 +374,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev) return err; } - panfrost_gpu_init_quirks(pfdev); panfrost_gpu_power_on(pfdev); return 0; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h index 4112412087b2..468c51e7e46d 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.h +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h @@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev); void panfrost_gpu_power_on(struct panfrost_device *pfdev); void panfrost_gpu_power_off(struct panfrost_device *pfdev); +void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev); + #endif diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index e8f7b11352d2..776448c527ea 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -253,7 +253,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, struct io_pgtable_ops *ops = mmu->pgtbl_ops; u64 start_iova = iova; - for_each_sg(sgt->sgl, sgl, sgt->nents, count) { + for_each_sgtable_dma_sg(sgt, sgl, count) { unsigned long paddr = sg_dma_address(sgl); size_t len = sg_dma_len(sgl); @@ -517,10 +517,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, if (ret) goto err_pages; - if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) { - ret = -EINVAL; + ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0); + if (ret) goto err_map; - } mmu_map_sg(pfdev, bomapping->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h index ea38ac60581c..eddaa62ad8b0 100644 --- a/drivers/gpu/drm/panfrost/panfrost_regs.h +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h @@ -51,6 +51,10 @@ #define GPU_STATUS 0x34 #define GPU_STATUS_PRFCNT_ACTIVE BIT(2) #define GPU_LATEST_FLUSH_ID 0x38 +#define GPU_PWR_KEY 0x50 /* (WO) Power manager key register */ +#define GPU_PWR_KEY_UNLOCK 0x2968A819 +#define GPU_PWR_OVERRIDE0 0x54 /* (RW) Power manager override settings */ +#define GPU_PWR_OVERRIDE1 0x58 /* (RW) Power manager override settings */ #define GPU_FAULT_STATUS 0x3C #define GPU_FAULT_ADDRESS_LO 0x40 #define GPU_FAULT_ADDRESS_HI 0x44 diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index f838b6d689aa..2bc364412e8b 100644 ---
a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -64,16 +64,24 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) qbo->placement.placement = qbo->placements; qbo->placement.busy_placement = qbo->placements; - if (domain == QXL_GEM_DOMAIN_VRAM) - qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; + if (domain == QXL_GEM_DOMAIN_VRAM) { + qbo->placements[c].mem_type = TTM_PL_VRAM; + qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag; + } if (domain == QXL_GEM_DOMAIN_SURFACE) { - qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag; - qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; + qbo->placements[c].mem_type = TTM_PL_PRIV; + qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag; + qbo->placements[c].mem_type = TTM_PL_VRAM; + qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag; + } + if (domain == QXL_GEM_DOMAIN_CPU) { + qbo->placements[c].mem_type = TTM_PL_SYSTEM; + qbo->placements[c++].flags = TTM_PL_MASK_CACHING | pflag; + } + if (!c) { + qbo->placements[c].mem_type = TTM_PL_SYSTEM; + qbo->placements[c++].flags = TTM_PL_MASK_CACHING; } - if (domain == QXL_GEM_DOMAIN_CPU) - qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; - if (!c) - qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; qbo->placement.num_placement = c; qbo->placement.num_busy_placement = c; for (i = 0; i < c; ++i) { @@ -167,6 +175,7 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset) { + unsigned long offset; void *rptr; int ret; struct io_mapping *map; @@ -178,9 +187,8 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, else goto fallback; - ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem); - - return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset); + offset = bo->tbo.mem.start << PAGE_SHIFT; + return io_mapping_map_atomic_wc(map, offset + page_offset); fallback: if (bo->kptr) { rptr = bo->kptr + (page_offset * PAGE_SIZE); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 7aae0a96f043..fd691fff8394 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -55,7 +55,8 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM + .mem_type = TTM_PL_SYSTEM, + .flags = TTM_PL_MASK_CACHING }; if (!qxl_ttm_bo_is_qxl_bo(bo)) { @@ -81,13 +82,12 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, return 0; case TTM_PL_VRAM: mem->bus.is_iomem = true; - mem->bus.base = qdev->vram_base; - mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base; break; case TTM_PL_PRIV: mem->bus.is_iomem = true; - mem->bus.base = qdev->surfaceram_base; - mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.offset = (mem->start << PAGE_SHIFT) + + qdev->surfaceram_base; break; default: return -EINVAL; @@ -104,7 +104,8 @@ struct qxl_ttm_tt { u64 offset; }; -static int qxl_ttm_backend_bind(struct ttm_tt *ttm, +static int qxl_ttm_backend_bind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct qxl_ttm_tt *gtt = (void *)ttm; @@ -118,25 +119,22 @@ static int qxl_ttm_backend_bind(struct ttm_tt *ttm, return -1; } -static void qxl_ttm_backend_unbind(struct ttm_tt *ttm) +static void 
qxl_ttm_backend_unbind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { /* Not implemented */ } -static void qxl_ttm_backend_destroy(struct ttm_tt *ttm) +static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { struct qxl_ttm_tt *gtt = (void *)ttm; + ttm_tt_destroy_common(bdev, ttm); ttm_tt_fini(&gtt->ttm); kfree(gtt); } -static struct ttm_backend_func qxl_backend_func = { - .bind = &qxl_ttm_backend_bind, - .unbind = &qxl_ttm_backend_unbind, - .destroy = &qxl_ttm_backend_destroy, -}; - static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { @@ -147,7 +145,6 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo, gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL); if (gtt == NULL) return NULL; - gtt->ttm.func = &qxl_backend_func; gtt->qdev = qdev; if (ttm_tt_init(&gtt->ttm, bo, page_flags)) { kfree(gtt); @@ -156,16 +153,6 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo, return &gtt->ttm; } -static void qxl_move_null(struct ttm_buffer_object *bo, - struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - - BUG_ON(old_mem->mm_node != NULL); - *old_mem = *new_mem; - new_mem->mm_node = NULL; -} - static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem) @@ -178,7 +165,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, return ret; if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { - qxl_move_null(bo, new_mem); + ttm_bo_move_null(bo, new_mem); return 0; } return ttm_bo_move_memcpy(bo, ctx, new_mem); @@ -202,6 +189,9 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo, static struct ttm_bo_driver qxl_bo_driver = { .ttm_tt_create = &qxl_ttm_tt_create, + .ttm_tt_bind = &qxl_ttm_backend_bind, + .ttm_tt_destroy = &qxl_ttm_backend_destroy, + .ttm_tt_unbind = &qxl_ttm_backend_unbind, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &qxl_evict_flags, .move = &qxl_bo_move, @@ -213,8 +203,7 @@ static int qxl_ttm_init_mem_type(struct qxl_device *qdev, unsigned int type, uint64_t size) { - return ttm_range_man_init(&qdev->mman.bdev, type, TTM_PL_MASK_CACHING, - TTM_PL_FLAG_CACHED, false, size); + return ttm_range_man_init(&qdev->mman.bdev, type, false, size); } int qxl_ttm_init(struct qxl_device *qdev) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index cc4f58d16589..a6d8de01194a 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2815,10 +2815,12 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); -extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, +extern int radeon_ttm_tt_set_userptr(struct radeon_device *rdev, + struct ttm_tt *ttm, uint64_t addr, uint32_t flags); -extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm); -extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm); +extern bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, struct ttm_tt *ttm); +extern bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, struct ttm_tt *ttm); +bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, struct ttm_tt *ttm); extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc,
u64 base); extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 33ae1b883268..21ce2f9502c0 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -160,7 +160,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->relocs[i].allowed_domains = domain; } - if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) { + if (radeon_ttm_tt_has_userptr(p->rdev, p->relocs[i].robj->tbo.ttm)) { uint32_t domain = p->relocs[i].preferred_domains; if (!(domain & RADEON_GEM_DOMAIN_GTT)) { DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is " diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 7f5dfe04789e..e5c4271e64ed 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -331,7 +331,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, goto handle_lockup; bo = gem_to_radeon_bo(gobj); - r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); + r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags); if (r) goto release_object; @@ -420,7 +420,7 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, return -ENOENT; } robj = gem_to_radeon_bo(gobj); - if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { + if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) { drm_gem_object_put(gobj); return -EPERM; } @@ -721,7 +721,7 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data, robj = gem_to_radeon_bo(gobj); r = -EPERM; - if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) + if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) goto out; r = radeon_bo_reserve(robj, false); diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index f93829f08a4d..97b9b6dd6dd3 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -53,7 +53,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn, struct ttm_operation_ctx ctx = { false, false }; long r; - if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) + if (!bo->tbo.ttm || !radeon_ttm_tt_is_bound(bo->tbo.bdev, bo->tbo.ttm)) return true; if (!mmu_notifier_range_blockable(range)) diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index bb7582afd803..316e35d3f8a9 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -112,58 +112,58 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) { rbo->placements[c].fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; + rbo->placements[c].mem_type = TTM_PL_VRAM; rbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; + TTM_PL_FLAG_UNCACHED; } rbo->placements[c].fpfn = 0; + rbo->placements[c].mem_type = TTM_PL_VRAM; rbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; + TTM_PL_FLAG_UNCACHED; } if (domain & RADEON_GEM_DOMAIN_GTT) { if (rbo->flags & RADEON_GEM_GTT_UC) { rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_TT; + rbo->placements[c].mem_type = TTM_PL_TT; + rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED; } else if ((rbo->flags & RADEON_GEM_GTT_WC) || (rbo->rdev->flags & RADEON_IS_AGP)) { 
rbo->placements[c].fpfn = 0; + rbo->placements[c].mem_type = TTM_PL_TT; rbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_TT; + TTM_PL_FLAG_UNCACHED; } else { rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_TT; + rbo->placements[c].mem_type = TTM_PL_TT; + rbo->placements[c++].flags = TTM_PL_FLAG_CACHED; } } if (domain & RADEON_GEM_DOMAIN_CPU) { if (rbo->flags & RADEON_GEM_GTT_UC) { rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_SYSTEM; + rbo->placements[c].mem_type = TTM_PL_SYSTEM; + rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED; } else if ((rbo->flags & RADEON_GEM_GTT_WC) || rbo->rdev->flags & RADEON_IS_AGP) { rbo->placements[c].fpfn = 0; + rbo->placements[c].mem_type = TTM_PL_SYSTEM; rbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_SYSTEM; + TTM_PL_FLAG_UNCACHED; } else { rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_SYSTEM; + rbo->placements[c].mem_type = TTM_PL_SYSTEM; + rbo->placements[c++].flags = TTM_PL_FLAG_CACHED; } } if (!c) { rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM; + rbo->placements[c].mem_type = TTM_PL_SYSTEM; + rbo->placements[c++].flags = TTM_PL_MASK_CACHING; } rbo->placement.num_placement = c; @@ -171,7 +171,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) for (i = 0; i < c; ++i) { if ((rbo->flags & RADEON_GEM_CPU_ACCESS) && - (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) && + (rbo->placements[i].mem_type == TTM_PL_VRAM) && !rbo->placements[i].fpfn) rbo->placements[i].lpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; @@ -331,7 +331,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, struct ttm_operation_ctx ctx = { false, false }; int r, i; - if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) + if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm)) return -EPERM; if (bo->pin_count) { @@ -360,7 +360,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, radeon_ttm_placement_from_domain(bo, domain); for (i = 0; i < bo->placement.num_placement; i++) { /* force to pin into visible video ram */ - if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && + if ((bo->placements[i].mem_type == TTM_PL_VRAM) && !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) && (!max_offset || max_offset > bo->rdev->mc.visible_vram_size)) bo->placements[i].lpfn = @@ -824,7 +824,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; for (i = 0; i < rbo->placement.num_placement; i++) { /* Force into visible VRAM */ - if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) && + if ((rbo->placements[i].mem_type == TTM_PL_VRAM) && (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn)) rbo->placements[i].lpfn = lpfn; } diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index b906e8fbd5f3..b9de0e51c0be 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -36,7 +36,7 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj) struct radeon_bo *bo = gem_to_radeon_bo(obj); int npages = bo->tbo.num_pages; - return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); + return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages); } void *radeon_gem_prime_vmap(struct drm_gem_object *obj) @@ 
-121,7 +121,7 @@ struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj, int flags) { struct radeon_bo *bo = gem_to_radeon_bo(gobj); - if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) + if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm)) return ERR_PTR(-EPERM); return drm_gem_prime_export(gobj, flags); } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 74ad50c7491c..36150b7f31a9 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -56,6 +56,10 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev); static void radeon_ttm_debugfs_fini(struct radeon_device *rdev); +static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, + struct ttm_resource *bo_mem); + struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) { struct radeon_mman *mman; @@ -69,17 +73,13 @@ struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) static int radeon_ttm_init_vram(struct radeon_device *rdev) { return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_VRAM, - TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC, - TTM_PL_FLAG_WC, false, - rdev->mc.real_vram_size >> PAGE_SHIFT); + false, rdev->mc.real_vram_size >> PAGE_SHIFT); } static int radeon_ttm_init_gtt(struct radeon_device *rdev) { return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_TT, - TTM_PL_MASK_CACHING, - TTM_PL_FLAG_CACHED, true, - rdev->mc.gtt_size >> PAGE_SHIFT); + true, rdev->mc.gtt_size >> PAGE_SHIFT); } static void radeon_evict_flags(struct ttm_buffer_object *bo, @@ -88,7 +88,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM + .mem_type = TTM_PL_SYSTEM, + .flags = TTM_PL_MASK_CACHING }; struct radeon_bo *rbo; @@ -119,7 +120,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, RADEON_GEM_DOMAIN_GTT); rbo->placement.num_busy_placement = 0; for (i = 0; i < rbo->placement.num_placement; i++) { - if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) { + if (rbo->placements[i].mem_type == TTM_PL_VRAM) { if (rbo->placements[i].fpfn < fpfn) rbo->placements[i].fpfn = fpfn; } else { @@ -141,23 +142,14 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) { struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); + struct radeon_device *rdev = radeon_get_rdev(bo->bdev); - if (radeon_ttm_tt_has_userptr(bo->ttm)) + if (radeon_ttm_tt_has_userptr(rdev, bo->ttm)) return -EPERM; return drm_vma_node_verify_access(&rbo->tbo.base.vma_node, filp->private_data); } -static void radeon_move_null(struct ttm_buffer_object *bo, - struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - - BUG_ON(old_mem->mm_node != NULL); - *old_mem = *new_mem; - new_mem->mm_node = NULL; -} - static int radeon_move_blit(struct ttm_buffer_object *bo, bool evict, bool no_wait_gpu, struct ttm_resource *new_mem, @@ -208,7 +200,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, if (IS_ERR(fence)) return PTR_ERR(fence); - r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem); + r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem); radeon_fence_unref(&fence); return r; } @@ -233,7 +225,8 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, placement.busy_placement = &placements; placements.fpfn = 0; placements.lpfn = 0; - placements.flags = TTM_PL_MASK_CACHING | 
TTM_PL_FLAG_TT; + placements.mem_type = TTM_PL_TT; + placements.flags = TTM_PL_MASK_CACHING; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx); if (unlikely(r)) { return r; @@ -244,7 +237,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, goto out_cleanup; } - r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx); + r = ttm_tt_populate(bo->bdev, bo->ttm, &ctx); + if (unlikely(r)) { + goto out_cleanup; + } + + r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } @@ -278,7 +276,8 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, placement.busy_placement = &placements; placements.fpfn = 0; placements.lpfn = 0; - placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + placements.mem_type = TTM_PL_TT; + placements.flags = TTM_PL_MASK_CACHING; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx); if (unlikely(r)) { return r; @@ -316,7 +315,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, rdev = radeon_get_rdev(bo->bdev); if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { - radeon_move_null(bo, new_mem); + ttm_bo_move_null(bo, new_mem); return 0; } if ((old_mem->mem_type == TTM_PL_TT && @@ -324,7 +323,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_TT)) { /* bind is enough */ - radeon_move_null(bo, new_mem); + ttm_bo_move_null(bo, new_mem); return 0; } if (!rdev->ring[radeon_copy_ring_index(rdev)].ready || @@ -372,8 +371,8 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso #if IS_ENABLED(CONFIG_AGP) if (rdev->flags & RADEON_IS_AGP) { /* RADEON_IS_AGP is set only if AGP is active */ - mem->bus.offset = mem->start << PAGE_SHIFT; - mem->bus.base = rdev->mc.agp_base; + mem->bus.offset = (mem->start << PAGE_SHIFT) + + rdev->mc.agp_base; mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; } #endif @@ -383,7 +382,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso /* check if it's visible */ if ((mem->bus.offset + bus_size) > rdev->mc.visible_vram_size) return -EINVAL; - mem->bus.base = rdev->mc.aper_base; + mem->bus.offset += rdev->mc.aper_base; mem->bus.is_iomem = true; #ifdef __alpha__ /* @@ -392,12 +391,10 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso */ if (mem->placement & TTM_PL_FLAG_WC) mem->bus.addr = - ioremap_wc(mem->bus.base + mem->bus.offset, - bus_size); + ioremap_wc(mem->bus.offset, bus_size); else mem->bus.addr = - ioremap(mem->bus.base + mem->bus.offset, - bus_size); + ioremap(mem->bus.offset, bus_size); if (!mem->bus.addr) return -ENOMEM; @@ -407,7 +404,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso * It then can be used to build PTEs for VRAM * access, as done in ttm_bo_vm_fault(). 
*/ - mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + + mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) + rdev->ddev->hose->dense_mem_base; #endif break; @@ -427,12 +424,13 @@ struct radeon_ttm_tt { uint64_t userptr; struct mm_struct *usermm; uint32_t userflags; + bool bound; }; /* prepare the sg table with the user pages */ -static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm) +static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { - struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_device *rdev = radeon_get_rdev(bdev); struct radeon_ttm_tt *gtt = (void *)ttm; unsigned pinned = 0; int r; @@ -491,9 +489,9 @@ release_pages: return r; } -static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) +static void radeon_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { - struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_device *rdev = radeon_get_rdev(bdev); struct radeon_ttm_tt *gtt = (void *)ttm; struct sg_page_iter sg_iter; @@ -520,17 +518,28 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) sg_free_table(ttm->sg); } -static int radeon_ttm_backend_bind(struct ttm_tt *ttm, +static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm) +{ + struct radeon_ttm_tt *gtt = (void*)ttm; + + return (gtt->bound); +} + +static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct radeon_ttm_tt *gtt = (void*)ttm; - struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_device *rdev = radeon_get_rdev(bdev); uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ | RADEON_GART_PAGE_WRITE; int r; + if (gtt->bound) + return 0; + if (gtt->userptr) { - radeon_ttm_tt_pin_userptr(ttm); + radeon_ttm_tt_pin_userptr(bdev, ttm); flags &= ~RADEON_GART_PAGE_WRITE; } @@ -548,34 +557,36 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm, ttm->num_pages, (unsigned)gtt->offset); return r; } + gtt->bound = true; return 0; } -static void radeon_ttm_backend_unbind(struct ttm_tt *ttm) +static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct radeon_ttm_tt *gtt = (void *)ttm; - struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_device *rdev = radeon_get_rdev(bdev); + + if (!gtt->bound) + return; radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages); if (gtt->userptr) - radeon_ttm_tt_unpin_userptr(ttm); + radeon_ttm_tt_unpin_userptr(bdev, ttm); + gtt->bound = false; } -static void radeon_ttm_backend_destroy(struct ttm_tt *ttm) +static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct radeon_ttm_tt *gtt = (void *)ttm; + radeon_ttm_backend_unbind(bdev, ttm); + ttm_tt_destroy_common(bdev, ttm); + ttm_dma_tt_fini(&gtt->ttm); kfree(gtt); } -static struct ttm_backend_func radeon_backend_func = { - .bind = &radeon_ttm_backend_bind, - .unbind = &radeon_ttm_backend_unbind, - .destroy = &radeon_ttm_backend_destroy, -}; - static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { @@ -594,7 +605,6 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo, if (gtt == NULL) { return NULL; } - gtt->ttm.ttm.func = &radeon_backend_func; if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) { kfree(gtt); return NULL; @@ -602,18 +612,25 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo, return &gtt->ttm.ttm; } -static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt
*ttm) +static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev, + struct ttm_tt *ttm) { - if (!ttm || ttm->func != &radeon_backend_func) +#if IS_ENABLED(CONFIG_AGP) + if (rdev->flags & RADEON_IS_AGP) return NULL; - return (struct radeon_ttm_tt *)ttm; +#endif + + if (!ttm) + return NULL; + return container_of(ttm, struct radeon_ttm_tt, ttm.ttm); } -static int radeon_ttm_tt_populate(struct ttm_tt *ttm, - struct ttm_operation_ctx *ctx) +static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx) { - struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); - struct radeon_device *rdev; + struct radeon_device *rdev = radeon_get_rdev(bdev); + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm); bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); if (gtt && gtt->userptr) { @@ -622,21 +639,20 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm, return -ENOMEM; ttm->page_flags |= TTM_PAGE_FLAG_SG; - ttm->state = tt_unbound; + ttm_tt_set_populated(ttm); return 0; } if (slave && ttm->sg) { drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); - ttm->state = tt_unbound; + ttm_tt_set_populated(ttm); return 0; } - rdev = radeon_get_rdev(ttm->bdev); #if IS_ENABLED(CONFIG_AGP) if (rdev->flags & RADEON_IS_AGP) { - return ttm_agp_tt_populate(ttm, ctx); + return ttm_pool_populate(ttm, ctx); } #endif @@ -649,10 +665,10 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm, return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx); } -static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) +static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { - struct radeon_device *rdev; - struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); + struct radeon_device *rdev = radeon_get_rdev(bdev); + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm); bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); if (gtt && gtt->userptr) { @@ -664,10 +680,9 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) if (slave) return; - rdev = radeon_get_rdev(ttm->bdev); #if IS_ENABLED(CONFIG_AGP) if (rdev->flags & RADEON_IS_AGP) { - ttm_agp_tt_unpopulate(ttm); + ttm_pool_unpopulate(ttm); return; } #endif @@ -682,10 +697,11 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm); } -int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, +int radeon_ttm_tt_set_userptr(struct radeon_device *rdev, + struct ttm_tt *ttm, uint64_t addr, uint32_t flags) { - struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm); if (gtt == NULL) return -EINVAL; @@ -696,9 +712,69 @@ int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, return 0; } -bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm) +bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) +{ +#if IS_ENABLED(CONFIG_AGP) + struct radeon_device *rdev = radeon_get_rdev(bdev); + if (rdev->flags & RADEON_IS_AGP) + return ttm_agp_is_bound(ttm); +#endif + return radeon_ttm_backend_is_bound(ttm); +} + +static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, + struct ttm_resource *bo_mem) +{ +#if IS_ENABLED(CONFIG_AGP) + struct radeon_device *rdev = radeon_get_rdev(bdev); +#endif + + if (!bo_mem) + return -EINVAL; +#if IS_ENABLED(CONFIG_AGP) + if (rdev->flags & RADEON_IS_AGP) + return ttm_agp_bind(ttm, bo_mem); +#endif + + return
radeon_ttm_backend_bind(bdev, ttm, bo_mem); +} + +static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) +{ +#if IS_ENABLED(CONFIG_AGP) + struct radeon_device *rdev = radeon_get_rdev(bdev); + + if (rdev->flags & RADEON_IS_AGP) { + ttm_agp_unbind(ttm); + return; + } +#endif + radeon_ttm_backend_unbind(bdev, ttm); +} + +static void radeon_ttm_tt_destroy(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) +{ +#if IS_ENABLED(CONFIG_AGP) + struct radeon_device *rdev = radeon_get_rdev(bdev); + + if (rdev->flags & RADEON_IS_AGP) { + ttm_agp_unbind(ttm); + ttm_tt_destroy_common(bdev, ttm); + ttm_agp_destroy(ttm); + return; + } +#endif + radeon_ttm_backend_destroy(bdev, ttm); +} + +bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, + struct ttm_tt *ttm) { - struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm); if (gtt == NULL) return false; @@ -706,9 +782,10 @@ bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm) return !!gtt->userptr; } -bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm) +bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, + struct ttm_tt *ttm) { - struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm); if (gtt == NULL) return false; @@ -720,6 +797,9 @@ static struct ttm_bo_driver radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, .ttm_tt_populate = &radeon_ttm_tt_populate, .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, + .ttm_tt_bind = &radeon_ttm_tt_bind, + .ttm_tt_unbind = &radeon_ttm_tt_unbind, + .ttm_tt_destroy = &radeon_ttm_tt_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &radeon_evict_flags, .move = &radeon_bo_move, diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 010fb3b7a205..27b14eff532c 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -942,7 +942,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev, bo_va->flags &= ~RADEON_VM_PAGE_VALID; bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; - if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm)) + if (bo_va->bo && radeon_ttm_tt_is_readonly(rdev, bo_va->bo->tbo.ttm)) bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE; if (mem) { diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index f65d1489dc50..b47e74421e34 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -22,11 +22,11 @@ config DRM_RCAR_CMM Enable support for R-Car Color Management Module (CMM). config DRM_RCAR_DW_HDMI - tristate "R-Car DU Gen3 HDMI Encoder Support" + tristate "R-Car Gen3 and RZ/G2 DU HDMI Encoder Support" depends on DRM && OF select DRM_DW_HDMI help - Enable support for R-Car Gen3 internal HDMI encoder. + Enable support for R-Car Gen3 or RZ/G2 internal HDMI encoder. 
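The qxl and radeon hunks above are two instances of the same TTM refactoring: the per-ttm_tt ttm_backend_func table (bind/unbind/destroy) is replaced by ttm_tt_bind/ttm_tt_unbind/ttm_tt_destroy hooks in ttm_bo_driver that receive the ttm_bo_device, and the driver tracks bound state itself; the explicit gtt->bound flag is what lets radeon_ttm_tt_is_bound() stand in for the removed ttm->state != tt_bound check in radeon_mn.c. A minimal sketch of the driver-side shape this conversion targets follows; the mydrv_* names are hypothetical, and the body only illustrates the pattern, it is not code from these patches.

/*
 * Hedged sketch of the new driver-side TTM bind hooks; "mydrv" is a
 * hypothetical driver, not one of the drivers patched above.
 */
#include <linux/slab.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>

struct mydrv_ttm_tt {
        struct ttm_tt ttm;
        bool bound;     /* driver-tracked state, replaces ttm->state */
};

static int mydrv_ttm_tt_bind(struct ttm_bo_device *bdev,
                             struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
        struct mydrv_ttm_tt *gtt = container_of(ttm, struct mydrv_ttm_tt, ttm);

        if (!bo_mem)
                return -EINVAL;
        if (gtt->bound)         /* repeat binds must now be a no-op */
                return 0;

        /* ... program the GART from ttm->pages here ... */

        gtt->bound = true;
        return 0;
}

static void mydrv_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
        struct mydrv_ttm_tt *gtt = container_of(ttm, struct mydrv_ttm_tt, ttm);

        if (!gtt->bound)
                return;

        /* ... tear the GART mapping down here ... */

        gtt->bound = false;
}

static void mydrv_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
        struct mydrv_ttm_tt *gtt = container_of(ttm, struct mydrv_ttm_tt, ttm);

        mydrv_ttm_tt_unbind(bdev, ttm);         /* destroy implies unbind */
        ttm_tt_destroy_common(bdev, ttm);
        ttm_tt_fini(&gtt->ttm);
        kfree(gtt);
}

static struct ttm_bo_driver mydrv_bo_driver = {
        /* formerly ttm_backend_func members hanging off each ttm_tt */
        .ttm_tt_bind = &mydrv_ttm_tt_bind,
        .ttm_tt_unbind = &mydrv_ttm_tt_unbind,
        .ttm_tt_destroy = &mydrv_ttm_tt_destroy,
};

Drivers with more than one backend, such as radeon's AGP versus internal GART paths above, dispatch inside these hooks instead of swapping function tables per ttm_tt.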
config DRM_RCAR_LVDS tristate "R-Car DU LVDS Encoder Support" @@ -49,3 +49,4 @@ config DRM_RCAR_VSP config DRM_RCAR_WRITEBACK bool default y if ARM64 + depends on DRM_RCAR_DU diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index f53b0ec71085..447be991fa25 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -186,6 +186,35 @@ static const struct rcar_du_device_info rcar_du_r8a774c0_info = { .lvds_clk_mask = BIT(1) | BIT(0), }; +static const struct rcar_du_device_info rcar_du_r8a774e1_info = { + .gen = 3, + .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + | RCAR_DU_FEATURE_VSP1_SOURCE + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, + .channels_mask = BIT(3) | BIT(1) | BIT(0), + .routes = { + /* + * R8A774E1 has one RGB output, one LVDS output and one HDMI + * output. + */ + [RCAR_DU_OUTPUT_DPAD0] = { + .possible_crtcs = BIT(2), + .port = 0, + }, + [RCAR_DU_OUTPUT_HDMI0] = { + .possible_crtcs = BIT(1), + .port = 1, + }, + [RCAR_DU_OUTPUT_LVDS0] = { + .possible_crtcs = BIT(0), + .port = 2, + }, + }, + .num_lvds = 1, + .dpll_mask = BIT(1), +}; + static const struct rcar_du_device_info rcar_du_r8a7779_info = { .gen = 1, .features = RCAR_DU_FEATURE_INTERLACED @@ -216,8 +245,9 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = { .channels_mask = BIT(2) | BIT(1) | BIT(0), .routes = { /* - * R8A7790 has one RGB output, two LVDS outputs and one - * (currently unsupported) TCON output. + * R8A7742 and R8A7790 each have one RGB output and two LVDS + * outputs. Additionally R8A7790 supports one TCON output + * (currently unsupported by the driver). */ [RCAR_DU_OUTPUT_DPAD0] = { .possible_crtcs = BIT(2) | BIT(1) | BIT(0), @@ -443,6 +473,7 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = { }; static const struct of_device_id rcar_du_of_table[] = { + { .compatible = "renesas,du-r8a7742", .data = &rcar_du_r8a7790_info }, { .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info }, { .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info }, { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info }, @@ -450,6 +481,7 @@ static const struct of_device_id rcar_du_of_table[] = { { .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info }, { .compatible = "renesas,du-r8a774b1", .data = &rcar_du_r8a774b1_info }, { .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info }, + { .compatible = "renesas,du-r8a774e1", .data = &rcar_du_r8a774e1_info }, { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info }, { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info }, { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info }, @@ -458,6 +490,7 @@ static const struct of_device_id rcar_du_of_table[] = { { .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info }, { .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info }, { .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info }, + { .compatible = "renesas,du-r8a77961", .data = &rcar_du_r8a7796_info }, { .compatible = "renesas,du-r8a77965", .data = &rcar_du_r8a77965_info }, { .compatible = "renesas,du-r8a77970", .data = &rcar_du_r8a77970_info }, { .compatible = "renesas,du-r8a77980", .data = &rcar_du_r8a77970_info }, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 482329102f19..72dda446355f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ 
b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -40,6 +40,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_RGB565, .bpp = 16, .planes = 1, + .hsub = 1, .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP, .edf = PnDDCR4_EDF_NONE, }, { @@ -47,6 +48,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_ARGB555, .bpp = 16, .planes = 1, + .hsub = 1, .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB, .edf = PnDDCR4_EDF_NONE, }, { @@ -61,6 +63,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_XBGR32, .bpp = 32, .planes = 1, + .hsub = 1, .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP, .edf = PnDDCR4_EDF_RGB888, }, { @@ -68,6 +71,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_ABGR32, .bpp = 32, .planes = 1, + .hsub = 1, .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP, .edf = PnDDCR4_EDF_ARGB8888, }, { @@ -75,6 +79,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_UYVY, .bpp = 16, .planes = 1, + .hsub = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { @@ -82,6 +87,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_YUYV, .bpp = 16, .planes = 1, + .hsub = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { @@ -89,6 +95,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_NV12M, .bpp = 12, .planes = 2, + .hsub = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { @@ -96,6 +103,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_NV21M, .bpp = 12, .planes = 2, + .hsub = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { @@ -103,6 +111,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_NV16M, .bpp = 16, .planes = 2, + .hsub = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, @@ -115,156 +124,187 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_RGB332, .bpp = 8, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_ARGB4444, .v4l2 = V4L2_PIX_FMT_ARGB444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_XRGB4444, .v4l2 = V4L2_PIX_FMT_XRGB444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGBA4444, .v4l2 = V4L2_PIX_FMT_RGBA444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGBX4444, .v4l2 = V4L2_PIX_FMT_RGBX444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_ABGR4444, .v4l2 = V4L2_PIX_FMT_ABGR444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_XBGR4444, .v4l2 = V4L2_PIX_FMT_XBGR444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGRA4444, .v4l2 = V4L2_PIX_FMT_BGRA444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGRX4444, .v4l2 = V4L2_PIX_FMT_BGRX444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGBA5551, .v4l2 = V4L2_PIX_FMT_RGBA555, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGBX5551, .v4l2 = V4L2_PIX_FMT_RGBX555, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_ABGR1555, .v4l2 = V4L2_PIX_FMT_ABGR555, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_XBGR1555, .v4l2 = V4L2_PIX_FMT_XBGR555, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGRA5551, .v4l2 = V4L2_PIX_FMT_BGRA555, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = 
DRM_FORMAT_BGRX5551, .v4l2 = V4L2_PIX_FMT_BGRX555, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGR888, .v4l2 = V4L2_PIX_FMT_RGB24, .bpp = 24, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGB888, .v4l2 = V4L2_PIX_FMT_BGR24, .bpp = 24, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGBA8888, .v4l2 = V4L2_PIX_FMT_BGRA32, .bpp = 32, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGBX8888, .v4l2 = V4L2_PIX_FMT_BGRX32, .bpp = 32, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_ABGR8888, .v4l2 = V4L2_PIX_FMT_RGBA32, .bpp = 32, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_XBGR8888, .v4l2 = V4L2_PIX_FMT_RGBX32, .bpp = 32, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGRA8888, .v4l2 = V4L2_PIX_FMT_ARGB32, .bpp = 32, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGRX8888, .v4l2 = V4L2_PIX_FMT_XRGB32, .bpp = 32, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_YVYU, .v4l2 = V4L2_PIX_FMT_YVYU, .bpp = 16, .planes = 1, + .hsub = 2, }, { .fourcc = DRM_FORMAT_NV61, .v4l2 = V4L2_PIX_FMT_NV61M, .bpp = 16, .planes = 2, + .hsub = 2, }, { .fourcc = DRM_FORMAT_YUV420, .v4l2 = V4L2_PIX_FMT_YUV420M, .bpp = 12, .planes = 3, + .hsub = 2, }, { .fourcc = DRM_FORMAT_YVU420, .v4l2 = V4L2_PIX_FMT_YVU420M, .bpp = 12, .planes = 3, + .hsub = 2, }, { .fourcc = DRM_FORMAT_YUV422, .v4l2 = V4L2_PIX_FMT_YUV422M, .bpp = 16, .planes = 3, + .hsub = 2, }, { .fourcc = DRM_FORMAT_YVU422, .v4l2 = V4L2_PIX_FMT_YVU422M, .bpp = 16, .planes = 3, + .hsub = 2, }, { .fourcc = DRM_FORMAT_YUV444, .v4l2 = V4L2_PIX_FMT_YUV444M, .bpp = 24, .planes = 3, + .hsub = 1, }, { .fourcc = DRM_FORMAT_YVU444, .v4l2 = V4L2_PIX_FMT_YVU444M, .bpp = 24, .planes = 3, + .hsub = 1, }, }; @@ -311,6 +351,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, { struct rcar_du_device *rcdu = dev->dev_private; const struct rcar_du_format_info *format; + unsigned int chroma_pitch; unsigned int max_pitch; unsigned int align; unsigned int i; @@ -353,10 +394,19 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-EINVAL); } + /* + * Calculate the chroma plane(s) pitch using the horizontal subsampling + * factor. For semi-planar formats, the U and V planes are combined, the + * pitch must thus be doubled. 
+ */ + chroma_pitch = mode_cmd->pitches[0] / format->hsub; + if (format->planes == 2) + chroma_pitch *= 2; + for (i = 1; i < format->planes; ++i) { - if (mode_cmd->pitches[i] != mode_cmd->pitches[0]) { + if (mode_cmd->pitches[i] != chroma_pitch) { dev_dbg(dev->dev, - "luma and chroma pitches do not match\n"); + "luma and chroma pitches are not compatible\n"); return ERR_PTR(-EINVAL); } } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h index 0346504d8c59..8f5fff176754 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h @@ -22,6 +22,7 @@ struct rcar_du_format_info { u32 v4l2; unsigned int bpp; unsigned int planes; + unsigned int hsub; unsigned int pnmr; unsigned int edf; }; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index f1a81c9b184d..f6a69aa116e6 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -13,6 +13,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_plane_helper.h> #include <drm/drm_vblank.h> @@ -197,9 +198,8 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, goto fail; ret = vsp1_du_map_sg(vsp->vsp, sgt); - if (!ret) { + if (ret) { sg_free_table(sgt); - ret = -ENOMEM; goto fail; } } @@ -279,7 +279,7 @@ static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane, if (plane->state->visible) rcar_du_vsp_plane_setup(rplane); - else + else if (old_state->crtc) vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe, rplane->index, NULL); } @@ -341,6 +341,13 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = { .atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state, }; +static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res) +{ + struct rcar_du_vsp *vsp = res; + + put_device(vsp->vsp); +} + int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, unsigned int crtcs) { @@ -357,6 +364,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, vsp->vsp = &pdev->dev; + ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp); + if (ret < 0) + return ret; + ret = vsp1_du_init(vsp->vsp); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index bced729a96fe..70dbbe44bb23 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -978,11 +978,13 @@ static const struct rcar_lvds_device_info rcar_lvds_r8a77995_info = { }; static const struct of_device_id rcar_lvds_of_table[] = { + { .compatible = "renesas,r8a7742-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info }, { .compatible = "renesas,r8a774b1-lvds", .data = &rcar_lvds_gen3_info }, { .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info }, + { .compatible = "renesas,r8a774e1-lvds", .data = &rcar_lvds_gen3_info }, { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info }, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 
b9275ba7c5a5..62e5d0970525 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -36,8 +36,8 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj) rk_obj->dma_addr = rk_obj->mm.start; - ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl, - rk_obj->sgt->nents, prot); + ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt, + prot); if (ret < rk_obj->base.size) { DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n", ret, rk_obj->base.size); @@ -85,7 +85,8 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj) rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT; - rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); + rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev, + rk_obj->pages, rk_obj->num_pages); if (IS_ERR(rk_obj->sgt)) { ret = PTR_ERR(rk_obj->sgt); goto err_put_pages; @@ -98,11 +99,10 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj) * TODO: Replace this by drm_clflush_sg() once it can be implemented * without relying on symbols that are not exported. */ - for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i) + for_each_sgtable_sg(rk_obj->sgt, s, i) sg_dma_address(s) = sg_phys(s); - dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents, - DMA_TO_DEVICE); + dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE); return 0; @@ -350,8 +350,8 @@ void rockchip_gem_free_object(struct drm_gem_object *obj) if (private->domain) { rockchip_gem_iommu_unmap(rk_obj); } else { - dma_unmap_sg(drm->dev, rk_obj->sgt->sgl, - rk_obj->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(drm->dev, rk_obj->sgt, + DMA_BIDIRECTIONAL, 0); } drm_prime_gem_destroy(obj, rk_obj->sgt); } else { @@ -442,7 +442,7 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) int ret; if (rk_obj->pages) - return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); + return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages); sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) @@ -460,23 +460,6 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) return sgt; } -static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt, - int count) -{ - struct scatterlist *s; - dma_addr_t expected = sg_dma_address(sgt->sgl); - unsigned int i; - unsigned long size = 0; - - for_each_sg(sgt->sgl, s, count, i) { - if (sg_dma_address(s) != expected) - break; - expected = sg_dma_address(s) + sg_dma_len(s); - size += sg_dma_len(s); - } - return size; -} - static int rockchip_gem_iommu_map_sg(struct drm_device *drm, struct dma_buf_attachment *attach, @@ -493,15 +476,13 @@ rockchip_gem_dma_map_sg(struct drm_device *drm, struct sg_table *sg, struct rockchip_gem_object *rk_obj) { - int count = dma_map_sg(drm->dev, sg->sgl, sg->nents, - DMA_BIDIRECTIONAL); - if (!count) - return -EINVAL; + int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0); + if (err) + return err; - if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) { + if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) { DRM_ERROR("failed to map sg_table to contiguous linear address.\n"); - dma_unmap_sg(drm->dev, sg->sgl, sg->nents, - DMA_BIDIRECTIONAL); + dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0); return -EINVAL; } diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c index 
bd990d178765..1d696ec001cf 100644 --- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c +++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c @@ -5,6 +5,8 @@ #define PREFIX_STR "[drm_dp_mst_helper]" +#include <linux/random.h> + #include <drm/drm_dp_mst_helper.h> #include <drm/drm_print.h> @@ -237,6 +239,21 @@ int igt_dp_mst_sideband_msg_req_decode(void *unused) in.u.i2c_write.bytes = data; DO_TEST(); + in.req_type = DP_QUERY_STREAM_ENC_STATUS; + in.u.enc_status.stream_id = 1; + DO_TEST(); + get_random_bytes(in.u.enc_status.client_id, + sizeof(in.u.enc_status.client_id)); + DO_TEST(); + in.u.enc_status.stream_event = 3; + DO_TEST(); + in.u.enc_status.valid_stream_event = 0; + DO_TEST(); + in.u.enc_status.stream_behavior = 3; + DO_TEST(); + in.u.enc_status.valid_stream_behavior = 1; + DO_TEST(); + #undef DO_TEST return 0; } diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index b25443255be6..f38de08e0c95 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -12,6 +12,7 @@ #include <linux/gpio/consumer.h> #include <drm/drm_atomic.h> +#include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> @@ -116,6 +117,7 @@ struct tegra_output { struct device_node *of_node; struct device *dev; + struct drm_bridge *bridge; struct drm_panel *panel; struct i2c_adapter *ddc; const struct edid *edid; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 723df142a981..a2bac20ff19d 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -98,8 +98,8 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, * the SG table needs to be copied to avoid overwriting any * other potential users of the original SG table. 
*/ - err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents, - GFP_KERNEL); + err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, + obj->sgt->orig_nents, GFP_KERNEL); if (err < 0) goto free; } else { @@ -196,8 +196,7 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) bo->iova = bo->mm->start; - bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl, - bo->sgt->nents, prot); + bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot); if (!bo->size) { dev_err(tegra->drm->dev, "failed to map buffer\n"); err = -ENOMEM; @@ -264,8 +263,7 @@ free: static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) { if (bo->pages) { - dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_FROM_DEVICE); + dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0); drm_gem_put_pages(&bo->gem, bo->pages, true, true); sg_free_table(bo->sgt); kfree(bo->sgt); @@ -284,18 +282,15 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) bo->num_pages = bo->gem.size >> PAGE_SHIFT; - bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); + bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages); if (IS_ERR(bo->sgt)) { err = PTR_ERR(bo->sgt); goto put_pages; } - err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_FROM_DEVICE); - if (err == 0) { - err = -EFAULT; + err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0); + if (err) goto free_sgt; - } return 0; @@ -571,7 +566,7 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, goto free; } - if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + if (dma_map_sgtable(attach->dev, sgt, dir, 0)) goto free; return sgt; @@ -590,7 +585,7 @@ static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, struct tegra_bo *bo = to_tegra_bo(gem); if (bo->pages) - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); + dma_unmap_sgtable(attach->dev, sgt, dir, 0); sg_free_table(sgt); kfree(sgt); @@ -609,8 +604,7 @@ static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf, struct drm_device *drm = gem->dev; if (bo->pages) - dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_FROM_DEVICE); + dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE); return 0; } @@ -623,8 +617,7 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf, struct drm_device *drm = gem->dev; if (bo->pages) - dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_TO_DEVICE); + dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE); return 0; } diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index a3adb9e4debf..5a4fd0dbf4cf 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -5,6 +5,7 @@ */ #include <drm/drm_atomic_helper.h> +#include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_simple_kms_helper.h> @@ -99,27 +100,38 @@ int tegra_output_probe(struct tegra_output *output) if (!output->of_node) output->of_node = output->dev->of_node; + err = drm_of_find_panel_or_bridge(output->of_node, -1, -1, + &output->panel, &output->bridge); + if (err && err != -ENODEV) + return err; + panel = of_parse_phandle(output->of_node, "nvidia,panel", 0); if (panel) { + /* + * Don't mix nvidia,panel phandle with the graph in a + * device-tree. 
+ */ + WARN_ON(output->panel || output->bridge); + output->panel = of_drm_find_panel(panel); + of_node_put(panel); + if (IS_ERR(output->panel)) return PTR_ERR(output->panel); - - of_node_put(panel); } output->edid = of_get_property(output->of_node, "nvidia,edid", &size); ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0); if (ddc) { - output->ddc = of_find_i2c_adapter_by_node(ddc); + output->ddc = of_get_i2c_adapter_by_node(ddc); + of_node_put(ddc); + if (!output->ddc) { err = -EPROBE_DEFER; of_node_put(ddc); return err; } - - of_node_put(ddc); } output->hpd_gpio = devm_gpiod_get_from_of_node(output->dev, @@ -173,7 +185,7 @@ void tegra_output_remove(struct tegra_output *output) free_irq(output->hpd_irq, output); if (output->ddc) - put_device(&output->ddc->dev); + i2c_put_adapter(output->ddc); } int tegra_output_init(struct drm_device *drm, struct tegra_output *output) diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index 4cd0461cc508..539d14935728 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -131,12 +131,9 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) } if (sgt) { - err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); - if (err == 0) { - err = -ENOMEM; + err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); + if (err) goto unpin; - } /* * The display controller needs contiguous memory, so @@ -144,7 +141,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) * map its SG table to a single contiguous chunk of * I/O virtual memory. */ - if (err > 1) { + if (sgt->nents > 1) { err = -EINVAL; goto unpin; } @@ -166,8 +163,7 @@ unpin: struct sg_table *sgt = state->sgt[i]; if (sgt) - dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); + dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); host1x_bo_unpin(dc->dev, &bo->base, sgt); state->iova[i] = DMA_MAPPING_ERROR; @@ -186,8 +182,7 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state) struct sg_table *sgt = state->sgt[i]; if (sgt) - dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); + dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); host1x_bo_unpin(dc->dev, &bo->base, sgt); state->iova[i] = DMA_MAPPING_ERROR; diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index 0562a7eb793f..4142a56ca764 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -7,7 +7,7 @@ #include <linux/clk.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_panel.h> +#include <drm/drm_bridge_connector.h> #include <drm/drm_simple_kms_helper.h> #include "drm.h" @@ -85,45 +85,13 @@ static void tegra_dc_write_regs(struct tegra_dc *dc, tegra_dc_writel(dc, table[i].value, table[i].offset); } -static const struct drm_connector_funcs tegra_rgb_connector_funcs = { - .reset = drm_atomic_helper_connector_reset, - .detect = tegra_output_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = tegra_output_connector_destroy, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static enum drm_mode_status -tegra_rgb_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - /* - * FIXME: For now, always assume that the mode is okay. There are - * unresolved issues with clk_round_rate(), which doesn't always - * reliably report whether a frequency can be set or not. 
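The tegra plane conversion above leans on dma_map_sgtable() returning 0 or a negative errno, unlike dma_map_sg(), which returns a segment count and signals failure with 0; the number of mapped segments lands in sgt->nents, which is why the display controller's contiguity test becomes sgt->nents > 1. A sketch of the converted pattern, with a hypothetical wrapper:

	#include <linux/dma-mapping.h>

	static int example_map_contiguous(struct device *dev, struct sg_table *sgt)
	{
		int err;

		err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		if (err)
			return err;

		/* After mapping, sgt->nents holds the DMA segment count. */
		if (sgt->nents > 1) {
			/* This device needs one contiguous chunk. */
			dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
			return -EINVAL;
		}

		return 0;
	}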
- */ - return MODE_OK; -} - -static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = { - .get_modes = tegra_output_connector_get_modes, - .mode_valid = tegra_rgb_connector_mode_valid, -}; - static void tegra_rgb_encoder_disable(struct drm_encoder *encoder) { struct tegra_output *output = encoder_to_output(encoder); struct tegra_rgb *rgb = to_rgb(output); - if (output->panel) - drm_panel_disable(output->panel); - tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); tegra_dc_commit(rgb->dc); - - if (output->panel) - drm_panel_unprepare(output->panel); } static void tegra_rgb_encoder_enable(struct drm_encoder *encoder) @@ -132,9 +100,6 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder) struct tegra_rgb *rgb = to_rgb(output); u32 value; - if (output->panel) - drm_panel_prepare(output->panel); - tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL; @@ -156,9 +121,6 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder) tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS); tegra_dc_commit(rgb->dc); - - if (output->panel) - drm_panel_enable(output->panel); } static int @@ -267,24 +229,68 @@ int tegra_dc_rgb_remove(struct tegra_dc *dc) int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) { struct tegra_output *output = dc->rgb; + struct drm_connector *connector; int err; if (!dc->rgb) return -ENODEV; - drm_connector_init(drm, &output->connector, &tegra_rgb_connector_funcs, - DRM_MODE_CONNECTOR_LVDS); - drm_connector_helper_add(&output->connector, - &tegra_rgb_connector_helper_funcs); - output->connector.dpms = DRM_MODE_DPMS_OFF; - drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS); drm_encoder_helper_add(&output->encoder, &tegra_rgb_encoder_helper_funcs); - drm_connector_attach_encoder(&output->connector, - &output->encoder); - drm_connector_register(&output->connector); + /* + * Wrap a directly-connected panel into a DRM bridge so that the + * DRM core handles the panel for us. + */ + if (output->panel) { + output->bridge = devm_drm_panel_bridge_add(output->dev, + output->panel); + if (IS_ERR(output->bridge)) { + dev_err(output->dev, + "failed to wrap panel into bridge: %pe\n", + output->bridge); + return PTR_ERR(output->bridge); + } + + output->panel = NULL; + } + + /* + * Tegra devices that have an LVDS panel utilize an LVDS encoder + * bridge to convert up to 28 LCD LVTTL lanes into 5/4 LVDS lanes + * that go to the display panel's receiver. + * + * The encoder usually has a power-down control which needs to be + * enabled in order to transmit data to the panel. Historically, + * devices that use an older device-tree version didn't model the + * bridge, assuming that the encoder is turned ON by default, while + * today's DRM allows us to model the LVDS encoder properly. + * + * Newer device-trees utilize the LVDS encoder bridge, which provides + * us with a connector and handles the display panel. + * + * For older device-trees we wrap the panel into a panel-bridge.
+ */ + if (output->bridge) { + err = drm_bridge_attach(&output->encoder, output->bridge, + NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (err) { + dev_err(output->dev, "failed to attach bridge: %d\n", + err); + return err; + } + + connector = drm_bridge_connector_init(drm, &output->encoder); + if (IS_ERR(connector)) { + dev_err(output->dev, + "failed to initialize bridge connector: %pe\n", + connector); + return PTR_ERR(connector); + } + + drm_connector_attach_encoder(connector, &output->encoder); + } err = tegra_output_init(drm, output); if (err < 0) { diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 45b5258c77a2..e88a17c2937f 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -3728,7 +3728,12 @@ static int tegra_sor_probe(struct platform_device *pdev) if (!sor->aux) return -EPROBE_DEFER; - sor->output.ddc = &sor->aux->ddc; + if (get_device(&sor->aux->ddc.dev)) { + if (try_module_get(sor->aux->ddc.owner)) + sor->output.ddc = &sor->aux->ddc; + else + put_device(&sor->aux->ddc.dev); + } } if (!sor->aux) { diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 09fe80e215c5..a98fd795b752 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -48,7 +48,7 @@ struct ttm_agp_backend { struct agp_bridge_data *bridge; }; -static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) +int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); struct page *dummy_read_page = ttm_bo_glob.dummy_read_page; @@ -57,6 +57,9 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED); unsigned i; + if (agp_be->mem) + return 0; + mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY); if (unlikely(mem == NULL)) return -ENOMEM; @@ -81,8 +84,9 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) return ret; } +EXPORT_SYMBOL(ttm_agp_bind); -static void ttm_agp_unbind(struct ttm_tt *ttm) +void ttm_agp_unbind(struct ttm_tt *ttm) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); @@ -95,8 +99,20 @@ static void ttm_agp_unbind(struct ttm_tt *ttm) agp_be->mem = NULL; } } +EXPORT_SYMBOL(ttm_agp_unbind); + +bool ttm_agp_is_bound(struct ttm_tt *ttm) +{ + struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); + + if (!ttm) + return false; + + return (agp_be->mem != NULL); +} +EXPORT_SYMBOL(ttm_agp_is_bound); -static void ttm_agp_destroy(struct ttm_tt *ttm) +void ttm_agp_destroy(struct ttm_tt *ttm) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); @@ -105,12 +121,7 @@ static void ttm_agp_destroy(struct ttm_tt *ttm) ttm_tt_fini(ttm); kfree(agp_be); } - -static struct ttm_backend_func ttm_agp_func = { - .bind = ttm_agp_bind, - .unbind = ttm_agp_unbind, - .destroy = ttm_agp_destroy, -}; +EXPORT_SYMBOL(ttm_agp_destroy); struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, struct agp_bridge_data *bridge, @@ -124,7 +135,6 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, agp_be->mem = NULL; agp_be->bridge = bridge; - agp_be->ttm.func = &ttm_agp_func; if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) { kfree(agp_be); @@ -134,18 +144,3 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, return &agp_be->ttm; } EXPORT_SYMBOL(ttm_agp_tt_create); - -int 
ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) -{ - if (ttm->state != tt_unpopulated) - return 0; - - return ttm_pool_populate(ttm, ctx); -} -EXPORT_SYMBOL(ttm_agp_tt_populate); - -void ttm_agp_tt_unpopulate(struct ttm_tt *ttm) -{ - ttm_pool_unpopulate(ttm); -} -EXPORT_SYMBOL(ttm_agp_tt_unpopulate); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e3931e515906..70b3bee27850 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -64,34 +64,18 @@ static void ttm_bo_default_destroy(struct ttm_buffer_object *bo) kfree(bo); } -static inline int ttm_mem_type_from_place(const struct ttm_place *place, - uint32_t *mem_type) -{ - int pos; - - pos = ffs(place->flags & TTM_PL_MASK_MEM); - if (unlikely(!pos)) - return -EINVAL; - - *mem_type = pos - 1; - return 0; -} - static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct drm_printer p = drm_debug_printer(TTM_PFX); - int i, ret, mem_type; struct ttm_resource_manager *man; + int i, mem_type; drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n", bo, bo->mem.num_pages, bo->mem.size >> 10, bo->mem.size >> 20); for (i = 0; i < placement->num_placement; i++) { - ret = ttm_mem_type_from_place(&placement->placement[i], - &mem_type); - if (ret) - return; + mem_type = placement->placement[i].mem_type; drm_printf(&p, " placement[%d]=0x%08X (%d)\n", i, placement->placement[i].flags, mem_type); man = ttm_manager_type(bo->bdev, mem_type); @@ -125,12 +109,6 @@ static struct kobj_type ttm_bo_glob_kobj_type = { .default_attrs = ttm_bo_global_attrs }; - -static inline uint32_t ttm_bo_type_flags(unsigned type) -{ - return 1 << (type); -} - static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, struct ttm_resource *mem) { @@ -263,11 +241,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type); int ret; - ret = ttm_mem_io_lock(old_man, true); - if (unlikely(ret != 0)) - goto out_err; - ttm_bo_unmap_virtual_locked(bo); - ttm_mem_io_unlock(old_man); + ttm_bo_unmap_virtual(bo); /* * Create and bind a ttm if required. 
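Exporting ttm_agp_bind(), ttm_agp_unbind() and ttm_agp_destroy() above pairs with the removal of the per-TT ttm_backend_func vtable: binding becomes a ttm_bo_driver operation, and an AGP-backed driver simply forwards its hooks to the helpers. A sketch of the wiring under the new model (the example_* names are hypothetical):

	static int example_ttm_tt_bind(struct ttm_bo_device *bdev,
				       struct ttm_tt *ttm,
				       struct ttm_resource *mem)
	{
		return ttm_agp_bind(ttm, mem);
	}

	static void example_ttm_tt_unbind(struct ttm_bo_device *bdev,
					  struct ttm_tt *ttm)
	{
		ttm_agp_unbind(ttm);
	}

	static struct ttm_bo_driver example_bo_driver = {
		/* ... other callbacks ... */
		.ttm_tt_bind = example_ttm_tt_bind,
		.ttm_tt_unbind = example_ttm_tt_unbind,
	};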
@@ -286,7 +260,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, goto out_err; if (mem->mem_type != TTM_PL_SYSTEM) { - ret = ttm_tt_bind(bo->ttm, mem, ctx); + ret = ttm_tt_populate(bdev, bo->ttm, ctx); + if (ret) + goto out_err; + + ret = ttm_bo_tt_bind(bo, mem); if (ret) goto out_err; } @@ -320,17 +298,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } moved: - bo->evicted = false; - ctx->bytes_moved += bo->num_pages << PAGE_SHIFT; return 0; out_err: new_man = ttm_manager_type(bdev, bo->mem.mem_type); - if (!new_man->use_tt) { - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; - } + if (!new_man->use_tt) + ttm_bo_tt_destroy(bo); return ret; } @@ -348,8 +322,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) if (bo->bdev->driver->move_notify) bo->bdev->driver->move_notify(bo, false, NULL); - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; + ttm_bo_tt_destroy(bo); ttm_resource_free(bo, &bo->mem); } @@ -538,7 +511,6 @@ static void ttm_bo_release(struct kref *kref) struct ttm_buffer_object *bo = container_of(kref, struct ttm_buffer_object, kref); struct ttm_bo_device *bdev = bo->bdev; - struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); size_t acc_size = bo->acc_size; int ret; @@ -556,9 +528,7 @@ static void ttm_bo_release(struct kref *kref) bo->bdev->driver->release_notify(bo); drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); - ttm_mem_io_lock(man, false); - ttm_mem_io_free_vm(bo); - ttm_mem_io_unlock(man); + ttm_mem_io_free(bdev, &bo->mem); } if (!dma_resv_test_signaled_rcu(bo->base.resv, true) || @@ -648,8 +618,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, evict_mem = bo->mem; evict_mem.mm_node = NULL; - evict_mem.bus.io_reserved_vm = false; - evict_mem.bus.io_reserved_count = 0; + evict_mem.bus.offset = 0; + evict_mem.bus.addr = NULL; ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx); if (ret) { @@ -666,9 +636,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, if (ret != -ERESTARTSYS) pr_err("Buffer eviction failed\n"); ttm_resource_free(bo, &evict_mem); - goto out; } - bo->evicted = true; out: return ret; } @@ -905,8 +873,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man, if ((cur_placement & caching) != 0) result |= (cur_placement & caching); - else if ((man->default_caching & caching) != 0) - result |= man->default_caching; else if ((TTM_PL_FLAG_CACHED & caching) != 0) result |= TTM_PL_FLAG_CACHED; else if ((TTM_PL_FLAG_WC & caching) != 0) @@ -917,25 +883,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man, return result; } -static bool ttm_bo_mt_compatible(struct ttm_resource_manager *man, - uint32_t mem_type, - const struct ttm_place *place, - uint32_t *masked_placement) -{ - uint32_t cur_flags = ttm_bo_type_flags(mem_type); - - if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0) - return false; - - if ((place->flags & man->available_caching) == 0) - return false; - - cur_flags |= (place->flags & man->available_caching); - - *masked_placement = cur_flags; - return true; -} - /** * ttm_bo_mem_placement - check if placement is compatible * @bo: BO to find memory for @@ -953,30 +900,18 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - uint32_t mem_type = TTM_PL_SYSTEM; struct ttm_resource_manager *man; uint32_t cur_flags = 0; - int ret; - ret = ttm_mem_type_from_place(place, &mem_type); - if (ret) - return ret; - - man = 
ttm_manager_type(bdev, mem_type); + man = ttm_manager_type(bdev, place->mem_type); if (!man || !ttm_resource_manager_used(man)) return -EBUSY; - if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) - return -EBUSY; - - cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags); - /* - * Use the access and other non-mapping-related flag bits from - * the memory placement flags to the current flags - */ - ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE); + cur_flags = ttm_bo_select_caching(man, bo->mem.placement, + place->flags); + cur_flags |= place->flags & ~TTM_PL_MASK_CACHING; - mem->mem_type = mem_type; + mem->mem_type = place->mem_type; mem->placement = cur_flags; spin_lock(&ttm_bo_glob.lru_lock); @@ -1013,10 +948,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_resource_manager *man; ret = ttm_bo_mem_placement(bo, place, mem, ctx); - if (ret == -EBUSY) - continue; if (ret) - goto error; + continue; type_found = true; ret = ttm_resource_alloc(bo, place, mem); @@ -1041,10 +974,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, const struct ttm_place *place = &placement->busy_placement[i]; ret = ttm_bo_mem_placement(bo, place, mem, ctx); - if (ret == -EBUSY) - continue; if (ret) - goto error; + continue; type_found = true; ret = ttm_bo_mem_force_space(bo, place, mem, ctx); @@ -1082,8 +1013,8 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; - mem.bus.io_reserved_vm = false; - mem.bus.io_reserved_count = 0; + mem.bus.offset = 0; + mem.bus.addr = NULL; mem.mm_node = NULL; /* @@ -1115,7 +1046,7 @@ static bool ttm_bo_places_compat(const struct ttm_place *places, *new_flags = heap->flags; if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && - (*new_flags & mem->placement & TTM_PL_MASK_MEM) && + (mem->mem_type == heap->mem_type) && (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) || (mem->placement & TTM_PL_FLAG_CONTIGUOUS))) return true; @@ -1170,12 +1101,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, if (ret) return ret; } else { - /* - * Use the access and other non-mapping-related flag bits from - * the compatible memory placement flags to the active flags - */ - ttm_flag_masked(&bo->mem.placement, new_flags, - ~TTM_PL_MASK_MEMTYPE); + bo->mem.placement &= TTM_PL_MASK_CACHING; + bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING; } /* * We might need to add a TTM. 
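The deleted ttm_mem_type_from_place() helper existed only to decode a memory type out of TTM_PL_FLAG_* bits; after this series a ttm_place names its memory type in an explicit .mem_type field, and the flags keep only caching and access bits. A before/after sketch of a VRAM placement under that convention (illustrative only; the old form no longer compiles once the flag-encoded types are gone):

	/* Old convention: memory type encoded in the flags. */
	static const struct ttm_place vram_place_old = {
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	};

	/* New convention: explicit mem_type; flags reduced to caching. */
	static const struct ttm_place vram_place_new = {
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED,
	};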
@@ -1232,7 +1159,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); - INIT_LIST_HEAD(&bo->io_reserve_lru); bo->bdev = bdev; bo->type = type; bo->num_pages = num_pages; @@ -1241,10 +1167,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, bo->mem.num_pages = bo->num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; - bo->mem.bus.io_reserved_vm = false; - bo->mem.bus.io_reserved_count = 0; + bo->mem.bus.offset = 0; + bo->mem.bus.addr = NULL; bo->moving = NULL; - bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); + bo->mem.placement = TTM_PL_FLAG_CACHED; bo->acc_size = acc_size; bo->sg = sg; if (resv) { @@ -1325,9 +1251,9 @@ int ttm_bo_init(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_init); -size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, - unsigned long bo_size, - unsigned struct_size) +static size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, + unsigned long bo_size, + unsigned struct_size) { unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; size_t size = 0; @@ -1337,7 +1263,6 @@ size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, size += ttm_round_pot(sizeof(struct ttm_tt)); return size; } -EXPORT_SYMBOL(ttm_bo_acc_size); size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, unsigned long bo_size, @@ -1500,8 +1425,6 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) * Other types need to be driver / IOCTL initialized. */ man->use_tt = true; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; ttm_resource_manager_init(man, 0); ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man); @@ -1545,25 +1468,13 @@ EXPORT_SYMBOL(ttm_bo_device_init); * buffer object vm functions. 
*/ -void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) -{ - struct ttm_bo_device *bdev = bo->bdev; - - drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); - ttm_mem_io_free_vm(bo); -} - void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); - ttm_mem_io_lock(man, false); - ttm_bo_unmap_virtual_locked(bo); - ttm_mem_io_unlock(man); + drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); + ttm_mem_io_free(bdev, &bo->mem); } - - EXPORT_SYMBOL(ttm_bo_unmap_virtual); int ttm_bo_wait(struct ttm_buffer_object *bo, @@ -1647,7 +1558,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx) evict_mem = bo->mem; evict_mem.mm_node = NULL; - evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; + evict_mem.placement = TTM_PL_FLAG_CACHED; evict_mem.mem_type = TTM_PL_SYSTEM; ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx); @@ -1673,7 +1584,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx) if (bo->bdev->driver->swap_notify) bo->bdev->driver->swap_notify(bo); - ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); + ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage); out: /** @@ -1698,3 +1609,22 @@ void ttm_bo_swapout_all(void) while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0); } EXPORT_SYMBOL(ttm_bo_swapout_all); + +void ttm_bo_tt_destroy(struct ttm_buffer_object *bo) +{ + if (bo->ttm == NULL) + return; + + ttm_tt_destroy(bo->bdev, bo->ttm); + bo->ttm = NULL; +} + +int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem) +{ + return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem); +} + +void ttm_bo_tt_unbind(struct ttm_buffer_object *bo) +{ + bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm); +} diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index ee04716b2603..fb2a25f8408f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -67,10 +67,8 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, return ret; } - ttm_tt_unbind(ttm); + ttm_bo_tt_unbind(bo); ttm_bo_free_old_node(bo); - ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, - TTM_PL_MASK_MEM); old_mem->mem_type = TTM_PL_SYSTEM; } @@ -79,134 +77,56 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, return ret; if (new_mem->mem_type != TTM_PL_SYSTEM) { - ret = ttm_tt_bind(ttm, new_mem, ctx); + + ret = ttm_tt_populate(bo->bdev, ttm, ctx); if (unlikely(ret != 0)) return ret; - } - *old_mem = *new_mem; - new_mem->mm_node = NULL; + ret = ttm_bo_tt_bind(bo, new_mem); + if (unlikely(ret != 0)) + return ret; + } + ttm_bo_assign_mem(bo, new_mem); return 0; } EXPORT_SYMBOL(ttm_bo_move_ttm); -int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible) -{ - if (likely(!man->use_io_reserve_lru)) - return 0; - - if (interruptible) - return mutex_lock_interruptible(&man->io_reserve_mutex); - - mutex_lock(&man->io_reserve_mutex); - return 0; -} - -void ttm_mem_io_unlock(struct ttm_resource_manager *man) -{ - if (likely(!man->use_io_reserve_lru)) - return; - - mutex_unlock(&man->io_reserve_mutex); -} - -static int ttm_mem_io_evict(struct ttm_resource_manager *man) -{ - struct ttm_buffer_object *bo; - - bo = list_first_entry_or_null(&man->io_reserve_lru, - struct ttm_buffer_object, - io_reserve_lru); - if (!bo) - return -ENOSPC; - - list_del_init(&bo->io_reserve_lru); - ttm_bo_unmap_virtual_locked(bo); - return 0; 
-} - int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) { - struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); - int ret; - - if (mem->bus.io_reserved_count++) + if (mem->bus.offset || mem->bus.addr) return 0; + mem->bus.is_iomem = false; if (!bdev->driver->io_mem_reserve) return 0; - mem->bus.addr = NULL; - mem->bus.offset = 0; - mem->bus.base = 0; - mem->bus.is_iomem = false; -retry: - ret = bdev->driver->io_mem_reserve(bdev, mem); - if (ret == -ENOSPC) { - ret = ttm_mem_io_evict(man); - if (ret == 0) - goto retry; - } - return ret; + return bdev->driver->io_mem_reserve(bdev, mem); } void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_resource *mem) { - if (--mem->bus.io_reserved_count) + if (!mem->bus.offset && !mem->bus.addr) return; - if (!bdev->driver->io_mem_free) - return; - - bdev->driver->io_mem_free(bdev, mem); -} - -int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) -{ - struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); - struct ttm_resource *mem = &bo->mem; - int ret; - - if (mem->bus.io_reserved_vm) - return 0; - - ret = ttm_mem_io_reserve(bo->bdev, mem); - if (unlikely(ret != 0)) - return ret; - mem->bus.io_reserved_vm = true; - if (man->use_io_reserve_lru) - list_add_tail(&bo->io_reserve_lru, - &man->io_reserve_lru); - return 0; -} + if (bdev->driver->io_mem_free) + bdev->driver->io_mem_free(bdev, mem); -void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) -{ - struct ttm_resource *mem = &bo->mem; - - if (!mem->bus.io_reserved_vm) - return; - - mem->bus.io_reserved_vm = false; - list_del_init(&bo->io_reserve_lru); - ttm_mem_io_free(bo->bdev, mem); + mem->bus.offset = 0; + mem->bus.addr = NULL; } static int ttm_resource_ioremap(struct ttm_bo_device *bdev, struct ttm_resource *mem, void **virtual) { - struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); int ret; void *addr; *virtual = NULL; - (void) ttm_mem_io_lock(man, false); ret = ttm_mem_io_reserve(bdev, mem); - ttm_mem_io_unlock(man); if (ret || !mem->bus.is_iomem) return ret; @@ -216,15 +136,11 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev, size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; if (mem->placement & TTM_PL_FLAG_WC) - addr = ioremap_wc(mem->bus.base + mem->bus.offset, - bus_size); + addr = ioremap_wc(mem->bus.offset, bus_size); else - addr = ioremap(mem->bus.base + mem->bus.offset, - bus_size); + addr = ioremap(mem->bus.offset, bus_size); if (!addr) { - (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); - ttm_mem_io_unlock(man); return -ENOMEM; } } @@ -236,15 +152,9 @@ static void ttm_resource_iounmap(struct ttm_bo_device *bdev, struct ttm_resource *mem, void *virtual) { - struct ttm_resource_manager *man; - - man = ttm_manager_type(bdev, mem->mem_type); - if (virtual && mem->bus.addr == NULL) iounmap(virtual); - (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); - ttm_mem_io_unlock(man); } static int ttm_copy_io_page(void *dst, void *src, unsigned long page) @@ -342,7 +252,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, * Don't move nonexistent data. Clear destination instead. */ if (old_iomap == NULL && - (ttm == NULL || (ttm->state == tt_unpopulated && + (ttm == NULL || (!ttm_tt_is_populated(ttm) && !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) { memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); goto out2; @@ -352,7 +262,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, * TTM might be null for moves within the same region. 
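After the rewrite above, ttm_mem_io_reserve() treats a non-zero mem->bus.offset or mem->bus.addr as "already reserved", and the old bus.base field is folded into bus.offset, which now carries the full bus address handed to ioremap(). A sketch of what a driver's io_mem_reserve hook might look like under that contract (the device layout here is hypothetical):

	struct example_device {
		struct ttm_bo_device bdev;
		resource_size_t vram_base; /* hypothetical aperture base */
	};

	static int example_io_mem_reserve(struct ttm_bo_device *bdev,
					  struct ttm_resource *mem)
	{
		struct example_device *edev =
			container_of(bdev, struct example_device, bdev);

		switch (mem->mem_type) {
		case TTM_PL_SYSTEM:
			return 0; /* system RAM needs no aperture mapping */
		case TTM_PL_VRAM:
			/* bus.offset is now the absolute bus address. */
			mem->bus.offset = (mem->start << PAGE_SHIFT) +
					  edev->vram_base;
			mem->bus.is_iomem = true;
			return 0;
		default:
			return -EINVAL;
		}
	}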
*/ if (ttm) { - ret = ttm_tt_populate(ttm, ctx); + ret = ttm_tt_populate(bdev, ttm, ctx); if (ret) goto out1; } @@ -387,13 +297,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, mb(); out2: old_copy = *old_mem; - *old_mem = *new_mem; - new_mem->mm_node = NULL; - if (!man->use_tt) { - ttm_tt_destroy(ttm); - bo->ttm = NULL; - } + ttm_bo_assign_mem(bo, new_mem); + + if (!man->use_tt) + ttm_bo_tt_destroy(bo); out1: ttm_resource_iounmap(bdev, old_mem, new_iomap); @@ -458,7 +366,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, INIT_LIST_HEAD(&fbo->base.ddestroy); INIT_LIST_HEAD(&fbo->base.lru); INIT_LIST_HEAD(&fbo->base.swap); - INIT_LIST_HEAD(&fbo->base.io_reserve_lru); fbo->base.moving = NULL; drm_vma_node_reset(&fbo->base.base.vma_node); @@ -516,12 +423,10 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo, } else { map->bo_kmap_type = ttm_bo_map_iomap; if (mem->placement & TTM_PL_FLAG_WC) - map->virtual = ioremap_wc(bo->mem.bus.base + - bo->mem.bus.offset + offset, + map->virtual = ioremap_wc(bo->mem.bus.offset + offset, size); else - map->virtual = ioremap(bo->mem.bus.base + - bo->mem.bus.offset + offset, + map->virtual = ioremap(bo->mem.bus.offset + offset, size); } return (!map->virtual) ? -ENOMEM : 0; @@ -543,7 +448,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, BUG_ON(!ttm); - ret = ttm_tt_populate(ttm, &ctx); + ret = ttm_tt_populate(bo->bdev, ttm, &ctx); if (ret) return ret; @@ -573,8 +478,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) { - struct ttm_resource_manager *man = - ttm_manager_type(bo->bdev, bo->mem.mem_type); unsigned long offset, size; int ret; @@ -585,9 +488,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, if (start_page > bo->num_pages) return -EINVAL; - (void) ttm_mem_io_lock(man, false); ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); - ttm_mem_io_unlock(man); if (ret) return ret; if (!bo->mem.bus.is_iomem) { @@ -602,10 +503,6 @@ EXPORT_SYMBOL(ttm_bo_kmap); void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) { - struct ttm_buffer_object *bo = map->bo; - struct ttm_resource_manager *man = - ttm_manager_type(bo->bdev, bo->mem.mem_type); - if (!map->virtual) return; switch (map->bo_kmap_type) { @@ -623,167 +520,116 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) default: BUG(); } - (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(map->bo->bdev, &map->bo->mem); - ttm_mem_io_unlock(man); map->virtual = NULL; map->page = NULL; } EXPORT_SYMBOL(ttm_bo_kunmap); -int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - struct dma_fence *fence, - bool evict, - struct ttm_resource *new_mem) +static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo, + bool dst_use_tt) { - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); - struct ttm_resource *old_mem = &bo->mem; int ret; - struct ttm_buffer_object *ghost_obj; - - dma_resv_add_excl_fence(bo->base.resv, fence); - if (evict) { - ret = ttm_bo_wait(bo, false, false); - if (ret) - return ret; - - if (!man->use_tt) { - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; - } - ttm_bo_free_old_node(bo); - } else { - /** - * This should help pipeline ordinary buffer moves. - * - * Hang old buffer memory on a new buffer object, - * and leave it to be released when the GPU - * operation has completed. 
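The refactor below folds ttm_bo_pipeline_move() into ttm_bo_move_accel_cleanup(), which gains a fifth "pipeline" argument and then chooses between ghost-object pipelining, pipelined eviction, and a plain wait. A sketch of a converted call site (the surrounding driver context is hypothetical):

	static int example_move_done(struct ttm_buffer_object *bo,
				     struct dma_fence *fence, bool evict,
				     struct ttm_resource *new_mem)
	{
		/*
		 * pipeline = true lets evictions ride on the fence; the
		 * helper only falls back to waiting as a last resort.
		 */
		return ttm_bo_move_accel_cleanup(bo, fence, evict, true,
						 new_mem);
	}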
- */ - - dma_fence_put(bo->moving); - bo->moving = dma_fence_get(fence); - - ret = ttm_buffer_object_transfer(bo, &ghost_obj); - if (ret) - return ret; - - dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); - - /** - * If we're not moving to fixed memory, the TTM object - * needs to stay alive. Otherwhise hang it on the ghost - * bo to be unbound and destroyed. - */ - - if (man->use_tt) - ghost_obj->ttm = NULL; - else - bo->ttm = NULL; - - dma_resv_unlock(&ghost_obj->base._resv); - ttm_bo_put(ghost_obj); - } - - *old_mem = *new_mem; - new_mem->mm_node = NULL; + ret = ttm_bo_wait(bo, false, false); + if (ret) + return ret; + if (!dst_use_tt) + ttm_bo_tt_destroy(bo); + ttm_bo_free_old_node(bo); return 0; } -EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); -int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, - struct dma_fence *fence, bool evict, - struct ttm_resource *new_mem) +static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo, + struct dma_fence *fence, + bool dst_use_tt) { - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_resource *old_mem = &bo->mem; - - struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type); - struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type); - + struct ttm_buffer_object *ghost_obj; int ret; - dma_resv_add_excl_fence(bo->base.resv, fence); - - if (!evict) { - struct ttm_buffer_object *ghost_obj; - - /** - * This should help pipeline ordinary buffer moves. - * - * Hang old buffer memory on a new buffer object, - * and leave it to be released when the GPU - * operation has completed. - */ + /** + * This should help pipeline ordinary buffer moves. + * + * Hang old buffer memory on a new buffer object, + * and leave it to be released when the GPU + * operation has completed. + */ - dma_fence_put(bo->moving); - bo->moving = dma_fence_get(fence); + dma_fence_put(bo->moving); + bo->moving = dma_fence_get(fence); - ret = ttm_buffer_object_transfer(bo, &ghost_obj); - if (ret) - return ret; - - dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); + ret = ttm_buffer_object_transfer(bo, &ghost_obj); + if (ret) + return ret; - /** - * If we're not moving to fixed memory, the TTM object - * needs to stay alive. Otherwhise hang it on the ghost - * bo to be unbound and destroyed. - */ + dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); - if (to->use_tt) - ghost_obj->ttm = NULL; - else - bo->ttm = NULL; + /** + * If we're not moving to fixed memory, the TTM object + * needs to stay alive. Otherwise hang it on the ghost + * bo to be unbound and destroyed. + */ - dma_resv_unlock(&ghost_obj->base._resv); - ttm_bo_put(ghost_obj); + if (dst_use_tt) + ghost_obj->ttm = NULL; + else + bo->ttm = NULL; - } else if (!from->use_tt) { + dma_resv_unlock(&ghost_obj->base._resv); + ttm_bo_put(ghost_obj); + return 0; +} - - /** - * BO doesn't have a TTM we need to bind/unbind. Just remember - * this eviction and free up the allocation - */ +static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo, + struct dma_fence *fence) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type); + /** + * BO doesn't have a TTM we need to bind/unbind.
Just remember + * this eviction and free up the allocation + */ + spin_lock(&from->move_lock); + if (!from->move || dma_fence_is_later(fence, from->move)) { + dma_fence_put(from->move); + from->move = dma_fence_get(fence); + } + spin_unlock(&from->move_lock); - ttm_bo_free_old_node(bo); + ttm_bo_free_old_node(bo); - dma_fence_put(bo->moving); - bo->moving = dma_fence_get(fence); + dma_fence_put(bo->moving); + bo->moving = dma_fence_get(fence); +} - } else { - /** - * Last resort, wait for the move to be completed. - * - * Should never happen in pratice. - */ +int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, + struct dma_fence *fence, + bool evict, + bool pipeline, + struct ttm_resource *new_mem) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); + int ret = 0; - ret = ttm_bo_wait(bo, false, false); - if (ret) - return ret; + dma_resv_add_excl_fence(bo->base.resv, fence); + if (!evict) + ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt); + else if (!from->use_tt && pipeline) + ttm_bo_move_pipeline_evict(bo, fence); + else + ret = ttm_bo_wait_free_node(bo, man->use_tt); - if (!to->use_tt) { - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; - } - ttm_bo_free_old_node(bo); - } + if (ret) + return ret; - *old_mem = *new_mem; - new_mem->mm_node = NULL; + ttm_bo_assign_mem(bo, new_mem); return 0; } -EXPORT_SYMBOL(ttm_bo_pipeline_move); +EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 01693e8f24b7..98a006fc30a5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -101,8 +101,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, if (bdev->driver->io_mem_pfn) return bdev->driver->io_mem_pfn(bo, page_offset); - return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) - + page_offset; + return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset; } /** @@ -281,8 +280,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, pgoff_t i; vm_fault_t ret = VM_FAULT_NOPAGE; unsigned long address = vmf->address; - struct ttm_resource_manager *man = - ttm_manager_type(bdev, bo->mem.mem_type); /* * Refuse to fault imported pages. 
This should be handled @@ -321,24 +318,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, if (unlikely(ret != 0)) return ret; - err = ttm_mem_io_lock(man, true); + err = ttm_mem_io_reserve(bdev, &bo->mem); if (unlikely(err != 0)) - return VM_FAULT_NOPAGE; - err = ttm_mem_io_reserve_vm(bo); - if (unlikely(err != 0)) { - ret = VM_FAULT_SIGBUS; - goto out_io_unlock; - } + return VM_FAULT_SIGBUS; page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); page_last = vma_pages(vma) + vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); - if (unlikely(page_offset >= bo->num_pages)) { - ret = VM_FAULT_SIGBUS; - goto out_io_unlock; - } + if (unlikely(page_offset >= bo->num_pages)) + return VM_FAULT_SIGBUS; prot = ttm_io_prot(bo->mem.placement, prot); if (!bo->mem.bus.is_iomem) { @@ -350,21 +340,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, }; ttm = bo->ttm; - if (ttm_tt_populate(bo->ttm, &ctx)) { - ret = VM_FAULT_OOM; - goto out_io_unlock; - } + if (ttm_tt_populate(bdev, bo->ttm, &ctx)) + return VM_FAULT_OOM; } else { /* Iomem should not be marked encrypted */ prot = pgprot_decrypted(prot); } /* We don't prefault on huge faults. Yet. */ - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) { - ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset, - fault_page_size, prot); - goto out_io_unlock; - } + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) + return ttm_bo_vm_insert_huge(vmf, bo, page_offset, + fault_page_size, prot); /* * Speculatively prefault a number of pages. Only error on @@ -376,8 +362,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, } else { page = ttm->pages[page_offset]; if (unlikely(!page && i == 0)) { - ret = VM_FAULT_OOM; - goto out_io_unlock; + return VM_FAULT_OOM; } else if (unlikely(!page)) { break; } @@ -404,7 +389,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, /* Never error on prefaulted PTEs */ if (unlikely((ret & VM_FAULT_ERROR))) { if (i == 0) - goto out_io_unlock; + return VM_FAULT_NOPAGE; else break; } @@ -413,9 +398,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, if (unlikely(++page_offset >= page_last)) break; } - ret = VM_FAULT_NOPAGE; -out_io_unlock: - ttm_mem_io_unlock(man); return ret; } EXPORT_SYMBOL(ttm_bo_vm_fault_reserved); diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index acd63b70d814..89d50f38c0f2 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -259,7 +259,7 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob, return false; } -/** +/* * At this point we only support a single shrink callback. * Extend this if needed, perhaps using a linked list of callbacks. 
* Note that this function is reentrant: @@ -554,7 +554,6 @@ ttm_check_under_lowerlimit(struct ttm_mem_global *glob, return false; } -EXPORT_SYMBOL(ttm_check_under_lowerlimit); static int ttm_mem_global_reserve(struct ttm_mem_global *glob, struct ttm_mem_zone *single_zone, @@ -682,9 +681,3 @@ size_t ttm_round_pot(size_t size) return 0; } EXPORT_SYMBOL(ttm_round_pot); - -uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob) -{ - return glob->zone_kernel->max_mem; -} -EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index b40a4678c296..14660f723f71 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -1044,7 +1044,7 @@ ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update) put_pages: ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, ttm->caching_state); - ttm->state = tt_unpopulated; + ttm_tt_set_unpopulated(ttm); } int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) @@ -1053,7 +1053,7 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) unsigned i; int ret; - if (ttm->state != tt_unpopulated) + if (ttm_tt_is_populated(ttm)) return 0; if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx)) @@ -1083,7 +1083,7 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) } } - ttm->state = tt_unbound; + ttm_tt_set_populated(ttm); return 0; } EXPORT_SYMBOL(ttm_pool_populate); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index faefaaef7909..5e2df11685e7 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -894,7 +894,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, unsigned i; int ret; - if (ttm->state != tt_unpopulated) + if (ttm_tt_is_populated(ttm)) return 0; if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx)) @@ -982,7 +982,7 @@ skip_huge: } } - ttm->state = tt_unbound; + ttm_tt_set_populated(ttm); return 0; } EXPORT_SYMBOL_GPL(ttm_dma_populate); @@ -1076,7 +1076,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) /* shrink pool if necessary (only on !is_cached pools)*/ if (npages) ttm_dma_page_pool_free(pool, npages, false); - ttm->state = tt_unpopulated; + ttm_tt_set_unpopulated(ttm); } EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index 770c8988c139..1da0e277c511 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -113,10 +113,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man, static const struct ttm_resource_manager_func ttm_range_manager_func; int ttm_range_man_init(struct ttm_bo_device *bdev, - unsigned type, - uint32_t available_caching, - uint32_t default_caching, - bool use_tt, + unsigned type, bool use_tt, unsigned long p_size) { struct ttm_resource_manager *man; @@ -127,8 +124,6 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, return -ENOMEM; man = &rman->manager; - man->available_caching = available_caching; - man->default_caching = default_caching; man->use_tt = use_tt; man->func = &ttm_range_manager_func; diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 33b642532e5c..b325b9264203 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -65,10 +65,7 @@ 
void ttm_resource_manager_init(struct ttm_resource_manager *man, { unsigned i; - man->use_io_reserve_lru = false; - mutex_init(&man->io_reserve_mutex); spin_lock_init(&man->move_lock); - INIT_LIST_HEAD(&man->io_reserve_lru); man->size = p_size; for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) @@ -143,8 +140,6 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man, drm_printf(p, " use_type: %d\n", man->use_type); drm_printf(p, " use_tt: %d\n", man->use_tt); drm_printf(p, " size: %llu\n", man->size); - drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); - drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); if (man->func && man->func->debug) (*man->func->debug)(man, p); } diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 1ccf1ef050d6..f43fa69a1e65 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -156,7 +156,7 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm, if (ttm->caching_state == c_state) return 0; - if (ttm->state == tt_unpopulated) { + if (!ttm_tt_is_populated(ttm)) { /* Change caching but don't populate */ ttm->caching_state = c_state; return 0; @@ -207,33 +207,31 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) } EXPORT_SYMBOL(ttm_tt_set_placement_caching); -void ttm_tt_destroy(struct ttm_tt *ttm) +void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { - if (ttm == NULL) - return; - - ttm_tt_unbind(ttm); - - if (ttm->state == tt_unbound) - ttm_tt_unpopulate(ttm); + ttm_tt_unpopulate(bdev, ttm); if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) && ttm->swap_storage) fput(ttm->swap_storage); ttm->swap_storage = NULL; - ttm->func->destroy(ttm); +} +EXPORT_SYMBOL(ttm_tt_destroy_common); + +void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +{ + bdev->driver->ttm_tt_destroy(bdev, ttm); } static void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo, uint32_t page_flags) { - ttm->bdev = bo->bdev; ttm->num_pages = bo->num_pages; ttm->caching_state = tt_cached; ttm->page_flags = page_flags; - ttm->state = tt_unpopulated; + ttm_tt_set_unpopulated(ttm); ttm->swap_storage = NULL; ttm->sg = bo->sg; } @@ -308,39 +306,6 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma) } EXPORT_SYMBOL(ttm_dma_tt_fini); -void ttm_tt_unbind(struct ttm_tt *ttm) -{ - if (ttm->state == tt_bound) { - ttm->func->unbind(ttm); - ttm->state = tt_unbound; - } -} - -int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem, - struct ttm_operation_ctx *ctx) -{ - int ret = 0; - - if (!ttm) - return -EINVAL; - - if (ttm->state == tt_bound) - return 0; - - ret = ttm_tt_populate(ttm, ctx); - if (ret) - return ret; - - ret = ttm->func->bind(ttm, bo_mem); - if (unlikely(ret != 0)) - return ret; - - ttm->state = tt_bound; - - return 0; -} -EXPORT_SYMBOL(ttm_tt_bind); - int ttm_tt_swapin(struct ttm_tt *ttm) { struct address_space *swap_space; @@ -383,7 +348,8 @@ out_err: return ret; } -int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) +int ttm_tt_swapout(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct file *persistent_swap_storage) { struct address_space *swap_space; struct file *swap_storage; @@ -392,7 +358,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) int i; int ret = -ENOMEM; - BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); BUG_ON(ttm->caching_state != tt_cached); if (!persistent_swap_storage) { @@ -429,7 +394,7 @@ int ttm_tt_swapout(struct ttm_tt 
*ttm, struct file *persistent_swap_storage) put_page(to_page); } - ttm_tt_unpopulate(ttm); + ttm_tt_unpopulate(bdev, ttm); ttm->swap_storage = swap_storage; ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; if (persistent_swap_storage) @@ -443,7 +408,7 @@ out_err: return ret; } -static void ttm_tt_add_mapping(struct ttm_tt *ttm) +static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { pgoff_t i; @@ -451,24 +416,29 @@ static void ttm_tt_add_mapping(struct ttm_tt *ttm) return; for (i = 0; i < ttm->num_pages; ++i) - ttm->pages[i]->mapping = ttm->bdev->dev_mapping; + ttm->pages[i]->mapping = bdev->dev_mapping; } -int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) +int ttm_tt_populate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { int ret; - if (ttm->state != tt_unpopulated) + if (!ttm) + return -EINVAL; + + if (ttm_tt_is_populated(ttm)) return 0; - if (ttm->bdev->driver->ttm_tt_populate) - ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx); + if (bdev->driver->ttm_tt_populate) + ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx); else ret = ttm_pool_populate(ttm, ctx); if (!ret) - ttm_tt_add_mapping(ttm); + ttm_tt_add_mapping(bdev, ttm); return ret; } +EXPORT_SYMBOL(ttm_tt_populate); static void ttm_tt_clear_mapping(struct ttm_tt *ttm) { @@ -484,14 +454,15 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm) } } -void ttm_tt_unpopulate(struct ttm_tt *ttm) +void ttm_tt_unpopulate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { - if (ttm->state == tt_unpopulated) + if (!ttm_tt_is_populated(ttm)) return; ttm_tt_clear_mapping(ttm); - if (ttm->bdev->driver->ttm_tt_unpopulate) - ttm->bdev->driver->ttm_tt_unpopulate(ttm); + if (bdev->driver->ttm_tt_unpopulate) + bdev->driver->ttm_tt_unpopulate(bdev, ttm); else ttm_pool_unpopulate(ttm); } diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c index 3b81ea28c0bb..5a453532901f 100644 --- a/drivers/gpu/drm/v3d/v3d_mmu.c +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -90,18 +90,17 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo) struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev); u32 page = bo->node.start; u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; - unsigned int count; - struct scatterlist *sgl; + struct sg_dma_page_iter dma_iter; - for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) { - u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT; + for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) { + dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter); + u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT; u32 pte = page_prot | page_address; u32 i; - BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >= + BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >= BIT(24)); - - for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++) + for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++) v3d->pt[page++] = pte + i; } diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index d9a5af62af89..4fcc0a542b8a 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -397,11 +397,13 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, vbox_crtc->cursor_enabled = true; - /* pinning is done in prepare/cleanup framebuffer */ - src = drm_gem_vram_kmap(gbo, true, NULL); + src = drm_gem_vram_vmap(gbo); if (IS_ERR(src)) { + /* + * BUG: we should have pinned the BO in prepare_fb(). 
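The vboxvideo hunk here is part of retiring drm_gem_vram_kmap()/drm_gem_vram_kunmap() in favour of the vmap pair, where the caller keeps the returned pointer and hands it back when unmapping. A sketch of the new pattern (the wrapper is hypothetical):

	#include <drm/drm_gem_vram_helper.h>

	static int example_touch_bo(struct drm_gem_vram_object *gbo)
	{
		void *vaddr;

		vaddr = drm_gem_vram_vmap(gbo);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		/* ... access the buffer through vaddr ... */

		drm_gem_vram_vunmap(gbo, vaddr);
		return 0;
	}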
+ */ mutex_unlock(&vbox->hw_mutex); - DRM_WARN("Could not kmap cursor bo, skipping update\n"); + DRM_WARN("Could not map cursor bo, skipping update\n"); return; } @@ -414,7 +416,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, data_size = width * height * 4 + mask_size; copy_cursor_image(src, vbox->cursor_data, width, height, mask_size); - drm_gem_vram_kunmap(gbo); + drm_gem_vram_vunmap(gbo, src); flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA; diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile index b303703bc7f3..d0163e18e9ca 100644 --- a/drivers/gpu/drm/vc4/Makefile +++ b/drivers/gpu/drm/vc4/Makefile @@ -12,6 +12,7 @@ vc4-y := \ vc4_kms.o \ vc4_gem.o \ vc4_hdmi.o \ + vc4_hdmi_phy.o \ vc4_vec.o \ vc4_hvs.o \ vc4_irq.o \ diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 6d8fa6118fc1..482219fb4db2 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -65,6 +65,20 @@ static const struct debugfs_reg32 crtc_regs[] = { VC4_REG32(PV_HACT_ACT), }; +static unsigned int +vc4_crtc_get_cob_allocation(struct vc4_dev *vc4, unsigned int channel) +{ + u32 dispbase = HVS_READ(SCALER_DISPBASEX(channel)); + /* Top/base are supposed to be 4-pixel aligned, but the + * Raspberry Pi firmware fills the low bits (which are + * presumably ignored). + */ + u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3; + u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3; + + return top - base + 4; +} + static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq, int *vpos, int *hpos, @@ -74,6 +88,8 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state); + unsigned int cob_size; u32 val; int fifo_lines; int vblank_lines; @@ -89,7 +105,7 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, * Read vertical scanline which is currently composed for our * pixelvalve by the HVS, and also the scaler status. */ - val = HVS_READ(SCALER_DISPSTATX(vc4_crtc->channel)); + val = HVS_READ(SCALER_DISPSTATX(vc4_crtc_state->assigned_channel)); /* Get optional system timestamp after query. */ if (etime) @@ -109,8 +125,9 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, *hpos += mode->crtc_htotal / 2; } + cob_size = vc4_crtc_get_cob_allocation(vc4, vc4_crtc_state->assigned_channel); /* This is the offset we need for translating hvs -> pv scanout pos. */ - fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay; + fifo_lines = cob_size / mode->crtc_hdisplay; if (fifo_lines > 0) ret = true; @@ -189,10 +206,22 @@ void vc4_crtc_destroy(struct drm_crtc *crtc) drm_crtc_cleanup(crtc); } -static u32 vc4_get_fifo_full_level(u32 format) +static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format) { - static const u32 fifo_len_bytes = 64; + const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc); + const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc); + u32 fifo_len_bytes = pv_data->fifo_depth; + /* + * Pixels are pulled from the HVS if the number of bytes is + * lower than the FIFO full level. 
* + * The latency of the pixel fetch mechanism is 6 pixels, so we + * need to convert those 6 pixels into bytes, depending on the + * format, and then subtract that from the length of the FIFO + * to make sure we never end up in a situation where the FIFO + * is full. + */ switch (format) { case PV_CONTROL_FORMAT_DSIV_16: case PV_CONTROL_FORMAT_DSIC_16: @@ -202,10 +231,30 @@ static u32 vc4_get_fifo_full_level(u32 format) case PV_CONTROL_FORMAT_24: case PV_CONTROL_FORMAT_DSIV_24: default: + /* + * For some reason, the pixelvalve4 doesn't work with + * the usual formula and will only work with 32. + */ + if (crtc_data->hvs_output == 5) + return 32; + return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX; } } +static u32 vc4_crtc_get_fifo_full_level_bits(struct vc4_crtc *vc4_crtc, + u32 format) +{ + u32 level = vc4_get_fifo_full_level(vc4_crtc, format); + u32 ret = 0; + + ret |= VC4_SET_FIELD((level >> 6), + PV5_CONTROL_FIFO_LEVEL_HIGH); + + return ret | VC4_SET_FIELD(level & 0x3f, + PV_CONTROL_FIFO_LEVEL); +} + /* * Returns the encoder attached to the CRTC. * @@ -230,11 +279,23 @@ static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc) return NULL; } +static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc) +{ + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + + /* The PV needs to be disabled before it can be flushed */ + CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) & ~PV_CONTROL_EN); + CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR); +} + static void vc4_crtc_config_pv(struct drm_crtc *crtc) { + struct drm_device *dev = crtc->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc); struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc); struct drm_crtc_state *state = crtc->state; struct drm_display_mode *mode = &state->adjusted_mode; bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE; @@ -242,24 +303,29 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc) bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 || vc4_encoder->type == VC4_ENCODER_TYPE_DSI1); u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24; + u8 ppc = pv_data->pixels_per_clock; + bool debug_dump_regs = false; - /* Reset the PV fifo.
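A worked example of the split performed by vc4_crtc_get_fifo_full_level_bits() above: with the 64-byte FIFO depth used by the BCM2835 pixelvalves and a 24bpp format (3 bytes per pixel), vc4_get_fifo_full_level() returns 64 - 3 * HVS_FIFO_LATENCY_PIX = 64 - 18 = 46, taking the 6-pixel fetch latency stated in the comment above. PV5_CONTROL_FIFO_LEVEL_HIGH therefore receives 46 >> 6 = 0 and the classic PV_CONTROL_FIFO_LEVEL field receives 46 & 0x3f = 46; only levels above 63, possible with the deeper BCM2711 FIFOs, spill into the HIGH bits.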
*/ - CRTC_WRITE(PV_CONTROL, 0); - CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR | PV_CONTROL_EN); - CRTC_WRITE(PV_CONTROL, 0); + if (debug_dump_regs) { + struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev); + dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n", + drm_crtc_index(crtc)); + drm_print_regset32(&p, &vc4_crtc->regset); + } + + vc4_crtc_pixelvalve_reset(crtc); CRTC_WRITE(PV_HORZA, - VC4_SET_FIELD((mode->htotal - - mode->hsync_end) * pixel_rep, + VC4_SET_FIELD((mode->htotal - mode->hsync_end) * pixel_rep / ppc, PV_HORZA_HBP) | - VC4_SET_FIELD((mode->hsync_end - - mode->hsync_start) * pixel_rep, + VC4_SET_FIELD((mode->hsync_end - mode->hsync_start) * pixel_rep / ppc, PV_HORZA_HSYNC)); + CRTC_WRITE(PV_HORZB, - VC4_SET_FIELD((mode->hsync_start - - mode->hdisplay) * pixel_rep, + VC4_SET_FIELD((mode->hsync_start - mode->hdisplay) * pixel_rep / ppc, PV_HORZB_HFP) | - VC4_SET_FIELD(mode->hdisplay * pixel_rep, PV_HORZB_HACTIVE)); + VC4_SET_FIELD(mode->hdisplay * pixel_rep / ppc, + PV_HORZB_HACTIVE)); CRTC_WRITE(PV_VERTA, VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end, @@ -306,35 +372,20 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc) if (is_dsi) CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep); - CRTC_WRITE(PV_CONTROL, + if (vc4->hvs->hvs5) + CRTC_WRITE(PV_MUX_CFG, + VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP, + PV_MUX_CFG_RGB_PIXEL_MUX_MODE)); + + CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR | + vc4_crtc_get_fifo_full_level_bits(vc4_crtc, format) | VC4_SET_FIELD(format, PV_CONTROL_FORMAT) | - VC4_SET_FIELD(vc4_get_fifo_full_level(format), - PV_CONTROL_FIFO_LEVEL) | VC4_SET_FIELD(pixel_rep - 1, PV_CONTROL_PIXEL_REP) | PV_CONTROL_CLR_AT_START | PV_CONTROL_TRIGGER_UNDERFLOW | PV_CONTROL_WAIT_HSTART | VC4_SET_FIELD(vc4_encoder->clock_select, - PV_CONTROL_CLK_SELECT) | - PV_CONTROL_FIFO_CLR | - PV_CONTROL_EN); -} - -static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc) -{ - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - bool debug_dump_regs = false; - - if (debug_dump_regs) { - struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev); - dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n", - drm_crtc_index(crtc)); - drm_print_regset32(&p, &vc4_crtc->regset); - } - - vc4_crtc_config_pv(crtc); - - vc4_hvs_mode_set_nofb(crtc); + PV_CONTROL_CLK_SELECT)); if (debug_dump_regs) { struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev); @@ -352,24 +403,86 @@ static void require_hvs_enabled(struct drm_device *dev) SCALER_DISPCTRL_ENABLE); } +static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel) +{ + struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc); + struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct drm_device *dev = crtc->dev; + int ret; + + CRTC_WRITE(PV_V_CONTROL, + CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN); + ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1); + WARN_ONCE(ret, "Timeout waiting for !PV_VCONTROL_VIDEN\n"); + + /* + * This delay is needed to avoid getting a pixel stuck in an + * unflushable FIFO between the pixelvalve and the HDMI + * controllers on the BCM2711. + * + * Timing is fairly sensitive here, so mdelay is the safest + * approach. + * + * If it were to be reworked, the stuck pixel happens on a + * BCM2711 when changing mode with high probability, so a + * script that changes mode on a regular basis should trigger + * the bug after less than 10 attempts.
It manifests itself with + * every pixel being shifted by one to the right, and thus the + * last pixel of a line actually being displayed as the first + * pixel on the next line. + */ + mdelay(20); + + if (vc4_encoder && vc4_encoder->post_crtc_disable) + vc4_encoder->post_crtc_disable(encoder); + + vc4_crtc_pixelvalve_reset(crtc); + vc4_hvs_stop_channel(dev, channel); + + if (vc4_encoder && vc4_encoder->post_crtc_powerdown) + vc4_encoder->post_crtc_powerdown(encoder); + + return 0; +} + +int vc4_crtc_disable_at_boot(struct drm_crtc *crtc) +{ + struct drm_device *drm = crtc->dev; + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + int channel; + + if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node, + "brcm,bcm2711-pixelvalve2") || + of_device_is_compatible(vc4_crtc->pdev->dev.of_node, + "brcm,bcm2711-pixelvalve4"))) + return 0; + + if (!(CRTC_READ(PV_CONTROL) & PV_CONTROL_EN)) + return 0; + + if (!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN)) + return 0; + + channel = vc4_hvs_get_fifo_from_output(drm, vc4_crtc->data->hvs_output); + if (channel < 0) + return 0; + + return vc4_crtc_disable(crtc, channel); +} + static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { + struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state); struct drm_device *dev = crtc->dev; - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - int ret; require_hvs_enabled(dev); /* Disable vblank irq handling before crtc is disabled. */ drm_crtc_vblank_off(crtc); - CRTC_WRITE(PV_V_CONTROL, - CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN); - ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1); - WARN_ONCE(ret, "Timeout waiting for !PV_VCONTROL_VIDEN\n"); - - vc4_hvs_atomic_disable(crtc, old_state); + vc4_crtc_disable(crtc, old_vc4_state->assigned_channel); /* * Make sure we issue a vblank event after disabling the CRTC if @@ -390,6 +503,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc); + struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); require_hvs_enabled(dev); @@ -400,11 +515,24 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, vc4_hvs_atomic_enable(crtc, old_state); + if (vc4_encoder->pre_crtc_configure) + vc4_encoder->pre_crtc_configure(encoder); + + vc4_crtc_config_pv(crtc); + + CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN); + + if (vc4_encoder->pre_crtc_enable) + vc4_encoder->pre_crtc_enable(encoder); + /* When feeding the transposer block the pixelvalve is unneeded and * should not be enabled. */ CRTC_WRITE(PV_V_CONTROL, CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN); + + if (vc4_encoder->post_crtc_enable) + vc4_encoder->post_crtc_enable(encoder); } static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc, @@ -499,7 +627,7 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); - u32 chan = vc4_crtc->channel; + u32 chan = vc4_state->assigned_channel; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); @@ -516,7 +644,7 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) * the CRTC and encoder already reconfigured, leading to * underruns. This can be seen when reconfiguring the CRTC.
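The assigned_channel read above replaces the old fixed vc4_crtc->channel: on the BCM2711 most pixelvalves can source from several HVS FIFOs, so a channel is handed out per atomic state from the hvs_available_channels masks introduced below (the allocator itself lives outside these hunks, in vc4_kms.c). A toy model of that idea, assuming a simple lowest-free-bit policy:

/*
 * Sketch of channel assignment: each CRTC advertises a bitmask of
 * HVS FIFOs it can use; a free one is claimed per atomic state.
 * The in-tree algorithm may differ; this only shows the shape.
 */
#include <stdio.h>

#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)

static unsigned int pick_channel(unsigned int available,
				 unsigned int *in_use)
{
	unsigned int free = available & ~*in_use;

	if (!free)
		return VC4_HVS_CHANNEL_DISABLED;

	*in_use |= free & -free;	/* claim the lowest set bit */
	return __builtin_ctz(free);	/* and return its index */
}

int main(void)
{
	unsigned int in_use = 0;

	/* BCM2711 HDMI0 advertises BIT(0) | BIT(1) | BIT(2)... */
	printf("hdmi0 -> %u\n", pick_channel(0x7, &in_use));	/* 0 */
	/* ...while the VEC pixelvalve is fixed to BIT(1). */
	printf("vec   -> %u\n", pick_channel(0x2, &in_use));	/* 1 */
	printf("hdmi1 -> %u\n", pick_channel(0x7, &in_use));	/* 2 */
	return 0;
}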
*/ - vc4_hvs_unmask_underrun(dev, vc4_crtc->channel); + vc4_hvs_unmask_underrun(dev, chan); } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -698,6 +826,7 @@ struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc) old_vc4_state = to_vc4_crtc_state(crtc->state); vc4_state->feed_txp = old_vc4_state->feed_txp; vc4_state->margins = old_vc4_state->margins; + vc4_state->assigned_channel = old_vc4_state->assigned_channel; __drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base); return &vc4_state->base; @@ -723,11 +852,19 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc, void vc4_crtc_reset(struct drm_crtc *crtc) { + struct vc4_crtc_state *vc4_crtc_state; + if (crtc->state) vc4_crtc_destroy_state(crtc, crtc->state); - crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL); - if (crtc->state) - __drm_atomic_helper_crtc_reset(crtc, crtc->state); + + vc4_crtc_state = kzalloc(sizeof(*vc4_crtc_state), GFP_KERNEL); + if (!vc4_crtc_state) { + crtc->state = NULL; + return; + } + + vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED; + __drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base); } static const struct drm_crtc_funcs vc4_crtc_funcs = { @@ -747,7 +884,6 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = { }; static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { - .mode_set_nofb = vc4_crtc_mode_set_nofb, .mode_valid = vc4_crtc_mode_valid, .atomic_check = vc4_crtc_atomic_check, .atomic_flush = vc4_hvs_atomic_flush, @@ -758,9 +894,12 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { static const struct vc4_pv_data bcm2835_pv0_data = { .base = { - .hvs_channel = 0, + .hvs_available_channels = BIT(0), + .hvs_output = 0, }, .debugfs_name = "crtc0_regs", + .fifo_depth = 64, + .pixels_per_clock = 1, .encoder_types = { [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0, [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI, @@ -769,9 +908,12 @@ static const struct vc4_pv_data bcm2835_pv0_data = { static const struct vc4_pv_data bcm2835_pv1_data = { .base = { - .hvs_channel = 2, + .hvs_available_channels = BIT(2), + .hvs_output = 2, }, .debugfs_name = "crtc1_regs", + .fifo_depth = 64, + .pixels_per_clock = 1, .encoder_types = { [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1, [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI, @@ -780,19 +922,94 @@ static const struct vc4_pv_data bcm2835_pv1_data = { static const struct vc4_pv_data bcm2835_pv2_data = { .base = { - .hvs_channel = 1, + .hvs_available_channels = BIT(1), + .hvs_output = 1, }, .debugfs_name = "crtc2_regs", + .fifo_depth = 64, + .pixels_per_clock = 1, .encoder_types = { - [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI, + [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI0, [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC, }, }; +static const struct vc4_pv_data bcm2711_pv0_data = { + .base = { + .hvs_available_channels = BIT(0), + .hvs_output = 0, + }, + .debugfs_name = "crtc0_regs", + .fifo_depth = 64, + .pixels_per_clock = 1, + .encoder_types = { + [0] = VC4_ENCODER_TYPE_DSI0, + [1] = VC4_ENCODER_TYPE_DPI, + }, +}; + +static const struct vc4_pv_data bcm2711_pv1_data = { + .base = { + .hvs_available_channels = BIT(0) | BIT(1) | BIT(2), + .hvs_output = 3, + }, + .debugfs_name = "crtc1_regs", + .fifo_depth = 64, + .pixels_per_clock = 1, + .encoder_types = { + [0] = VC4_ENCODER_TYPE_DSI1, + [1] = VC4_ENCODER_TYPE_SMI, + }, +}; + +static const struct vc4_pv_data bcm2711_pv2_data = { + .base = { + 
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2), + .hvs_output = 4, + }, + .debugfs_name = "crtc2_regs", + .fifo_depth = 256, + .pixels_per_clock = 2, + .encoder_types = { + [0] = VC4_ENCODER_TYPE_HDMI0, + }, +}; + +static const struct vc4_pv_data bcm2711_pv3_data = { + .base = { + .hvs_available_channels = BIT(1), + .hvs_output = 1, + }, + .debugfs_name = "crtc3_regs", + .fifo_depth = 64, + .pixels_per_clock = 1, + .encoder_types = { + [0] = VC4_ENCODER_TYPE_VEC, + }, +}; + +static const struct vc4_pv_data bcm2711_pv4_data = { + .base = { + .hvs_available_channels = BIT(0) | BIT(1) | BIT(2), + .hvs_output = 5, + }, + .debugfs_name = "crtc4_regs", + .fifo_depth = 64, + .pixels_per_clock = 2, + .encoder_types = { + [0] = VC4_ENCODER_TYPE_HDMI1, + }, +}; + static const struct of_device_id vc4_crtc_dt_match[] = { { .compatible = "brcm,bcm2835-pixelvalve0", .data = &bcm2835_pv0_data }, { .compatible = "brcm,bcm2835-pixelvalve1", .data = &bcm2835_pv1_data }, { .compatible = "brcm,bcm2835-pixelvalve2", .data = &bcm2835_pv2_data }, + { .compatible = "brcm,bcm2711-pixelvalve0", .data = &bcm2711_pv0_data }, + { .compatible = "brcm,bcm2711-pixelvalve1", .data = &bcm2711_pv1_data }, + { .compatible = "brcm,bcm2711-pixelvalve2", .data = &bcm2711_pv2_data }, + { .compatible = "brcm,bcm2711-pixelvalve3", .data = &bcm2711_pv3_data }, + { .compatible = "brcm,bcm2711-pixelvalve4", .data = &bcm2711_pv4_data }, {} }; @@ -819,26 +1036,11 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm, } } -static void -vc4_crtc_get_cob_allocation(struct vc4_crtc *vc4_crtc) -{ - struct drm_device *drm = vc4_crtc->base.dev; - struct vc4_dev *vc4 = to_vc4_dev(drm); - u32 dispbase = HVS_READ(SCALER_DISPBASEX(vc4_crtc->channel)); - /* Top/base are supposed to be 4-pixel aligned, but the - * Raspberry Pi firmware fills the low bits (which are - * presumably ignored). - */ - u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3; - u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3; - - vc4_crtc->cob_size = top - base + 4; -} - int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc, const struct drm_crtc_funcs *crtc_funcs, const struct drm_crtc_helper_funcs *crtc_helper_funcs) { + struct vc4_dev *vc4 = to_vc4_dev(drm); struct drm_crtc *crtc = &vc4_crtc->base; struct drm_plane *primary_plane; unsigned int i; @@ -858,15 +1060,17 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc, drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, crtc_funcs, NULL); drm_crtc_helper_add(crtc, crtc_helper_funcs); - vc4_crtc->channel = vc4_crtc->data->hvs_channel; - drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r)); - drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size); - /* We support CTM, but only for one CRTC at a time. It's therefore - * implemented as private driver state in vc4_kms, not here. - */ - drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size); - vc4_crtc_get_cob_allocation(vc4_crtc); + if (!vc4->hvs->hvs5) { + drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r)); + + drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size); + + /* We support CTM, but only for one CRTC at a time. It's therefore + * implemented as private driver state in vc4_kms, not here. 
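The new fifo_depth and pixels_per_clock fields in the pv_data tables above drive the two BCM2711-specific conversions seen earlier: the FIFO-full level starts from a depth in bytes, and the PV horizontal timings are divided by ppc because the BCM2711 HDMI pixelvalves emit two pixels per clock. A worked check of the ppc arithmetic, using standard CEA 1080p60 timings as the example input:

/*
 * Illustrative check of the pixels-per-clock conversion done in
 * vc4_crtc_config_pv(): PV horizontal timings are programmed in
 * clocks, so pixel counts are divided by ppc (2 on the BCM2711
 * HDMI pixelvalves). Timings below are CEA 1920x1080@60.
 */
#include <stdio.h>

int main(void)
{
	unsigned int hdisplay = 1920, hsync_start = 2008;
	unsigned int hsync_end = 2052, htotal = 2200;
	unsigned int pixel_rep = 1, ppc = 2;

	printf("HACTIVE: %u clocks\n", hdisplay * pixel_rep / ppc);	/* 960 */
	printf("HFP:     %u clocks\n",
	       (hsync_start - hdisplay) * pixel_rep / ppc);		/* 44 */
	printf("HSYNC:   %u clocks\n",
	       (hsync_end - hsync_start) * pixel_rep / ppc);		/* 22 */
	printf("HBP:     %u clocks\n",
	       (htotal - hsync_end) * pixel_rep / ppc);			/* 74 */
	return 0;
}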
+ */ + drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size); + } for (i = 0; i < crtc->gamma_size; i++) { vc4_crtc->lut_r[i] = i; @@ -915,7 +1119,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) CRTC_WRITE(PV_INTEN, 0); CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START); ret = devm_request_irq(dev, platform_get_irq(pdev, 0), - vc4_crtc_irq_handler, 0, "vc4 crtc", vc4_crtc); + vc4_crtc_irq_handler, + IRQF_SHARED, + "vc4 crtc", vc4_crtc); if (ret) goto err_destroy_planes; diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 38343d2fb4fb..f1a5fd5dab6f 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -252,6 +252,7 @@ static int vc4_drm_bind(struct device *dev) struct drm_device *drm; struct vc4_dev *vc4; struct device_node *node; + struct drm_crtc *crtc; int ret = 0; dev->coherent_dma_mask = DMA_BIT_MASK(32); @@ -298,6 +299,9 @@ static int vc4_drm_bind(struct device *dev) if (ret < 0) goto unbind_all; + drm_for_each_crtc(crtc, drm) + vc4_crtc_disable_at_boot(crtc); + ret = drm_dev_register(drm, 0); if (ret < 0) goto unbind_all; @@ -368,6 +372,7 @@ static int vc4_platform_drm_remove(struct platform_device *pdev) } static const struct of_device_id vc4_of_match[] = { + { .compatible = "brcm,bcm2711-vc5", }, { .compatible = "brcm,bcm2835-vc4", }, { .compatible = "brcm,cygnus-vc4", }, {}, diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index fa19160c801f..90b911fd2a7f 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -73,7 +73,6 @@ struct vc4_perfmon { struct vc4_dev { struct drm_device *dev; - struct vc4_hdmi *hdmi; struct vc4_hvs *hvs; struct vc4_v3d *v3d; struct vc4_dpi *dpi; @@ -201,6 +200,9 @@ struct vc4_dev { int power_refcount; + /* Set to true when the load tracker is supported. */ + bool load_tracker_available; + /* Set to true when the load tracker is active. */ bool load_tracker_enabled; @@ -320,6 +322,8 @@ struct vc4_hvs { void __iomem *regs; u32 __iomem *dlist; + struct clk *core_clk; + /* Memory manager for CRTCs to allocate space in the display * list. Units are dwords. */ @@ -329,7 +333,11 @@ struct vc4_hvs { spinlock_t mm_lock; struct drm_mm_node mitchell_netravali_filter; + struct debugfs_regset32 regset; + + /* HVS version 5 flag, therefore requires updated dlist structures */ + bool hvs5; }; struct vc4_plane { @@ -420,7 +428,8 @@ to_vc4_plane_state(struct drm_plane_state *state) enum vc4_encoder_type { VC4_ENCODER_TYPE_NONE, - VC4_ENCODER_TYPE_HDMI, + VC4_ENCODER_TYPE_HDMI0, + VC4_ENCODER_TYPE_HDMI1, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_DSI1, @@ -432,6 +441,13 @@ struct vc4_encoder { struct drm_encoder base; enum vc4_encoder_type type; u32 clock_select; + + void (*pre_crtc_configure)(struct drm_encoder *encoder); + void (*pre_crtc_enable)(struct drm_encoder *encoder); + void (*post_crtc_enable)(struct drm_encoder *encoder); + + void (*post_crtc_disable)(struct drm_encoder *encoder); + void (*post_crtc_powerdown)(struct drm_encoder *encoder); }; static inline struct vc4_encoder * @@ -441,13 +457,22 @@ to_vc4_encoder(struct drm_encoder *encoder) } struct vc4_crtc_data { - /* Which channel of the HVS this pixelvalve sources from. */ - int hvs_channel; + /* Bitmask of channels (FIFOs) of the HVS that the output can source from */ + unsigned int hvs_available_channels; + + /* Which output of the HVS this pixelvalve sources from. 
*/ + int hvs_output; }; struct vc4_pv_data { struct vc4_crtc_data base; + /* Depth of the PixelValve FIFO in bytes */ + unsigned int fifo_depth; + + /* Number of pixels output per clock period */ + u8 pixels_per_clock; + enum vc4_encoder_type encoder_types[4]; const char *debugfs_name; @@ -462,14 +487,9 @@ struct vc4_crtc { /* Timestamp at start of vblank irq - unaffected by lock delays. */ ktime_t t_vblank; - /* Which HVS channel we're using for our CRTC. */ - int channel; - u8 lut_r[256]; u8 lut_g[256]; u8 lut_b[256]; - /* Size in pixels of the COB memory allocated to this CRTC. */ - u32 cob_size; struct drm_pending_vblank_event *event; @@ -502,6 +522,7 @@ struct vc4_crtc_state { struct drm_mm_node mm; bool feed_txp; bool txp_armed; + unsigned int assigned_channel; struct { unsigned int left; @@ -511,6 +532,8 @@ struct vc4_crtc_state { } margins; }; +#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1) + static inline struct vc4_crtc_state * to_vc4_crtc_state(struct drm_crtc_state *crtc_state) { @@ -794,6 +817,7 @@ void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo); /* vc4_crtc.c */ extern struct platform_driver vc4_crtc_driver; +int vc4_crtc_disable_at_boot(struct drm_crtc *crtc); int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc, const struct drm_crtc_funcs *crtc_funcs, const struct drm_crtc_helper_funcs *crtc_helper_funcs); @@ -888,11 +912,12 @@ void vc4_irq_reset(struct drm_device *dev); /* vc4_hvs.c */ extern struct platform_driver vc4_hvs_driver; +void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output); +int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output); int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state); void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state); void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state); void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *state); -void vc4_hvs_mode_set_nofb(struct drm_crtc *crtc); void vc4_hvs_dump_state(struct drm_device *dev); void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel); void vc4_hvs_mask_underrun(struct drm_device *dev, int channel); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 15a11cd4de25..03825596a308 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -43,177 +43,101 @@ #include <linux/of_platform.h> #include <linux/pm_runtime.h> #include <linux/rational.h> +#include <linux/reset.h> #include <sound/dmaengine_pcm.h> #include <sound/pcm_drm_eld.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include "media/cec.h" #include "vc4_drv.h" +#include "vc4_hdmi.h" +#include "vc4_hdmi_regs.h" #include "vc4_regs.h" -#define HSM_CLOCK_FREQ 163682864 -#define CEC_CLOCK_FREQ 40000 -#define CEC_CLOCK_DIV (HSM_CLOCK_FREQ / CEC_CLOCK_FREQ) - -/* HDMI audio information */ -struct vc4_hdmi_audio { - struct snd_soc_card card; - struct snd_soc_dai_link link; - struct snd_soc_dai_link_component cpu; - struct snd_soc_dai_link_component codec; - struct snd_soc_dai_link_component platform; - int samplerate; - int channels; - struct snd_dmaengine_dai_dma_data dma_data; - struct snd_pcm_substream *substream; -}; +#define VC5_HDMI_HORZA_HFP_SHIFT 16 +#define VC5_HDMI_HORZA_HFP_MASK VC4_MASK(28, 16) +#define VC5_HDMI_HORZA_VPOS BIT(15) +#define VC5_HDMI_HORZA_HPOS BIT(14) +#define VC5_HDMI_HORZA_HAP_SHIFT 0 +#define VC5_HDMI_HORZA_HAP_MASK VC4_MASK(13, 0) -/* General HDMI hardware state. 
*/ -struct vc4_hdmi { - struct platform_device *pdev; - - struct drm_encoder *encoder; - struct drm_connector *connector; +#define VC5_HDMI_HORZB_HBP_SHIFT 16 +#define VC5_HDMI_HORZB_HBP_MASK VC4_MASK(26, 16) +#define VC5_HDMI_HORZB_HSP_SHIFT 0 +#define VC5_HDMI_HORZB_HSP_MASK VC4_MASK(10, 0) - struct vc4_hdmi_audio audio; +#define VC5_HDMI_VERTA_VSP_SHIFT 24 +#define VC5_HDMI_VERTA_VSP_MASK VC4_MASK(28, 24) +#define VC5_HDMI_VERTA_VFP_SHIFT 16 +#define VC5_HDMI_VERTA_VFP_MASK VC4_MASK(22, 16) +#define VC5_HDMI_VERTA_VAL_SHIFT 0 +#define VC5_HDMI_VERTA_VAL_MASK VC4_MASK(12, 0) - struct i2c_adapter *ddc; - void __iomem *hdmicore_regs; - void __iomem *hd_regs; - int hpd_gpio; - bool hpd_active_low; +#define VC5_HDMI_VERTB_VSPO_SHIFT 16 +#define VC5_HDMI_VERTB_VSPO_MASK VC4_MASK(29, 16) - struct cec_adapter *cec_adap; - struct cec_msg cec_rx_msg; - bool cec_tx_ok; - bool cec_irq_was_rx; +# define VC4_HD_M_SW_RST BIT(2) +# define VC4_HD_M_ENABLE BIT(0) - struct clk *pixel_clock; - struct clk *hsm_clock; +#define CEC_CLOCK_FREQ 40000 +#define VC4_HSM_MID_CLOCK 149985000 - struct debugfs_regset32 hdmi_regset; - struct debugfs_regset32 hd_regset; -}; +static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct vc4_hdmi *vc4_hdmi = node->info_ent->data; + struct drm_printer p = drm_seq_file_printer(m); -#define HDMI_READ(offset) readl(vc4->hdmi->hdmicore_regs + offset) -#define HDMI_WRITE(offset, val) writel(val, vc4->hdmi->hdmicore_regs + offset) -#define HD_READ(offset) readl(vc4->hdmi->hd_regs + offset) -#define HD_WRITE(offset, val) writel(val, vc4->hdmi->hd_regs + offset) + drm_print_regset32(&p, &vc4_hdmi->hdmi_regset); + drm_print_regset32(&p, &vc4_hdmi->hd_regset); -/* VC4 HDMI encoder KMS struct */ -struct vc4_hdmi_encoder { - struct vc4_encoder base; - bool hdmi_monitor; - bool limited_rgb_range; -}; + return 0; +} -static inline struct vc4_hdmi_encoder * -to_vc4_hdmi_encoder(struct drm_encoder *encoder) +static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi) { - return container_of(encoder, struct vc4_hdmi_encoder, base.base); -} + HDMI_WRITE(HDMI_M_CTL, VC4_HD_M_SW_RST); + udelay(1); + HDMI_WRITE(HDMI_M_CTL, 0); -/* VC4 HDMI connector KMS struct */ -struct vc4_hdmi_connector { - struct drm_connector base; + HDMI_WRITE(HDMI_M_CTL, VC4_HD_M_ENABLE); - /* Since the connector is attached to just the one encoder, - * this is the reference to it so we can do the best_encoder() - * hook. 
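Note that the reset helpers above already go through the reworked accessors: HDMI_READ()/HDMI_WRITE() now take a symbolic register name and resolve it through a per-variant table from vc4_hdmi_regs.h, since the same logical register can sit at a different offset, and even in a different register block, on BCM2835 and BCM2711. A reduced, self-contained model of that indirection follows; the offsets here are made up for illustration and the real table also records which iomem mapping each register belongs to:

/* Reduced model of the per-variant register indirection. */
#include <stdio.h>

enum vc4_hdmi_field { HDMI_M_CTL, HDMI_SCHEDULER_CONTROL, HDMI_NFIELDS };

struct reg_descr { unsigned int offset; };

static const struct reg_descr vc4_regs[HDMI_NFIELDS] = {
	[HDMI_M_CTL]             = { 0x00c },	/* illustrative offsets */
	[HDMI_SCHEDULER_CONTROL] = { 0x0c0 },
};

static const struct reg_descr vc5_regs[HDMI_NFIELDS] = {
	[HDMI_M_CTL]             = { 0x010 },
	[HDMI_SCHEDULER_CONTROL] = { 0x088 },
};

struct hdmi {
	const struct reg_descr *registers;
	unsigned char mmio[0x100];
};

static void hdmi_write(struct hdmi *h, enum vc4_hdmi_field f,
		       unsigned char v)
{
	h->mmio[h->registers[f].offset] = v;	/* stand-in for writel() */
}

int main(void)
{
	struct hdmi vc4 = { .registers = vc4_regs };
	struct hdmi vc5 = { .registers = vc5_regs };

	/* The same logical write lands at a different offset per variant. */
	hdmi_write(&vc4, HDMI_M_CTL, 1);
	hdmi_write(&vc5, HDMI_M_CTL, 1);
	printf("%d %d\n", vc4.mmio[0x00c], vc5.mmio[0x010]);
	return 0;
}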
- */ - struct drm_encoder *encoder; -}; + HDMI_WRITE(HDMI_SW_RESET_CONTROL, + VC4_HDMI_SW_RESET_HDMI | + VC4_HDMI_SW_RESET_FORMAT_DETECT); -static inline struct vc4_hdmi_connector * -to_vc4_hdmi_connector(struct drm_connector *connector) -{ - return container_of(connector, struct vc4_hdmi_connector, base); + HDMI_WRITE(HDMI_SW_RESET_CONTROL, 0); } -static const struct debugfs_reg32 hdmi_regs[] = { - VC4_REG32(VC4_HDMI_CORE_REV), - VC4_REG32(VC4_HDMI_SW_RESET_CONTROL), - VC4_REG32(VC4_HDMI_HOTPLUG_INT), - VC4_REG32(VC4_HDMI_HOTPLUG), - VC4_REG32(VC4_HDMI_MAI_CHANNEL_MAP), - VC4_REG32(VC4_HDMI_MAI_CONFIG), - VC4_REG32(VC4_HDMI_MAI_FORMAT), - VC4_REG32(VC4_HDMI_AUDIO_PACKET_CONFIG), - VC4_REG32(VC4_HDMI_RAM_PACKET_CONFIG), - VC4_REG32(VC4_HDMI_HORZA), - VC4_REG32(VC4_HDMI_HORZB), - VC4_REG32(VC4_HDMI_FIFO_CTL), - VC4_REG32(VC4_HDMI_SCHEDULER_CONTROL), - VC4_REG32(VC4_HDMI_VERTA0), - VC4_REG32(VC4_HDMI_VERTA1), - VC4_REG32(VC4_HDMI_VERTB0), - VC4_REG32(VC4_HDMI_VERTB1), - VC4_REG32(VC4_HDMI_TX_PHY_RESET_CTL), - VC4_REG32(VC4_HDMI_TX_PHY_CTL0), - - VC4_REG32(VC4_HDMI_CEC_CNTRL_1), - VC4_REG32(VC4_HDMI_CEC_CNTRL_2), - VC4_REG32(VC4_HDMI_CEC_CNTRL_3), - VC4_REG32(VC4_HDMI_CEC_CNTRL_4), - VC4_REG32(VC4_HDMI_CEC_CNTRL_5), - VC4_REG32(VC4_HDMI_CPU_STATUS), - VC4_REG32(VC4_HDMI_CPU_MASK_STATUS), - - VC4_REG32(VC4_HDMI_CEC_RX_DATA_1), - VC4_REG32(VC4_HDMI_CEC_RX_DATA_2), - VC4_REG32(VC4_HDMI_CEC_RX_DATA_3), - VC4_REG32(VC4_HDMI_CEC_RX_DATA_4), - VC4_REG32(VC4_HDMI_CEC_TX_DATA_1), - VC4_REG32(VC4_HDMI_CEC_TX_DATA_2), - VC4_REG32(VC4_HDMI_CEC_TX_DATA_3), - VC4_REG32(VC4_HDMI_CEC_TX_DATA_4), -}; - -static const struct debugfs_reg32 hd_regs[] = { - VC4_REG32(VC4_HD_M_CTL), - VC4_REG32(VC4_HD_MAI_CTL), - VC4_REG32(VC4_HD_MAI_THR), - VC4_REG32(VC4_HD_MAI_FMT), - VC4_REG32(VC4_HD_MAI_SMP), - VC4_REG32(VC4_HD_VID_CTL), - VC4_REG32(VC4_HD_CSC_CTL), - VC4_REG32(VC4_HD_FRAME_COUNT), -}; - -static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) +static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *dev = node->minor->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_hdmi *hdmi = vc4->hdmi; - struct drm_printer p = drm_seq_file_printer(m); + reset_control_reset(vc4_hdmi->reset); - drm_print_regset32(&p, &hdmi->hdmi_regset); - drm_print_regset32(&p, &hdmi->hd_regset); + HDMI_WRITE(HDMI_DVP_CTL, 0); - return 0; + HDMI_WRITE(HDMI_CLOCK_STOP, + HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL); } static enum drm_connector_status vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) { - struct drm_device *dev = connector->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); - if (vc4->hdmi->hpd_gpio) { - if (gpio_get_value_cansleep(vc4->hdmi->hpd_gpio) ^ - vc4->hdmi->hpd_active_low) + if (vc4_hdmi->hpd_gpio) { + if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^ + vc4_hdmi->hpd_active_low) return connector_status_connected; - cec_phys_addr_invalidate(vc4->hdmi->cec_adap); + cec_phys_addr_invalidate(vc4_hdmi->cec_adap); return connector_status_disconnected; } - if (drm_probe_ddc(vc4->hdmi->ddc)) + if (drm_probe_ddc(vc4_hdmi->ddc)) return connector_status_connected; - if (HDMI_READ(VC4_HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) + if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) return connector_status_connected; - cec_phys_addr_invalidate(vc4->hdmi->cec_adap); + cec_phys_addr_invalidate(vc4_hdmi->cec_adap); return 
connector_status_disconnected; } @@ -225,17 +149,13 @@ static void vc4_hdmi_connector_destroy(struct drm_connector *connector) static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) { - struct vc4_hdmi_connector *vc4_connector = - to_vc4_hdmi_connector(connector); - struct drm_encoder *encoder = vc4_connector->encoder; - struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); - struct drm_device *dev = connector->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); + struct vc4_hdmi_encoder *vc4_encoder = &vc4_hdmi->encoder; int ret = 0; struct edid *edid; - edid = drm_get_edid(connector, vc4->hdmi->ddc); - cec_s_phys_addr_from_edid(vc4->hdmi->cec_adap, edid); + edid = drm_get_edid(connector, vc4_hdmi->ddc); + cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid); if (!edid) return -ENODEV; @@ -267,32 +187,23 @@ static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = .get_modes = vc4_hdmi_connector_get_modes, }; -static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, - struct drm_encoder *encoder, - struct i2c_adapter *ddc) +static int vc4_hdmi_connector_init(struct drm_device *dev, + struct vc4_hdmi *vc4_hdmi) { - struct drm_connector *connector; - struct vc4_hdmi_connector *hdmi_connector; + struct drm_connector *connector = &vc4_hdmi->connector; + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; int ret; - hdmi_connector = devm_kzalloc(dev->dev, sizeof(*hdmi_connector), - GFP_KERNEL); - if (!hdmi_connector) - return ERR_PTR(-ENOMEM); - connector = &hdmi_connector->base; - - hdmi_connector->encoder = encoder; - drm_connector_init_with_ddc(dev, connector, &vc4_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA, - ddc); + vc4_hdmi->ddc); drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs); /* Create and attach TV margin props to this connector. 
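The connector is no longer a separately devm-allocated wrapper here: it is embedded in struct vc4_hdmi, and helpers such as connector_to_vc4_hdmi() recover the containing device with container_of(). The pattern in isolation, as a standalone sketch:

/* Embed-and-container_of pattern replacing the old wrapper struct. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_connector { int status; };

struct vc4_hdmi {
	struct drm_connector connector;	/* embedded, not a pointer */
	const char *variant_name;
};

static struct vc4_hdmi *connector_to_vc4_hdmi(struct drm_connector *c)
{
	return container_of(c, struct vc4_hdmi, connector);
}

int main(void)
{
	struct vc4_hdmi hdmi = { .variant_name = "bcm2711-hdmi0" };
	struct drm_connector *c = &hdmi.connector;

	/* Callbacks that only receive the connector can still reach
	 * the full device state. */
	printf("%s\n", connector_to_vc4_hdmi(c)->variant_name);
	return 0;
}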
*/ ret = drm_mode_create_tv_margin_properties(dev); if (ret) - return ERR_PTR(ret); + return ret; drm_connector_attach_tv_margin_properties(connector); @@ -304,35 +215,37 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, drm_connector_attach_encoder(connector, encoder); - return connector; + return 0; } static int vc4_hdmi_stop_packet(struct drm_encoder *encoder, enum hdmi_infoframe_type type) { - struct drm_device *dev = encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); u32 packet_id = type - 0x80; - HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, - HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id)); + HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, + HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id)); - return wait_for(!(HDMI_READ(VC4_HDMI_RAM_PACKET_STATUS) & + return wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) & BIT(packet_id)), 100); } static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, union hdmi_infoframe *frame) { - struct drm_device *dev = encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); u32 packet_id = frame->any.type - 0x80; - u32 packet_reg = VC4_HDMI_RAM_PACKET(packet_id); + const struct vc4_hdmi_register *ram_packet_start = + &vc4_hdmi->variant->registers[HDMI_RAM_PACKET_START]; + u32 packet_reg = ram_packet_start->offset + VC4_HDMI_PACKET_STRIDE * packet_id; + void __iomem *base = __vc4_hdmi_get_field_base(vc4_hdmi, + ram_packet_start->reg); uint8_t buffer[VC4_HDMI_PACKET_STRIDE]; ssize_t len, i; int ret; - WARN_ONCE(!(HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) & + WARN_ONCE(!(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & VC4_HDMI_RAM_PACKET_ENABLE), "Packet RAM has to be on to store the packet."); @@ -347,23 +260,23 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, } for (i = 0; i < len; i += 7) { - HDMI_WRITE(packet_reg, - buffer[i + 0] << 0 | - buffer[i + 1] << 8 | - buffer[i + 2] << 16); + writel(buffer[i + 0] << 0 | + buffer[i + 1] << 8 | + buffer[i + 2] << 16, + base + packet_reg); packet_reg += 4; - HDMI_WRITE(packet_reg, - buffer[i + 3] << 0 | - buffer[i + 4] << 8 | - buffer[i + 5] << 16 | - buffer[i + 6] << 24); + writel(buffer[i + 3] << 0 | + buffer[i + 4] << 8 | + buffer[i + 5] << 16 | + buffer[i + 6] << 24, + base + packet_reg); packet_reg += 4; } - HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, - HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) | BIT(packet_id)); - ret = wait_for((HDMI_READ(VC4_HDMI_RAM_PACKET_STATUS) & + HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, + HDMI_READ(HDMI_RAM_PACKET_CONFIG) | BIT(packet_id)); + ret = wait_for((HDMI_READ(HDMI_RAM_PACKET_STATUS) & BIT(packet_id)), 100); if (ret) DRM_ERROR("Failed to wait for infoframe to start: %d\n", ret); @@ -371,24 +284,24 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) { + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); - struct vc4_dev *vc4 = encoder->dev->dev_private; - struct vc4_hdmi *hdmi = vc4->hdmi; - struct drm_connector_state *cstate = hdmi->connector->state; + struct drm_connector *connector = &vc4_hdmi->connector; + struct drm_connector_state *cstate = connector->state; struct drm_crtc *crtc = encoder->crtc; const struct drm_display_mode *mode = &crtc->state->adjusted_mode; union hdmi_infoframe frame; int ret; ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, - hdmi->connector, 
mode); + connector, mode); if (ret < 0) { DRM_ERROR("couldn't fill AVI infoframe\n"); return; } drm_hdmi_avi_infoframe_quant_range(&frame.avi, - hdmi->connector, mode, + connector, mode, vc4_encoder->limited_rgb_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL); @@ -416,9 +329,7 @@ static void vc4_hdmi_set_spd_infoframe(struct drm_encoder *encoder) static void vc4_hdmi_set_audio_infoframe(struct drm_encoder *encoder) { - struct drm_device *drm = encoder->dev; - struct vc4_dev *vc4 = drm->dev_private; - struct vc4_hdmi *hdmi = vc4->hdmi; + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); union hdmi_infoframe frame; int ret; @@ -427,45 +338,139 @@ static void vc4_hdmi_set_audio_infoframe(struct drm_encoder *encoder) frame.audio.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM; frame.audio.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM; frame.audio.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM; - frame.audio.channels = hdmi->audio.channels; + frame.audio.channels = vc4_hdmi->audio.channels; vc4_hdmi_write_infoframe(encoder, &frame); } static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder) { + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + vc4_hdmi_set_avi_infoframe(encoder); vc4_hdmi_set_spd_infoframe(encoder); + /* + * If audio was streaming, then we need to re-enable the audio + * infoframe here during encoder_enable. + */ + if (vc4_hdmi->audio.streaming) + vc4_hdmi_set_audio_infoframe(encoder); } -static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder) +static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder) { - struct drm_device *dev = encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_hdmi *hdmi = vc4->hdmi; + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + + HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0); + + HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | + VC4_HD_VID_CTL_CLRRGB | VC4_HD_VID_CTL_CLRSYNC); + + HDMI_WRITE(HDMI_VID_CTL, + HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX); +} + +static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder) +{ + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); int ret; - HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, 0); + if (vc4_hdmi->variant->phy_disable) + vc4_hdmi->variant->phy_disable(vc4_hdmi); - HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0xf << 16); - HD_WRITE(VC4_HD_VID_CTL, - HD_READ(VC4_HD_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE); + HDMI_WRITE(HDMI_VID_CTL, + HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE); - clk_disable_unprepare(hdmi->pixel_clock); + clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock); + clk_disable_unprepare(vc4_hdmi->hsm_clock); + clk_disable_unprepare(vc4_hdmi->pixel_clock); - ret = pm_runtime_put(&hdmi->pdev->dev); + ret = pm_runtime_put(&vc4_hdmi->pdev->dev); if (ret < 0) DRM_ERROR("Failed to release power domain: %d\n", ret); } -static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder) +static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder) +{ +} + +static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) +{ + u32 csc_ctl; + + csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR, + VC4_HD_CSC_CTL_ORDER); + + if (enable) { + /* CEA VICs other than #1 require limited range RGB + * output unless overridden by an AVI infoframe. + * Apply a colorspace conversion to squash 0-255 down + * to 16-235.
The matrix here is: + * + * [ 0 0 0.8594 16] + * [ 0 0.8594 0 16] + * [ 0.8594 0 0 16] + * [ 0 0 0 1] + */ + csc_ctl |= VC4_HD_CSC_CTL_ENABLE; + csc_ctl |= VC4_HD_CSC_CTL_RGB2YCC; + csc_ctl |= VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM, + VC4_HD_CSC_CTL_MODE); + + HDMI_WRITE(HDMI_CSC_12_11, (0x000 << 16) | 0x000); + HDMI_WRITE(HDMI_CSC_14_13, (0x100 << 16) | 0x6e0); + HDMI_WRITE(HDMI_CSC_22_21, (0x6e0 << 16) | 0x000); + HDMI_WRITE(HDMI_CSC_24_23, (0x100 << 16) | 0x000); + HDMI_WRITE(HDMI_CSC_32_31, (0x000 << 16) | 0x6e0); + HDMI_WRITE(HDMI_CSC_34_33, (0x100 << 16) | 0x000); + } + + /* The RGB order applies even when CSC is disabled. */ + HDMI_WRITE(HDMI_CSC_CTL, csc_ctl); +} + +static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) +{ + u32 csc_ctl; + + csc_ctl = 0x07; /* RGB_CONVERT_MODE = custom matrix, || USE_RGB_TO_YCBCR */ + + if (enable) { + /* CEA VICs other than #1 require limited range RGB + * output unless overridden by an AVI infoframe. + * Apply a colorspace conversion to squash 0-255 down + * to 16-235. The matrix here is: + * + * [ 0.8594 0 0 16] + * [ 0 0.8594 0 16] + * [ 0 0 0.8594 16] + * [ 0 0 0 1] + * Matrix is signed 2p13 fixed point, with signed 9p6 offsets + */ + HDMI_WRITE(HDMI_CSC_12_11, (0x0000 << 16) | 0x1b80); + HDMI_WRITE(HDMI_CSC_14_13, (0x0400 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_22_21, (0x1b80 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_24_23, (0x0400 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_32_31, (0x0000 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_34_33, (0x0400 << 16) | 0x1b80); + } else { + /* Still use the matrix for full range, but make it unity. + * Matrix is signed 2p13 fixed point, with signed 9p6 offsets + */ + HDMI_WRITE(HDMI_CSC_12_11, (0x0000 << 16) | 0x2000); + HDMI_WRITE(HDMI_CSC_14_13, (0x0000 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_22_21, (0x2000 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_24_23, (0x0000 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_32_31, (0x0000 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_34_33, (0x0000 << 16) | 0x2000); + } + + HDMI_WRITE(HDMI_CSC_CTL, csc_ctl); +} + +static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, + struct drm_display_mode *mode) { - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; - struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); - struct drm_device *dev = encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_hdmi *hdmi = vc4->hdmi; - bool debug_dump_regs = false; bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC; bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; @@ -483,213 +488,285 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder) mode->crtc_vsync_end - interlaced, VC4_HDMI_VERTB_VBP)); - u32 csc_ctl; + + HDMI_WRITE(HDMI_HORZA, + (vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) | + (hsync_pos ?
VC4_HDMI_HORZA_HPOS : 0) | + VC4_SET_FIELD(mode->hdisplay * pixel_rep, + VC4_HDMI_HORZA_HAP)); + + HDMI_WRITE(HDMI_HORZB, + VC4_SET_FIELD((mode->htotal - + mode->hsync_end) * pixel_rep, + VC4_HDMI_HORZB_HBP) | + VC4_SET_FIELD((mode->hsync_end - + mode->hsync_start) * pixel_rep, + VC4_HDMI_HORZB_HSP) | + VC4_SET_FIELD((mode->hsync_start - + mode->hdisplay) * pixel_rep, + VC4_HDMI_HORZB_HFP)); + + HDMI_WRITE(HDMI_VERTA0, verta); + HDMI_WRITE(HDMI_VERTA1, verta); + + HDMI_WRITE(HDMI_VERTB0, vertb_even); + HDMI_WRITE(HDMI_VERTB1, vertb); +} +static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, + struct drm_display_mode *mode) +{ + bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; + bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC; + bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1; + u32 verta = (VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start, + VC5_HDMI_VERTA_VSP) | + VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay, + VC5_HDMI_VERTA_VFP) | + VC4_SET_FIELD(mode->crtc_vdisplay, VC5_HDMI_VERTA_VAL)); + u32 vertb = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) | + VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end, + VC4_HDMI_VERTB_VBP)); + u32 vertb_even = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) | + VC4_SET_FIELD(mode->crtc_vtotal - + mode->crtc_vsync_end - + interlaced, + VC4_HDMI_VERTB_VBP)); + + HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021); + HDMI_WRITE(HDMI_HORZA, + (vsync_pos ? VC5_HDMI_HORZA_VPOS : 0) | + (hsync_pos ? VC5_HDMI_HORZA_HPOS : 0) | + VC4_SET_FIELD(mode->hdisplay * pixel_rep, + VC5_HDMI_HORZA_HAP) | + VC4_SET_FIELD((mode->hsync_start - + mode->hdisplay) * pixel_rep, + VC5_HDMI_HORZA_HFP)); + + HDMI_WRITE(HDMI_HORZB, + VC4_SET_FIELD((mode->htotal - + mode->hsync_end) * pixel_rep, + VC5_HDMI_HORZB_HBP) | + VC4_SET_FIELD((mode->hsync_end - + mode->hsync_start) * pixel_rep, + VC5_HDMI_HORZB_HSP)); + + HDMI_WRITE(HDMI_VERTA0, verta); + HDMI_WRITE(HDMI_VERTA1, verta); + + HDMI_WRITE(HDMI_VERTB0, vertb_even); + HDMI_WRITE(HDMI_VERTB1, vertb); + + HDMI_WRITE(HDMI_CLOCK_STOP, 0); +} + +static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi) +{ + u32 drift; + int ret; + + drift = HDMI_READ(HDMI_FIFO_CTL); + drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK; + + HDMI_WRITE(HDMI_FIFO_CTL, + drift & ~VC4_HDMI_FIFO_CTL_RECENTER); + HDMI_WRITE(HDMI_FIFO_CTL, + drift | VC4_HDMI_FIFO_CTL_RECENTER); + usleep_range(1000, 1100); + HDMI_WRITE(HDMI_FIFO_CTL, + drift & ~VC4_HDMI_FIFO_CTL_RECENTER); + HDMI_WRITE(HDMI_FIFO_CTL, + drift | VC4_HDMI_FIFO_CTL_RECENTER); + + ret = wait_for(HDMI_READ(HDMI_FIFO_CTL) & + VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1); + WARN_ONCE(ret, "Timeout waiting for " + "VC4_HDMI_FIFO_CTL_RECENTER_DONE"); +} + +static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder) +{ + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + unsigned long pixel_rate, hsm_rate; int ret; - ret = pm_runtime_get_sync(&hdmi->pdev->dev); + ret = pm_runtime_get_sync(&vc4_hdmi->pdev->dev); if (ret < 0) { DRM_ERROR("Failed to retain power domain: %d\n", ret); return; } - ret = clk_set_rate(hdmi->pixel_clock, - mode->clock * 1000 * - ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1)); + pixel_rate = mode->clock * 1000 * ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 
2 : 1); + ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate); if (ret) { DRM_ERROR("Failed to set pixel clock rate: %d\n", ret); return; } - ret = clk_prepare_enable(hdmi->pixel_clock); + ret = clk_prepare_enable(vc4_hdmi->pixel_clock); if (ret) { DRM_ERROR("Failed to turn on pixel clock: %d\n", ret); return; } - HDMI_WRITE(VC4_HDMI_SW_RESET_CONTROL, - VC4_HDMI_SW_RESET_HDMI | - VC4_HDMI_SW_RESET_FORMAT_DETECT); - - HDMI_WRITE(VC4_HDMI_SW_RESET_CONTROL, 0); - - /* PHY should be in reset, like - * vc4_hdmi_encoder_disable() does. + /* + * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must + * be faster than pixel clock, infinitesimally faster, tested in + * simulation. Otherwise, exact value is unimportant for HDMI + * operation." This conflicts with bcm2835's vc4 documentation, which + * states HSM's clock has to be at least 108% of the pixel clock. + * + * Real-life tests reveal that vc4's firmware statement holds up, and + * users are able to use pixel clocks closer to HSM's, namely for + * 1920x1200@60Hz. So it was decided to leave a 1% margin between + * both clocks. Which, for RPi0-3, implies a maximum pixel clock of + * 162MHz. + * + * Additionally, the AXI clock needs to be at least 25% of + * pixel clock, but HSM ends up being the limiting factor. */ - HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0xf << 16); + hsm_rate = max_t(unsigned long, 120000000, (pixel_rate / 100) * 101); + ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate); + if (ret) { + DRM_ERROR("Failed to set HSM clock rate: %d\n", ret); + return; + } - HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0); + ret = clk_prepare_enable(vc4_hdmi->hsm_clock); + if (ret) { + DRM_ERROR("Failed to turn on HSM clock: %d\n", ret); + clk_disable_unprepare(vc4_hdmi->pixel_clock); + return; + } - if (debug_dump_regs) { - struct drm_printer p = drm_info_printer(&hdmi->pdev->dev); + /* + * FIXME: When the pixel freq is 594MHz (4k60), this needs to be setup + * at 300MHz. + */ + ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, + (hsm_rate > VC4_HSM_MID_CLOCK ? 150000000 : 75000000)); + if (ret) { + DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret); + clk_disable_unprepare(vc4_hdmi->hsm_clock); + clk_disable_unprepare(vc4_hdmi->pixel_clock); + return; + } - dev_info(&hdmi->pdev->dev, "HDMI regs before:\n"); - drm_print_regset32(&p, &hdmi->hdmi_regset); - drm_print_regset32(&p, &hdmi->hd_regset); + ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock); + if (ret) { + DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret); + clk_disable_unprepare(vc4_hdmi->hsm_clock); + clk_disable_unprepare(vc4_hdmi->pixel_clock); + return; } - HD_WRITE(VC4_HD_VID_CTL, 0); + if (vc4_hdmi->variant->reset) + vc4_hdmi->variant->reset(vc4_hdmi); - HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL, - HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) | + if (vc4_hdmi->variant->phy_init) + vc4_hdmi->variant->phy_init(vc4_hdmi, mode); + + HDMI_WRITE(HDMI_SCHEDULER_CONTROL, + HDMI_READ(HDMI_SCHEDULER_CONTROL) | VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT | VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS); - HDMI_WRITE(VC4_HDMI_HORZA, - (vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) | - (hsync_pos ?
VC4_HDMI_HORZA_HPOS : 0) | - VC4_SET_FIELD(mode->hdisplay * pixel_rep, - VC4_HDMI_HORZA_HAP)); - - HDMI_WRITE(VC4_HDMI_HORZB, - VC4_SET_FIELD((mode->htotal - - mode->hsync_end) * pixel_rep, - VC4_HDMI_HORZB_HBP) | - VC4_SET_FIELD((mode->hsync_end - - mode->hsync_start) * pixel_rep, - VC4_HDMI_HORZB_HSP) | - VC4_SET_FIELD((mode->hsync_start - - mode->hdisplay) * pixel_rep, - VC4_HDMI_HORZB_HFP)); - - HDMI_WRITE(VC4_HDMI_VERTA0, verta); - HDMI_WRITE(VC4_HDMI_VERTA1, verta); - - HDMI_WRITE(VC4_HDMI_VERTB0, vertb_even); - HDMI_WRITE(VC4_HDMI_VERTB1, vertb); - - HD_WRITE(VC4_HD_VID_CTL, - (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) | - (hsync_pos ? 0 : VC4_HD_VID_CTL_HSYNC_LOW)); + if (vc4_hdmi->variant->set_timings) + vc4_hdmi->variant->set_timings(vc4_hdmi, mode); +} - csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR, - VC4_HD_CSC_CTL_ORDER); +static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder) +{ + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; + struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); if (vc4_encoder->hdmi_monitor && - drm_default_rgb_quant_range(mode) == - HDMI_QUANTIZATION_RANGE_LIMITED) { - /* CEA VICs other than #1 requre limited range RGB - * output unless overridden by an AVI infoframe. - * Apply a colorspace conversion to squash 0-255 down - * to 16-235. The matrix here is: - * - * [ 0 0 0.8594 16] - * [ 0 0.8594 0 16] - * [ 0.8594 0 0 16] - * [ 0 0 0 1] - */ - csc_ctl |= VC4_HD_CSC_CTL_ENABLE; - csc_ctl |= VC4_HD_CSC_CTL_RGB2YCC; - csc_ctl |= VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM, - VC4_HD_CSC_CTL_MODE); + drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_LIMITED) { + if (vc4_hdmi->variant->csc_setup) + vc4_hdmi->variant->csc_setup(vc4_hdmi, true); - HD_WRITE(VC4_HD_CSC_12_11, (0x000 << 16) | 0x000); - HD_WRITE(VC4_HD_CSC_14_13, (0x100 << 16) | 0x6e0); - HD_WRITE(VC4_HD_CSC_22_21, (0x6e0 << 16) | 0x000); - HD_WRITE(VC4_HD_CSC_24_23, (0x100 << 16) | 0x000); - HD_WRITE(VC4_HD_CSC_32_31, (0x000 << 16) | 0x6e0); - HD_WRITE(VC4_HD_CSC_34_33, (0x100 << 16) | 0x000); vc4_encoder->limited_rgb_range = true; } else { + if (vc4_hdmi->variant->csc_setup) + vc4_hdmi->variant->csc_setup(vc4_hdmi, false); + vc4_encoder->limited_rgb_range = false; } - /* The RGB order applies even when CSC is disabled. */ - HD_WRITE(VC4_HD_CSC_CTL, csc_ctl); - - HDMI_WRITE(VC4_HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N); + HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N); +} - if (debug_dump_regs) { - struct drm_printer p = drm_info_printer(&hdmi->pdev->dev); +static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder) +{ + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); + bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; + bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC; + int ret; - dev_info(&hdmi->pdev->dev, "HDMI regs after:\n"); - drm_print_regset32(&p, &hdmi->hdmi_regset); - drm_print_regset32(&p, &hdmi->hd_regset); - } + HDMI_WRITE(HDMI_VID_CTL, + VC4_HD_VID_CTL_ENABLE | + VC4_HD_VID_CTL_UNDERFLOW_ENABLE | + VC4_HD_VID_CTL_FRAME_COUNTER_RESET | + (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) | + (hsync_pos ? 
0 : VC4_HD_VID_CTL_HSYNC_LOW)); - HD_WRITE(VC4_HD_VID_CTL, - HD_READ(VC4_HD_VID_CTL) | - VC4_HD_VID_CTL_ENABLE | - VC4_HD_VID_CTL_UNDERFLOW_ENABLE | - VC4_HD_VID_CTL_FRAME_COUNTER_RESET); + HDMI_WRITE(HDMI_VID_CTL, + HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_BLANKPIX); if (vc4_encoder->hdmi_monitor) { - HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL, - HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) | + HDMI_WRITE(HDMI_SCHEDULER_CONTROL, + HDMI_READ(HDMI_SCHEDULER_CONTROL) | VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI); - ret = wait_for(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) & + ret = wait_for(HDMI_READ(HDMI_SCHEDULER_CONTROL) & VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 1000); WARN_ONCE(ret, "Timeout waiting for " "VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n"); } else { - HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, - HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) & + HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, + HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~(VC4_HDMI_RAM_PACKET_ENABLE)); - HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL, - HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) & + HDMI_WRITE(HDMI_SCHEDULER_CONTROL, + HDMI_READ(HDMI_SCHEDULER_CONTROL) & ~VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI); - ret = wait_for(!(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) & + ret = wait_for(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) & VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE), 1000); WARN_ONCE(ret, "Timeout waiting for " "!VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n"); } if (vc4_encoder->hdmi_monitor) { - u32 drift; - - WARN_ON(!(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) & + WARN_ON(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) & VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE)); - HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL, - HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) | + HDMI_WRITE(HDMI_SCHEDULER_CONTROL, + HDMI_READ(HDMI_SCHEDULER_CONTROL) | VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT); - HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, + HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, VC4_HDMI_RAM_PACKET_ENABLE); vc4_hdmi_set_infoframes(encoder); - - drift = HDMI_READ(VC4_HDMI_FIFO_CTL); - drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK; - - HDMI_WRITE(VC4_HDMI_FIFO_CTL, - drift & ~VC4_HDMI_FIFO_CTL_RECENTER); - HDMI_WRITE(VC4_HDMI_FIFO_CTL, - drift | VC4_HDMI_FIFO_CTL_RECENTER); - usleep_range(1000, 1100); - HDMI_WRITE(VC4_HDMI_FIFO_CTL, - drift & ~VC4_HDMI_FIFO_CTL_RECENTER); - HDMI_WRITE(VC4_HDMI_FIFO_CTL, - drift | VC4_HDMI_FIFO_CTL_RECENTER); - - ret = wait_for(HDMI_READ(VC4_HDMI_FIFO_CTL) & - VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1); - WARN_ONCE(ret, "Timeout waiting for " - "VC4_HDMI_FIFO_CTL_RECENTER_DONE"); } + + vc4_hdmi_recenter_fifo(vc4_hdmi); +} + +static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder) +{ } static enum drm_mode_status -vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc, +vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder, const struct drm_display_mode *mode) { - /* - * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must - * be faster than pixel clock, infinitesimally faster, tested in - * simulation. Otherwise, exact value is unimportant for HDMI - * operation." This conflicts with bcm2835's vc4 documentation, which - * states HSM's clock has to be at least 108% of the pixel clock. - * - * Real life tests reveal that vc4's firmware statement holds up, and - * users are able to use pixel clocks closer to HSM's, namely for - * 1920x1200@60Hz. So it was decided to have leave a 1% margin between - * both clocks. Which, for RPi0-3 implies a maximum pixel clock of - * 162MHz. - * - * Additionally, the AXI clock needs to be at least 25% of - * pixel clock, but HSM ends up being the limiting factor. 
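The replacement for this removed check lives in vc4_hdmi_encoder_pre_crtc_configure() above: the HSM clock is requested at max(120 MHz, 101% of the pixel clock), and the BVB clock steps up once that exceeds VC4_HSM_MID_CLOCK, which is numerically exactly 1.01 × 148.5 MHz, i.e. the 1080p60 pixel clock plus the same 1% margin. A worked run of those formulas for a few common pixel clocks:

/* Clock choices from pre_crtc_configure, evaluated as an aside. */
#include <stdio.h>

#define VC4_HSM_MID_CLOCK 149985000UL

int main(void)
{
	unsigned long modes[] = { 25200000, 74250000, 148500000, 297000000 };
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		unsigned long pixel_rate = modes[i];
		unsigned long hsm_rate = (pixel_rate / 100) * 101;

		if (hsm_rate < 120000000UL)
			hsm_rate = 120000000UL;	/* the max_t() floor */

		printf("pixel %9lu -> hsm >= %9lu, bvb >= %lu\n",
		       pixel_rate, hsm_rate,
		       hsm_rate > VC4_HSM_MID_CLOCK ?
				150000000UL : 75000000UL);
	}
	return 0;
}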
- */ - if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100)) + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + + if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock) return MODE_CLOCK_HIGH; return MODE_OK; @@ -701,34 +778,54 @@ static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = { .enable = vc4_hdmi_encoder_enable, }; +static u32 vc4_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask) +{ + int i; + u32 channel_map = 0; + + for (i = 0; i < 8; i++) { + if (channel_mask & BIT(i)) + channel_map |= i << (3 * i); + } + return channel_map; +} + +static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask) +{ + int i; + u32 channel_map = 0; + + for (i = 0; i < 8; i++) { + if (channel_mask & BIT(i)) + channel_map |= i << (4 * i); + } + return channel_map; +} + /* HDMI audio codec callbacks */ -static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *hdmi) +static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi) { - struct drm_device *drm = hdmi->encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(drm); - u32 hsm_clock = clk_get_rate(hdmi->hsm_clock); + u32 hsm_clock = clk_get_rate(vc4_hdmi->audio_clock); unsigned long n, m; - rational_best_approximation(hsm_clock, hdmi->audio.samplerate, + rational_best_approximation(hsm_clock, vc4_hdmi->audio.samplerate, VC4_HD_MAI_SMP_N_MASK >> VC4_HD_MAI_SMP_N_SHIFT, (VC4_HD_MAI_SMP_M_MASK >> VC4_HD_MAI_SMP_M_SHIFT) + 1, &n, &m); - HD_WRITE(VC4_HD_MAI_SMP, - VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) | - VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M)); + HDMI_WRITE(HDMI_MAI_SMP, + VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) | + VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M)); } -static void vc4_hdmi_set_n_cts(struct vc4_hdmi *hdmi) +static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi) { - struct drm_encoder *encoder = hdmi->encoder; + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; struct drm_crtc *crtc = encoder->crtc; - struct drm_device *drm = encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(drm); const struct drm_display_mode *mode = &crtc->state->adjusted_mode; - u32 samplerate = hdmi->audio.samplerate; + u32 samplerate = vc4_hdmi->audio.samplerate; u32 n, cts; u64 tmp; @@ -737,7 +834,7 @@ static void vc4_hdmi_set_n_cts(struct vc4_hdmi *hdmi) do_div(tmp, 128 * samplerate); cts = tmp; - HDMI_WRITE(VC4_HDMI_CRP_CFG, + HDMI_WRITE(HDMI_CRP_CFG, VC4_HDMI_CRP_CFG_EXTERNAL_CTS_EN | VC4_SET_FIELD(n, VC4_HDMI_CRP_CFG_N)); @@ -746,8 +843,8 @@ static void vc4_hdmi_set_n_cts(struct vc4_hdmi *hdmi) * providing a CTS_1 value. 
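The CTS computed just above follows the HDMI audio clock regeneration relation, CTS = f_pixel × N / (128 × f_s). A standalone check using the HDMI-spec recommended N values for the common sample rates (the derivation of n itself sits outside this hunk):

/* Audio clock regeneration math from vc4_hdmi_set_n_cts(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pixel_clock = 148500000;	/* 1080p60 */
	struct { uint32_t fs, n; } t[] = {
		{ 32000, 4096 }, { 44100, 6272 }, { 48000, 6144 },
	};
	unsigned int i;

	for (i = 0; i < 3; i++) {
		/* Mirrors the driver's do_div() on a u64 product. */
		uint64_t cts = pixel_clock * t[i].n / (128ULL * t[i].fs);

		printf("fs=%5u N=%4u -> CTS=%llu\n", t[i].fs, t[i].n,
		       (unsigned long long)cts);	/* 148500, 165000, 148500 */
	}
	return 0;
}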
The two CTS values are alternated * between based on the period fields */ - HDMI_WRITE(VC4_HDMI_CTS_0, cts); - HDMI_WRITE(VC4_HDMI_CTS_1, cts); + HDMI_WRITE(HDMI_CTS_0, cts); + HDMI_WRITE(HDMI_CTS_1, cts); } static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai) @@ -760,26 +857,25 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai) static int vc4_hdmi_audio_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { - struct vc4_hdmi *hdmi = dai_to_hdmi(dai); - struct drm_encoder *encoder = hdmi->encoder; - struct vc4_dev *vc4 = to_vc4_dev(encoder->dev); + struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; + struct drm_connector *connector = &vc4_hdmi->connector; int ret; - if (hdmi->audio.substream && hdmi->audio.substream != substream) + if (vc4_hdmi->audio.substream && vc4_hdmi->audio.substream != substream) return -EINVAL; - hdmi->audio.substream = substream; + vc4_hdmi->audio.substream = substream; /* * If the HDMI encoder hasn't probed, or the encoder is * currently in DVI mode, treat the codec dai as missing. */ - if (!encoder->crtc || !(HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) & + if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & VC4_HDMI_RAM_PACKET_ENABLE)) return -ENODEV; - ret = snd_pcm_hw_constraint_eld(substream->runtime, - hdmi->connector->eld); + ret = snd_pcm_hw_constraint_eld(substream->runtime, connector->eld); if (ret) return ret; @@ -791,34 +887,33 @@ static int vc4_hdmi_audio_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) return 0; } -static void vc4_hdmi_audio_reset(struct vc4_hdmi *hdmi) +static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi) { - struct drm_encoder *encoder = hdmi->encoder; - struct drm_device *drm = encoder->dev; - struct device *dev = &hdmi->pdev->dev; - struct vc4_dev *vc4 = to_vc4_dev(drm); + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; + struct device *dev = &vc4_hdmi->pdev->dev; int ret; + vc4_hdmi->audio.streaming = false; ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO); if (ret) dev_err(dev, "Failed to stop audio infoframe: %d\n", ret); - HD_WRITE(VC4_HD_MAI_CTL, VC4_HD_MAI_CTL_RESET); - HD_WRITE(VC4_HD_MAI_CTL, VC4_HD_MAI_CTL_ERRORF); - HD_WRITE(VC4_HD_MAI_CTL, VC4_HD_MAI_CTL_FLUSH); + HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_RESET); + HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_ERRORF); + HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_FLUSH); } static void vc4_hdmi_audio_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { - struct vc4_hdmi *hdmi = dai_to_hdmi(dai); + struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); - if (substream != hdmi->audio.substream) + if (substream != vc4_hdmi->audio.substream) return; - vc4_hdmi_audio_reset(hdmi); + vc4_hdmi_audio_reset(vc4_hdmi); - hdmi->audio.substream = NULL; + vc4_hdmi->audio.substream = NULL; } /* HDMI audio codec callbacks */ @@ -826,72 +921,65 @@ static int vc4_hdmi_audio_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { - struct vc4_hdmi *hdmi = dai_to_hdmi(dai); - struct drm_encoder *encoder = hdmi->encoder; - struct drm_device *drm = encoder->dev; - struct device *dev = &hdmi->pdev->dev; - struct vc4_dev *vc4 = to_vc4_dev(drm); + struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); + struct device *dev = &vc4_hdmi->pdev->dev; u32 audio_packet_config, channel_mask; - u32 channel_map, i; + u32 channel_map; - if (substream != hdmi->audio.substream) + if (substream != 
vc4_hdmi->audio.substream) return -EINVAL; dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__, params_rate(params), params_width(params), params_channels(params)); - hdmi->audio.channels = params_channels(params); - hdmi->audio.samplerate = params_rate(params); + vc4_hdmi->audio.channels = params_channels(params); + vc4_hdmi->audio.samplerate = params_rate(params); - HD_WRITE(VC4_HD_MAI_CTL, - VC4_HD_MAI_CTL_RESET | - VC4_HD_MAI_CTL_FLUSH | - VC4_HD_MAI_CTL_DLATE | - VC4_HD_MAI_CTL_ERRORE | - VC4_HD_MAI_CTL_ERRORF); + HDMI_WRITE(HDMI_MAI_CTL, + VC4_HD_MAI_CTL_RESET | + VC4_HD_MAI_CTL_FLUSH | + VC4_HD_MAI_CTL_DLATE | + VC4_HD_MAI_CTL_ERRORE | + VC4_HD_MAI_CTL_ERRORF); - vc4_hdmi_audio_set_mai_clock(hdmi); + vc4_hdmi_audio_set_mai_clock(vc4_hdmi); + /* The B frame identifier should match the value used by alsa-lib (8) */ audio_packet_config = VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_SAMPLE_FLAT | VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_INACTIVE_CHANNELS | - VC4_SET_FIELD(0xf, VC4_HDMI_AUDIO_PACKET_B_FRAME_IDENTIFIER); + VC4_SET_FIELD(0x8, VC4_HDMI_AUDIO_PACKET_B_FRAME_IDENTIFIER); - channel_mask = GENMASK(hdmi->audio.channels - 1, 0); + channel_mask = GENMASK(vc4_hdmi->audio.channels - 1, 0); audio_packet_config |= VC4_SET_FIELD(channel_mask, VC4_HDMI_AUDIO_PACKET_CEA_MASK); /* Set the MAI threshold. This logic mimics the firmware's. */ - if (hdmi->audio.samplerate > 96000) { - HD_WRITE(VC4_HD_MAI_THR, - VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW)); - } else if (hdmi->audio.samplerate > 48000) { - HD_WRITE(VC4_HD_MAI_THR, - VC4_SET_FIELD(0x14, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW)); + if (vc4_hdmi->audio.samplerate > 96000) { + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW)); + } else if (vc4_hdmi->audio.samplerate > 48000) { + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x14, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW)); } else { - HD_WRITE(VC4_HD_MAI_THR, - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) | - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) | - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQLOW)); + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) | + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) | + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQLOW)); } - HDMI_WRITE(VC4_HDMI_MAI_CONFIG, + HDMI_WRITE(HDMI_MAI_CONFIG, VC4_HDMI_MAI_CONFIG_BIT_REVERSE | VC4_SET_FIELD(channel_mask, VC4_HDMI_MAI_CHANNEL_MASK)); - channel_map = 0; - for (i = 0; i < 8; i++) { - if (channel_mask & BIT(i)) - channel_map |= i << (3 * i); - } - - HDMI_WRITE(VC4_HDMI_MAI_CHANNEL_MAP, channel_map); - HDMI_WRITE(VC4_HDMI_AUDIO_PACKET_CONFIG, audio_packet_config); - vc4_hdmi_set_n_cts(hdmi); + channel_map = vc4_hdmi->variant->channel_map(vc4_hdmi, channel_mask); + HDMI_WRITE(HDMI_MAI_CHANNEL_MAP, channel_map); + HDMI_WRITE(HDMI_AUDIO_PACKET_CONFIG, audio_packet_config); + vc4_hdmi_set_n_cts(vc4_hdmi); return 0; } @@ -899,30 +987,33 @@ static int vc4_hdmi_audio_hw_params(struct snd_pcm_substream *substream, static int vc4_hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { - struct vc4_hdmi *hdmi = dai_to_hdmi(dai); - struct drm_encoder *encoder = hdmi->encoder; - struct drm_device *drm = encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(drm); + struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); + 
struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; switch (cmd) { case SNDRV_PCM_TRIGGER_START: vc4_hdmi_set_audio_infoframe(encoder); - HDMI_WRITE(VC4_HDMI_TX_PHY_CTL0, - HDMI_READ(VC4_HDMI_TX_PHY_CTL0) & - ~VC4_HDMI_TX_PHY_RNG_PWRDN); - HD_WRITE(VC4_HD_MAI_CTL, - VC4_SET_FIELD(hdmi->audio.channels, - VC4_HD_MAI_CTL_CHNUM) | - VC4_HD_MAI_CTL_ENABLE); + vc4_hdmi->audio.streaming = true; + + if (vc4_hdmi->variant->phy_rng_enable) + vc4_hdmi->variant->phy_rng_enable(vc4_hdmi); + + HDMI_WRITE(HDMI_MAI_CTL, + VC4_SET_FIELD(vc4_hdmi->audio.channels, + VC4_HD_MAI_CTL_CHNUM) | + VC4_HD_MAI_CTL_ENABLE); break; case SNDRV_PCM_TRIGGER_STOP: - HD_WRITE(VC4_HD_MAI_CTL, - VC4_HD_MAI_CTL_DLATE | - VC4_HD_MAI_CTL_ERRORE | - VC4_HD_MAI_CTL_ERRORF); - HDMI_WRITE(VC4_HDMI_TX_PHY_CTL0, - HDMI_READ(VC4_HDMI_TX_PHY_CTL0) | - VC4_HDMI_TX_PHY_RNG_PWRDN); + HDMI_WRITE(HDMI_MAI_CTL, + VC4_HD_MAI_CTL_DLATE | + VC4_HD_MAI_CTL_ERRORE | + VC4_HD_MAI_CTL_ERRORF); + + if (vc4_hdmi->variant->phy_rng_disable) + vc4_hdmi->variant->phy_rng_disable(vc4_hdmi); + + vc4_hdmi->audio.streaming = false; + break; default: break; @@ -943,10 +1034,11 @@ static int vc4_hdmi_audio_eld_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct vc4_hdmi *hdmi = snd_component_to_hdmi(component); + struct vc4_hdmi *vc4_hdmi = snd_component_to_hdmi(component); + struct drm_connector *connector = &vc4_hdmi->connector; uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES; - uinfo->count = sizeof(hdmi->connector->eld); + uinfo->count = sizeof(connector->eld); return 0; } @@ -955,10 +1047,11 @@ static int vc4_hdmi_audio_eld_ctl_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct vc4_hdmi *hdmi = snd_component_to_hdmi(component); + struct vc4_hdmi *vc4_hdmi = snd_component_to_hdmi(component); + struct drm_connector *connector = &vc4_hdmi->connector; - memcpy(ucontrol->value.bytes.data, hdmi->connector->eld, - sizeof(hdmi->connector->eld)); + memcpy(ucontrol->value.bytes.data, connector->eld, + sizeof(connector->eld)); return 0; } @@ -1023,9 +1116,9 @@ static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = { static int vc4_hdmi_audio_cpu_dai_probe(struct snd_soc_dai *dai) { - struct vc4_hdmi *hdmi = dai_to_hdmi(dai); + struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); - snd_soc_dai_init_dma_data(dai, &hdmi->audio.dma_data, NULL); + snd_soc_dai_init_dma_data(dai, &vc4_hdmi->audio.dma_data, NULL); return 0; } @@ -1051,12 +1144,15 @@ static const struct snd_dmaengine_pcm_config pcm_conf = { .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config, }; -static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) +static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) { - struct snd_soc_dai_link *dai_link = &hdmi->audio.link; - struct snd_soc_card *card = &hdmi->audio.card; - struct device *dev = &hdmi->pdev->dev; + const struct vc4_hdmi_register *mai_data = + &vc4_hdmi->variant->registers[HDMI_MAI_DATA]; + struct snd_soc_dai_link *dai_link = &vc4_hdmi->audio.link; + struct snd_soc_card *card = &vc4_hdmi->audio.card; + struct device *dev = &vc4_hdmi->pdev->dev; const __be32 *addr; + int index; int ret; if (!of_find_property(dev->of_node, "dmas", NULL)) { @@ -1065,6 +1161,11 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) return 0; } + if (mai_data->reg != VC4_HD) { + WARN_ONCE(true, "MAI isn't in the HD block\n"); + return -EINVAL; + } + /* * 
Get the physical address of VC4_HD_MAI_DATA. We need to retrieve * the bus address specified in the DT, because the physical address @@ -1072,10 +1173,16 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) * for DMA transfers. * This VC/MMU should probably be exposed to avoid this kind of hacks. */ - addr = of_get_address(dev->of_node, 1, NULL, NULL); - hdmi->audio.dma_data.addr = be32_to_cpup(addr) + VC4_HD_MAI_DATA; - hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - hdmi->audio.dma_data.maxburst = 2; + index = of_property_match_string(dev->of_node, "reg-names", "hd"); + /* Before BCM2711, we don't have a named register range */ + if (index < 0) + index = 1; + + addr = of_get_address(dev->of_node, index, NULL, NULL); + + vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset; + vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + vc4_hdmi->audio.dma_data.maxburst = 2; ret = devm_snd_dmaengine_pcm_register(dev, &pcm_conf, 0); if (ret) { @@ -1098,9 +1205,9 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) return ret; } - dai_link->cpus = &hdmi->audio.cpu; - dai_link->codecs = &hdmi->audio.codec; - dai_link->platforms = &hdmi->audio.platform; + dai_link->cpus = &vc4_hdmi->audio.cpu; + dai_link->codecs = &vc4_hdmi->audio.codec; + dai_link->platforms = &vc4_hdmi->audio.platform; dai_link->num_cpus = 1; dai_link->num_codecs = 1; @@ -1115,7 +1222,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) card->dai_link = dai_link; card->num_links = 1; - card->name = "vc4-hdmi"; + card->name = vc4_hdmi->variant->card_name; card->dev = dev; /* @@ -1125,7 +1232,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) * now stored in card->drvdata and should be retrieved with * snd_soc_card_get_drvdata() if needed. */ - snd_soc_card_set_drvdata(card, hdmi); + snd_soc_card_set_drvdata(card, vc4_hdmi); ret = devm_snd_soc_register_card(dev, card); if (ret) dev_err(dev, "Could not register sound card: %d\n", ret); @@ -1137,35 +1244,35 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) #ifdef CONFIG_DRM_VC4_HDMI_CEC static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv) { - struct vc4_dev *vc4 = priv; - struct vc4_hdmi *hdmi = vc4->hdmi; - - if (hdmi->cec_irq_was_rx) { - if (hdmi->cec_rx_msg.len) - cec_received_msg(hdmi->cec_adap, &hdmi->cec_rx_msg); - } else if (hdmi->cec_tx_ok) { - cec_transmit_done(hdmi->cec_adap, CEC_TX_STATUS_OK, + struct vc4_hdmi *vc4_hdmi = priv; + + if (vc4_hdmi->cec_irq_was_rx) { + if (vc4_hdmi->cec_rx_msg.len) + cec_received_msg(vc4_hdmi->cec_adap, + &vc4_hdmi->cec_rx_msg); + } else if (vc4_hdmi->cec_tx_ok) { + cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); } else { /* * This CEC implementation makes 1 retry, so if we * get a NACK, then that means it made 2 attempts. 
*/ - cec_transmit_done(hdmi->cec_adap, CEC_TX_STATUS_NACK, + cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_NACK, 0, 2, 0, 0); } return IRQ_HANDLED; } -static void vc4_cec_read_msg(struct vc4_dev *vc4, u32 cntrl1) +static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1) { - struct cec_msg *msg = &vc4->hdmi->cec_rx_msg; + struct cec_msg *msg = &vc4_hdmi->cec_rx_msg; unsigned int i; msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >> VC4_HDMI_CEC_REC_WRD_CNT_SHIFT); for (i = 0; i < msg->len; i += 4) { - u32 val = HDMI_READ(VC4_HDMI_CEC_RX_DATA_1 + i); + u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + i); msg->msg[i] = val & 0xff; msg->msg[i + 1] = (val >> 8) & 0xff; @@ -1176,38 +1283,37 @@ static void vc4_cec_read_msg(struct vc4_dev *vc4, u32 cntrl1) static irqreturn_t vc4_cec_irq_handler(int irq, void *priv) { - struct vc4_dev *vc4 = priv; - struct vc4_hdmi *hdmi = vc4->hdmi; - u32 stat = HDMI_READ(VC4_HDMI_CPU_STATUS); + struct vc4_hdmi *vc4_hdmi = priv; + u32 stat = HDMI_READ(HDMI_CEC_CPU_STATUS); u32 cntrl1, cntrl5; if (!(stat & VC4_HDMI_CPU_CEC)) return IRQ_NONE; - hdmi->cec_rx_msg.len = 0; - cntrl1 = HDMI_READ(VC4_HDMI_CEC_CNTRL_1); - cntrl5 = HDMI_READ(VC4_HDMI_CEC_CNTRL_5); - hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT; - if (hdmi->cec_irq_was_rx) { - vc4_cec_read_msg(vc4, cntrl1); + vc4_hdmi->cec_rx_msg.len = 0; + cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1); + cntrl5 = HDMI_READ(HDMI_CEC_CNTRL_5); + vc4_hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT; + if (vc4_hdmi->cec_irq_was_rx) { + vc4_cec_read_msg(vc4_hdmi, cntrl1); cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF; - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, cntrl1); + HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1); cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF; } else { - hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD; + vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD; cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN; } - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, cntrl1); - HDMI_WRITE(VC4_HDMI_CPU_CLEAR, VC4_HDMI_CPU_CEC); + HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1); + HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC); return IRQ_WAKE_THREAD; } static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) { - struct vc4_dev *vc4 = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); /* clock period in microseconds */ const u32 usecs = 1000000 / CEC_CLOCK_FREQ; - u32 val = HDMI_READ(VC4_HDMI_CEC_CNTRL_5); + u32 val = HDMI_READ(HDMI_CEC_CNTRL_5); val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET | VC4_HDMI_CEC_CNT_TO_4700_US_MASK | @@ -1216,30 +1322,30 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) ((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT); if (enable) { - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_5, val | + HDMI_WRITE(HDMI_CEC_CNTRL_5, val | VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_5, val); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_2, - ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) | - ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) | - ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) | - ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) | - ((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT)); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_3, - ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) | - ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) | - ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) | - ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT)); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_4, - ((4300 / 
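The CNT_TO_* timing fields programmed above are expressed in ticks of the CEC clock: usecs = 1000000 / CEC_CLOCK_FREQ, as in the code. Assuming CEC_CLOCK_FREQ is 40000 (matching the "40 kHz CEC clock" aimed for at init), each tick is 25 us. A standalone check of the tick counts:

#include <stdio.h>

int main(void)
{
	const unsigned cec_clock_freq = 40000;            /* assumed */
	const unsigned usecs = 1000000 / cec_clock_freq;  /* 25 us per tick */
	const unsigned durations_us[] = { 400, 800, 1300, 1700, 2400, 4500 };
	unsigned i;

	for (i = 0; i < sizeof(durations_us) / sizeof(durations_us[0]); i++)
		printf("%4u us -> %3u ticks\n", durations_us[i],
		       durations_us[i] / usecs);  /* e.g. 4500 us -> 180 */
	return 0;
}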
usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) | - ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) | - ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) | - ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT)); - - HDMI_WRITE(VC4_HDMI_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC); + HDMI_WRITE(HDMI_CEC_CNTRL_5, val); + HDMI_WRITE(HDMI_CEC_CNTRL_2, + ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) | + ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) | + ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) | + ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) | + ((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT)); + HDMI_WRITE(HDMI_CEC_CNTRL_3, + ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) | + ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) | + ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) | + ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT)); + HDMI_WRITE(HDMI_CEC_CNTRL_4, + ((4300 / usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) | + ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) | + ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) | + ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT)); + + HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC); } else { - HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, VC4_HDMI_CPU_CEC); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_5, val | + HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC); + HDMI_WRITE(HDMI_CEC_CNTRL_5, val | VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); } return 0; @@ -1247,10 +1353,10 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) { - struct vc4_dev *vc4 = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, - (HDMI_READ(VC4_HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) | + HDMI_WRITE(HDMI_CEC_CNTRL_1, + (HDMI_READ(HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) | (log_addr & 0xf) << VC4_HDMI_CEC_ADDR_SHIFT); return 0; } @@ -1258,25 +1364,25 @@ static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, u32 signal_free_time, struct cec_msg *msg) { - struct vc4_dev *vc4 = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); u32 val; unsigned int i; for (i = 0; i < msg->len; i += 4) - HDMI_WRITE(VC4_HDMI_CEC_TX_DATA_1 + i, + HDMI_WRITE(HDMI_CEC_TX_DATA_1 + i, (msg->msg[i]) | (msg->msg[i + 1] << 8) | (msg->msg[i + 2] << 16) | (msg->msg[i + 3] << 24)); - val = HDMI_READ(VC4_HDMI_CEC_CNTRL_1); + val = HDMI_READ(HDMI_CEC_CNTRL_1); val &= ~VC4_HDMI_CEC_START_XMIT_BEGIN; - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, val); + HDMI_WRITE(HDMI_CEC_CNTRL_1, val); val &= ~VC4_HDMI_CEC_MESSAGE_LENGTH_MASK; val |= (msg->len - 1) << VC4_HDMI_CEC_MESSAGE_LENGTH_SHIFT; val |= VC4_HDMI_CEC_START_XMIT_BEGIN; - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, val); + HDMI_WRITE(HDMI_CEC_CNTRL_1, val); return 0; } @@ -1285,61 +1391,275 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = { .adap_log_addr = vc4_hdmi_cec_adap_log_addr, .adap_transmit = vc4_hdmi_cec_adap_transmit, }; -#endif -static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) +static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) { -#ifdef CONFIG_DRM_VC4_HDMI_CEC struct cec_connector_info conn_info; -#endif - struct platform_device *pdev = to_platform_device(dev); - struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = drm->dev_private; - struct 
vc4_hdmi *hdmi; - struct vc4_hdmi_encoder *vc4_hdmi_encoder; - struct device_node *ddc_node; + struct platform_device *pdev = vc4_hdmi->pdev; u32 value; int ret; - hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); - if (!hdmi) + if (!vc4_hdmi->variant->cec_available) + return 0; + + vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, + vc4_hdmi, "vc4", + CEC_CAP_DEFAULTS | + CEC_CAP_CONNECTOR_INFO, 1); + ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap); + if (ret < 0) + return ret; + + cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector); + cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info); + + HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff); + value = HDMI_READ(HDMI_CEC_CNTRL_1); + value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK; + /* + * Set the logical address to Unregistered and set the clock + * divider: the hsm_clock rate and this divider setting will + * give a 40 kHz CEC clock. + */ + value |= VC4_HDMI_CEC_ADDR_MASK | + (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT); + HDMI_WRITE(HDMI_CEC_CNTRL_1, value); + ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0), + vc4_cec_irq_handler, + vc4_cec_irq_handler_thread, 0, + "vc4 hdmi cec", vc4_hdmi); + if (ret) + goto err_delete_cec_adap; + + ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev); + if (ret < 0) + goto err_delete_cec_adap; + + return 0; + +err_delete_cec_adap: + cec_delete_adapter(vc4_hdmi->cec_adap); + + return ret; +} + +static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi) +{ + cec_unregister_adapter(vc4_hdmi->cec_adap); +} +#else +static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) +{ + return 0; +} + +static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi) {}; + +#endif + +static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi, + struct debugfs_regset32 *regset, + enum vc4_hdmi_regs reg) +{ + const struct vc4_hdmi_variant *variant = vc4_hdmi->variant; + struct debugfs_reg32 *regs, *new_regs; + unsigned int count = 0; + unsigned int i; + + regs = kcalloc(variant->num_registers, sizeof(*regs), + GFP_KERNEL); + if (!regs) return -ENOMEM; - vc4_hdmi_encoder = devm_kzalloc(dev, sizeof(*vc4_hdmi_encoder), - GFP_KERNEL); - if (!vc4_hdmi_encoder) + for (i = 0; i < variant->num_registers; i++) { + const struct vc4_hdmi_register *field = &variant->registers[i]; + + if (field->reg != reg) + continue; + + regs[count].name = field->name; + regs[count].offset = field->offset; + count++; + } + + new_regs = krealloc(regs, count * sizeof(*regs), GFP_KERNEL); + if (!new_regs) return -ENOMEM; - vc4_hdmi_encoder->base.type = VC4_ENCODER_TYPE_HDMI; - hdmi->encoder = &vc4_hdmi_encoder->base.base; - - hdmi->pdev = pdev; - hdmi->hdmicore_regs = vc4_ioremap_regs(pdev, 0); - if (IS_ERR(hdmi->hdmicore_regs)) - return PTR_ERR(hdmi->hdmicore_regs); - - hdmi->hd_regs = vc4_ioremap_regs(pdev, 1); - if (IS_ERR(hdmi->hd_regs)) - return PTR_ERR(hdmi->hd_regs); - - hdmi->hdmi_regset.base = hdmi->hdmicore_regs; - hdmi->hdmi_regset.regs = hdmi_regs; - hdmi->hdmi_regset.nregs = ARRAY_SIZE(hdmi_regs); - hdmi->hd_regset.base = hdmi->hd_regs; - hdmi->hd_regset.regs = hd_regs; - hdmi->hd_regset.nregs = ARRAY_SIZE(hd_regs); - - hdmi->pixel_clock = devm_clk_get(dev, "pixel"); - if (IS_ERR(hdmi->pixel_clock)) { - ret = PTR_ERR(hdmi->pixel_clock); + + regset->base = __vc4_hdmi_get_field_base(vc4_hdmi, reg); + regset->regs = new_regs; + regset->nregs = count; + + return 0; +} + +static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi) +{ + struct platform_device *pdev = vc4_hdmi->pdev; + struct device *dev = &pdev->dev; + 
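A back-of-the-envelope check of the clock-divider comment above, assuming the pre-BCM2711 HSM clock runs at 163682864 Hz (the firmware-set rate referenced later in this patch) and that the hardware divides by DIV_CLK_CNT + 1; both are assumptions for illustration:

#include <stdio.h>

int main(void)
{
	const unsigned long hsm_rate = 163682864;  /* assumed HSM rate */
	const unsigned long div_clk_cnt = 4091;    /* value programmed above */

	/* 163682864 / 4092 = 40000 Hz -> the promised ~40 kHz CEC clock */
	printf("CEC clock ~= %lu Hz\n", hsm_rate / (div_clk_cnt + 1));
	return 0;
}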
int ret; + + vc4_hdmi->hdmicore_regs = vc4_ioremap_regs(pdev, 0); + if (IS_ERR(vc4_hdmi->hdmicore_regs)) + return PTR_ERR(vc4_hdmi->hdmicore_regs); + + vc4_hdmi->hd_regs = vc4_ioremap_regs(pdev, 1); + if (IS_ERR(vc4_hdmi->hd_regs)) + return PTR_ERR(vc4_hdmi->hd_regs); + + ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD); + if (ret) + return ret; + + ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI); + if (ret) + return ret; + + vc4_hdmi->pixel_clock = devm_clk_get(dev, "pixel"); + if (IS_ERR(vc4_hdmi->pixel_clock)) { + ret = PTR_ERR(vc4_hdmi->pixel_clock); if (ret != -EPROBE_DEFER) DRM_ERROR("Failed to get pixel clock\n"); return ret; } - hdmi->hsm_clock = devm_clk_get(dev, "hdmi"); - if (IS_ERR(hdmi->hsm_clock)) { + + vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi"); + if (IS_ERR(vc4_hdmi->hsm_clock)) { DRM_ERROR("Failed to get HDMI state machine clock\n"); - return PTR_ERR(hdmi->hsm_clock); + return PTR_ERR(vc4_hdmi->hsm_clock); } + vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock; + + return 0; +} + +static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi) +{ + struct platform_device *pdev = vc4_hdmi->pdev; + struct device *dev = &pdev->dev; + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi"); + if (!res) + return -ENODEV; + + vc4_hdmi->hdmicore_regs = devm_ioremap(dev, res->start, + resource_size(res)); + if (!vc4_hdmi->hdmicore_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hd"); + if (!res) + return -ENODEV; + + vc4_hdmi->hd_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->hd_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cec"); + if (!res) + return -ENODEV; + + vc4_hdmi->cec_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->cec_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csc"); + if (!res) + return -ENODEV; + + vc4_hdmi->csc_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->csc_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvp"); + if (!res) + return -ENODEV; + + vc4_hdmi->dvp_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->dvp_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); + if (!res) + return -ENODEV; + + vc4_hdmi->phy_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->phy_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "packet"); + if (!res) + return -ENODEV; + + vc4_hdmi->ram_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->ram_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rm"); + if (!res) + return -ENODEV; + + vc4_hdmi->rm_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->rm_regs) + return -ENOMEM; + + vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi"); + if (IS_ERR(vc4_hdmi->hsm_clock)) { + DRM_ERROR("Failed to get HDMI state machine clock\n"); + return PTR_ERR(vc4_hdmi->hsm_clock); + } + + vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb"); + if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) { + DRM_ERROR("Failed to get pixel bvb clock\n"); + return PTR_ERR(vc4_hdmi->pixel_bvb_clock); + } + + vc4_hdmi->audio_clock = devm_clk_get(dev, "audio"); + if (IS_ERR(vc4_hdmi->audio_clock)) { + DRM_ERROR("Failed to get audio clock\n"); + return 
PTR_ERR(vc4_hdmi->audio_clock); + } + + vc4_hdmi->reset = devm_reset_control_get(dev, NULL); + if (IS_ERR(vc4_hdmi->reset)) { + DRM_ERROR("Failed to get HDMI reset line\n"); + return PTR_ERR(vc4_hdmi->reset); + } + + return 0; +} + +static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) +{ + const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev); + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *drm = dev_get_drvdata(master); + struct vc4_hdmi *vc4_hdmi; + struct drm_encoder *encoder; + struct device_node *ddc_node; + u32 value; + int ret; + + vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL); + if (!vc4_hdmi) + return -ENOMEM; + + dev_set_drvdata(dev, vc4_hdmi); + encoder = &vc4_hdmi->encoder.base.base; + vc4_hdmi->encoder.base.type = variant->encoder_type; + vc4_hdmi->encoder.base.pre_crtc_configure = vc4_hdmi_encoder_pre_crtc_configure; + vc4_hdmi->encoder.base.pre_crtc_enable = vc4_hdmi_encoder_pre_crtc_enable; + vc4_hdmi->encoder.base.post_crtc_enable = vc4_hdmi_encoder_post_crtc_enable; + vc4_hdmi->encoder.base.post_crtc_disable = vc4_hdmi_encoder_post_crtc_disable; + vc4_hdmi->encoder.base.post_crtc_powerdown = vc4_hdmi_encoder_post_crtc_powerdown; + vc4_hdmi->pdev = pdev; + vc4_hdmi->variant = variant; + + ret = variant->init_resources(vc4_hdmi); + if (ret) + return ret; ddc_node = of_parse_phandle(dev->of_node, "ddc", 0); if (!ddc_node) { @@ -1347,123 +1667,62 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) return -ENODEV; } - hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node); + vc4_hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node); of_node_put(ddc_node); - if (!hdmi->ddc) { + if (!vc4_hdmi->ddc) { DRM_DEBUG("Failed to get ddc i2c adapter by node\n"); return -EPROBE_DEFER; } - /* This is the rate that is set by the firmware. The number - * needs to be a bit higher than the pixel clock rate - * (generally 148.5Mhz). - */ - ret = clk_set_rate(hdmi->hsm_clock, HSM_CLOCK_FREQ); - if (ret) { - DRM_ERROR("Failed to set HSM clock rate: %d\n", ret); - goto err_put_i2c; - } - - ret = clk_prepare_enable(hdmi->hsm_clock); - if (ret) { - DRM_ERROR("Failed to turn on HDMI state machine clock: %d\n", - ret); - goto err_put_i2c; - } - /* Only use the GPIO HPD pin if present in the DT, otherwise * we'll use the HDMI core's register. */ if (of_find_property(dev->of_node, "hpd-gpios", &value)) { enum of_gpio_flags hpd_gpio_flags; - hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node, - "hpd-gpios", 0, - &hpd_gpio_flags); - if (hdmi->hpd_gpio < 0) { - ret = hdmi->hpd_gpio; + vc4_hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node, + "hpd-gpios", 0, + &hpd_gpio_flags); + if (vc4_hdmi->hpd_gpio < 0) { + ret = vc4_hdmi->hpd_gpio; goto err_unprepare_hsm; } - hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW; + vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW; } - vc4->hdmi = hdmi; - - /* HDMI core must be enabled. 
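The vc5_hdmi_init_resources() path above repeats the platform_get_resource_byname()/devm_ioremap() pair once per register range. On kernels that provide devm_platform_ioremap_resource_byname(), the pairs could be folded into a helper like the hypothetical sketch below; note the helper returns an ERR_PTR on failure, unlike devm_ioremap(), which returns NULL:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Hypothetical wrapper: map one named register range, kernel-style. */
static int vc5_hdmi_map_named(struct platform_device *pdev,
			      const char *name, void __iomem **out)
{
	void __iomem *base = devm_platform_ioremap_resource_byname(pdev, name);

	if (IS_ERR(base))
		return PTR_ERR(base);

	*out = base;
	return 0;
}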
*/ - if (!(HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE)) { - HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_SW_RST); - udelay(1); - HD_WRITE(VC4_HD_M_CTL, 0); - - HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_ENABLE); - } pm_runtime_enable(dev); - drm_simple_encoder_init(drm, hdmi->encoder, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(encoder, &vc4_hdmi_encoder_helper_funcs); - hdmi->connector = - vc4_hdmi_connector_init(drm, hdmi->encoder, hdmi->ddc); - if (IS_ERR(hdmi->connector)) { - ret = PTR_ERR(hdmi->connector); + ret = vc4_hdmi_connector_init(drm, vc4_hdmi); + if (ret) goto err_destroy_encoder; - } -#ifdef CONFIG_DRM_VC4_HDMI_CEC - hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, - vc4, "vc4", - CEC_CAP_DEFAULTS | - CEC_CAP_CONNECTOR_INFO, 1); - ret = PTR_ERR_OR_ZERO(hdmi->cec_adap); - if (ret < 0) - goto err_destroy_conn; - cec_fill_conn_info_from_drm(&conn_info, hdmi->connector); - cec_s_conn_info(hdmi->cec_adap, &conn_info); - - HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, 0xffffffff); - value = HDMI_READ(VC4_HDMI_CEC_CNTRL_1); - value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK; - /* - * Set the logical address to Unregistered and set the clock - * divider: the hsm_clock rate and this divider setting will - * give a 40 kHz CEC clock. - */ - value |= VC4_HDMI_CEC_ADDR_MASK | - (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, value); - ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), - vc4_cec_irq_handler, - vc4_cec_irq_handler_thread, 0, - "vc4 hdmi cec", vc4); + ret = vc4_hdmi_cec_init(vc4_hdmi); if (ret) - goto err_delete_cec_adap; - ret = cec_register_adapter(hdmi->cec_adap, dev); - if (ret < 0) - goto err_delete_cec_adap; -#endif + goto err_destroy_conn; - ret = vc4_hdmi_audio_init(hdmi); + ret = vc4_hdmi_audio_init(vc4_hdmi); if (ret) - goto err_destroy_encoder; + goto err_free_cec; - vc4_debugfs_add_file(drm, "hdmi_regs", vc4_hdmi_debugfs_regs, hdmi); + vc4_debugfs_add_file(drm, variant->debugfs_name, + vc4_hdmi_debugfs_regs, + vc4_hdmi); return 0; -#ifdef CONFIG_DRM_VC4_HDMI_CEC -err_delete_cec_adap: - cec_delete_adapter(hdmi->cec_adap); +err_free_cec: + vc4_hdmi_cec_exit(vc4_hdmi); err_destroy_conn: - vc4_hdmi_connector_destroy(hdmi->connector); -#endif + vc4_hdmi_connector_destroy(&vc4_hdmi->connector); err_destroy_encoder: - drm_encoder_cleanup(hdmi->encoder); + drm_encoder_cleanup(encoder); err_unprepare_hsm: - clk_disable_unprepare(hdmi->hsm_clock); pm_runtime_disable(dev); -err_put_i2c: - put_device(&hdmi->ddc->dev); + put_device(&vc4_hdmi->ddc->dev); return ret; } @@ -1471,20 +1730,39 @@ err_put_i2c: static void vc4_hdmi_unbind(struct device *dev, struct device *master, void *data) { - struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = drm->dev_private; - struct vc4_hdmi *hdmi = vc4->hdmi; + struct vc4_hdmi *vc4_hdmi; - cec_unregister_adapter(hdmi->cec_adap); - vc4_hdmi_connector_destroy(hdmi->connector); - drm_encoder_cleanup(hdmi->encoder); + /* + * ASoC makes it a bit hard to retrieve a pointer to the + * vc4_hdmi structure. Registering the card will overwrite our + * device drvdata with a pointer to the snd_soc_card structure, + * which can then be used to retrieve whatever drvdata we want + * to associate. 
+ * + * However, this doesn't work if we never + * register an ASoC card (for example, because an old DT is + * missing the dmas property): in that case the card isn't + * registered and the device drvdata isn't set. + * + * We can deal with both cases by making sure a snd_soc_card + * pointer and a vc4_hdmi structure are pointing to the same + * memory address, so we can treat the two pointers + * interchangeably. + */ + BUILD_BUG_ON(offsetof(struct vc4_hdmi_audio, card) != 0); + BUILD_BUG_ON(offsetof(struct vc4_hdmi, audio) != 0); + vc4_hdmi = dev_get_drvdata(dev); - clk_disable_unprepare(hdmi->hsm_clock); - pm_runtime_disable(dev); + kfree(vc4_hdmi->hdmi_regset.regs); + kfree(vc4_hdmi->hd_regset.regs); - put_device(&hdmi->ddc->dev); + vc4_hdmi_cec_exit(vc4_hdmi); + vc4_hdmi_connector_destroy(&vc4_hdmi->connector); + drm_encoder_cleanup(&vc4_hdmi->encoder.base.base); + + pm_runtime_disable(dev); - vc4->hdmi = NULL; + put_device(&vc4_hdmi->ddc->dev); } static const struct component_ops vc4_hdmi_ops = { @@ -1503,8 +1781,80 @@ static int vc4_hdmi_dev_remove(struct platform_device *pdev) return 0; } +static const struct vc4_hdmi_variant bcm2835_variant = { + .encoder_type = VC4_ENCODER_TYPE_HDMI0, + .debugfs_name = "hdmi_regs", + .card_name = "vc4-hdmi", + .max_pixel_clock = 162000000, + .cec_available = true, + .registers = vc4_hdmi_fields, + .num_registers = ARRAY_SIZE(vc4_hdmi_fields), + + .init_resources = vc4_hdmi_init_resources, + .csc_setup = vc4_hdmi_csc_setup, + .reset = vc4_hdmi_reset, + .set_timings = vc4_hdmi_set_timings, + .phy_init = vc4_hdmi_phy_init, + .phy_disable = vc4_hdmi_phy_disable, + .phy_rng_enable = vc4_hdmi_phy_rng_enable, + .phy_rng_disable = vc4_hdmi_phy_rng_disable, + .channel_map = vc4_hdmi_channel_map, +}; + +static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = { + .encoder_type = VC4_ENCODER_TYPE_HDMI0, + .debugfs_name = "hdmi0_regs", + .card_name = "vc4-hdmi-0", + .max_pixel_clock = 297000000, + .registers = vc5_hdmi_hdmi0_fields, + .num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields), + .phy_lane_mapping = { + PHY_LANE_0, + PHY_LANE_1, + PHY_LANE_2, + PHY_LANE_CK, + }, + + .init_resources = vc5_hdmi_init_resources, + .csc_setup = vc5_hdmi_csc_setup, + .reset = vc5_hdmi_reset, + .set_timings = vc5_hdmi_set_timings, + .phy_init = vc5_hdmi_phy_init, + .phy_disable = vc5_hdmi_phy_disable, + .phy_rng_enable = vc5_hdmi_phy_rng_enable, + .phy_rng_disable = vc5_hdmi_phy_rng_disable, + .channel_map = vc5_hdmi_channel_map, +}; + +static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = { + .encoder_type = VC4_ENCODER_TYPE_HDMI1, + .debugfs_name = "hdmi1_regs", + .card_name = "vc4-hdmi-1", + .max_pixel_clock = 297000000, + .registers = vc5_hdmi_hdmi1_fields, + .num_registers = ARRAY_SIZE(vc5_hdmi_hdmi1_fields), + .phy_lane_mapping = { + PHY_LANE_1, + PHY_LANE_0, + PHY_LANE_CK, + PHY_LANE_2, + }, + + .init_resources = vc5_hdmi_init_resources, + .csc_setup = vc5_hdmi_csc_setup, + .reset = vc5_hdmi_reset, + .set_timings = vc5_hdmi_set_timings, + .phy_init = vc5_hdmi_phy_init, + .phy_disable = vc5_hdmi_phy_disable, + .phy_rng_enable = vc5_hdmi_phy_rng_enable, + .phy_rng_disable = vc5_hdmi_phy_rng_disable, + .channel_map = vc5_hdmi_channel_map, +}; + static const struct of_device_id vc4_hdmi_dt_match[] = { - { .compatible = "brcm,bcm2835-hdmi" }, + { .compatible = "brcm,bcm2835-hdmi", .data = &bcm2835_variant }, + { .compatible = "brcm,bcm2711-hdmi0", .data = &bcm2711_hdmi0_variant }, + { .compatible = "brcm,bcm2711-hdmi1", .data = 
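The BUILD_BUG_ON() pair above is what makes the drvdata aliasing safe. A standalone illustration of the layout trick, using toy stand-ins for snd_soc_card and vc4_hdmi:

#include <assert.h>
#include <stddef.h>

struct card { int dummy; };
struct audio { struct card card; int samplerate; };  /* card first */
struct hdmi { struct audio audio; int hpd_gpio; };   /* audio first */

int main(void)
{
	struct hdmi h;

	/* Both nestings sit at offset 0, so the pointers alias exactly. */
	_Static_assert(offsetof(struct audio, card) == 0, "layout");
	_Static_assert(offsetof(struct hdmi, audio) == 0, "layout");
	assert((void *)&h == (void *)&h.audio.card);
	return 0;
}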
&bcm2711_hdmi1_variant }, {} }; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h new file mode 100644 index 000000000000..63c6f8bddf1d --- /dev/null +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -0,0 +1,184 @@ +#ifndef _VC4_HDMI_H_ +#define _VC4_HDMI_H_ + +#include <drm/drm_connector.h> +#include <media/cec.h> +#include <sound/dmaengine_pcm.h> +#include <sound/soc.h> + +#include "vc4_drv.h" + +/* VC4 HDMI encoder KMS struct */ +struct vc4_hdmi_encoder { + struct vc4_encoder base; + bool hdmi_monitor; + bool limited_rgb_range; +}; + +static inline struct vc4_hdmi_encoder * +to_vc4_hdmi_encoder(struct drm_encoder *encoder) +{ + return container_of(encoder, struct vc4_hdmi_encoder, base.base); +} + +struct drm_display_mode; + +struct vc4_hdmi; +struct vc4_hdmi_register; + +enum vc4_hdmi_phy_channel { + PHY_LANE_0 = 0, + PHY_LANE_1, + PHY_LANE_2, + PHY_LANE_CK, +}; + +struct vc4_hdmi_variant { + /* Encoder Type for that controller */ + enum vc4_encoder_type encoder_type; + + /* ALSA card name */ + const char *card_name; + + /* Filename to expose the registers in debugfs */ + const char *debugfs_name; + + /* Set to true when the CEC support is available */ + bool cec_available; + + /* Maximum pixel clock supported by the controller (in Hz) */ + unsigned long long max_pixel_clock; + + /* List of the registers available on that variant */ + const struct vc4_hdmi_register *registers; + + /* Number of registers on that variant */ + unsigned int num_registers; + + /* BCM2711 Only. + * The variants don't map the lane in the same order in the + * PHY, so this is an array mapping the HDMI channel (index) + * to the PHY lane (value). + */ + enum vc4_hdmi_phy_channel phy_lane_mapping[4]; + + /* Callback to get the resources (memory region, interrupts, + * clocks, etc) for that variant. + */ + int (*init_resources)(struct vc4_hdmi *vc4_hdmi); + + /* Callback to reset the HDMI block */ + void (*reset)(struct vc4_hdmi *vc4_hdmi); + + /* Callback to enable / disable the CSC */ + void (*csc_setup)(struct vc4_hdmi *vc4_hdmi, bool enable); + + /* Callback to configure the video timings in the HDMI block */ + void (*set_timings)(struct vc4_hdmi *vc4_hdmi, + struct drm_display_mode *mode); + + /* Callback to initialize the PHY according to the mode */ + void (*phy_init)(struct vc4_hdmi *vc4_hdmi, + struct drm_display_mode *mode); + + /* Callback to disable the PHY */ + void (*phy_disable)(struct vc4_hdmi *vc4_hdmi); + + /* Callback to enable the RNG in the PHY */ + void (*phy_rng_enable)(struct vc4_hdmi *vc4_hdmi); + + /* Callback to disable the RNG in the PHY */ + void (*phy_rng_disable)(struct vc4_hdmi *vc4_hdmi); + + /* Callback to get channel map */ + u32 (*channel_map)(struct vc4_hdmi *vc4_hdmi, u32 channel_mask); +}; + +/* HDMI audio information */ +struct vc4_hdmi_audio { + struct snd_soc_card card; + struct snd_soc_dai_link link; + struct snd_soc_dai_link_component cpu; + struct snd_soc_dai_link_component codec; + struct snd_soc_dai_link_component platform; + int samplerate; + int channels; + struct snd_dmaengine_dai_dma_data dma_data; + struct snd_pcm_substream *substream; + + bool streaming; +}; + +/* General HDMI hardware state. 
*/ +struct vc4_hdmi { + struct vc4_hdmi_audio audio; + + struct platform_device *pdev; + const struct vc4_hdmi_variant *variant; + + struct vc4_hdmi_encoder encoder; + struct drm_connector connector; + + struct i2c_adapter *ddc; + void __iomem *hdmicore_regs; + void __iomem *hd_regs; + + /* VC5 Only */ + void __iomem *cec_regs; + /* VC5 Only */ + void __iomem *csc_regs; + /* VC5 Only */ + void __iomem *dvp_regs; + /* VC5 Only */ + void __iomem *phy_regs; + /* VC5 Only */ + void __iomem *ram_regs; + /* VC5 Only */ + void __iomem *rm_regs; + + int hpd_gpio; + bool hpd_active_low; + + struct cec_adapter *cec_adap; + struct cec_msg cec_rx_msg; + bool cec_tx_ok; + bool cec_irq_was_rx; + + struct clk *pixel_clock; + struct clk *hsm_clock; + struct clk *audio_clock; + struct clk *pixel_bvb_clock; + + struct reset_control *reset; + + struct debugfs_regset32 hdmi_regset; + struct debugfs_regset32 hd_regset; +}; + +static inline struct vc4_hdmi * +connector_to_vc4_hdmi(struct drm_connector *connector) +{ + return container_of(connector, struct vc4_hdmi, connector); +} + +static inline struct vc4_hdmi * +encoder_to_vc4_hdmi(struct drm_encoder *encoder) +{ + struct vc4_hdmi_encoder *_encoder = to_vc4_hdmi_encoder(encoder); + + return container_of(_encoder, struct vc4_hdmi, encoder); +} + +void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, + struct drm_display_mode *mode); +void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi); +void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi); +void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi); + +void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, + struct drm_display_mode *mode); +void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi); +void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi); +void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi); + +#endif /* _VC4_HDMI_H_ */ diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c new file mode 100644 index 000000000000..057796b54c51 --- /dev/null +++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015 Broadcom + * Copyright (c) 2014 The Linux Foundation. All rights reserved. 
+ * Copyright (C) 2013 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + */ + +#include "vc4_hdmi.h" +#include "vc4_regs.h" +#include "vc4_hdmi_regs.h" + +#define VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB BIT(5) +#define VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB BIT(4) +#define VC4_HDMI_TX_PHY_RESET_CTL_TX_CK_RESET BIT(3) +#define VC4_HDMI_TX_PHY_RESET_CTL_TX_2_RESET BIT(2) +#define VC4_HDMI_TX_PHY_RESET_CTL_TX_1_RESET BIT(1) +#define VC4_HDMI_TX_PHY_RESET_CTL_TX_0_RESET BIT(0) + +#define VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN BIT(4) + +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_PREEMP_SHIFT 29 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_PREEMP_MASK VC4_MASK(31, 29) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_MAINDRV_SHIFT 24 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_MAINDRV_MASK VC4_MASK(28, 24) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_PREEMP_SHIFT 21 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_PREEMP_MASK VC4_MASK(23, 21) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_MAINDRV_SHIFT 16 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_MAINDRV_MASK VC4_MASK(20, 16) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_PREEMP_SHIFT 13 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_PREEMP_MASK VC4_MASK(15, 13) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_MAINDRV_SHIFT 8 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_MAINDRV_MASK VC4_MASK(12, 8) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_PREEMP_SHIFT 5 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_PREEMP_MASK VC4_MASK(7, 5) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_MAINDRV_SHIFT 0 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_MAINDRV_MASK VC4_MASK(4, 0) + +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA2_SHIFT 15 +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA2_MASK VC4_MASK(19, 15) +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA1_SHIFT 10 +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA1_MASK VC4_MASK(14, 10) +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA0_SHIFT 5 +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA0_MASK VC4_MASK(9, 5) +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_CK_SHIFT 0 +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_CK_MASK VC4_MASK(4, 0) + +#define VC4_HDMI_TX_PHY_CTL_2_VCO_GAIN_SHIFT 16 +#define VC4_HDMI_TX_PHY_CTL_2_VCO_GAIN_MASK VC4_MASK(19, 16) +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA2_SHIFT 12 +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA2_MASK VC4_MASK(15, 12) +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA1_SHIFT 8 +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA1_MASK VC4_MASK(11, 8) +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA0_SHIFT 4 +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA0_MASK VC4_MASK(7, 4) +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELCK_SHIFT 0 +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELCK_MASK VC4_MASK(3, 0) + +#define VC4_HDMI_TX_PHY_CTL_3_RP_SHIFT 17 +#define VC4_HDMI_TX_PHY_CTL_3_RP_MASK VC4_MASK(19, 17) +#define VC4_HDMI_TX_PHY_CTL_3_RZ_SHIFT 12 +#define VC4_HDMI_TX_PHY_CTL_3_RZ_MASK VC4_MASK(16, 12) +#define VC4_HDMI_TX_PHY_CTL_3_CP1_SHIFT 10 +#define VC4_HDMI_TX_PHY_CTL_3_CP1_MASK VC4_MASK(11, 10) +#define VC4_HDMI_TX_PHY_CTL_3_CP_SHIFT 8 +#define VC4_HDMI_TX_PHY_CTL_3_CP_MASK VC4_MASK(9, 8) +#define VC4_HDMI_TX_PHY_CTL_3_CZ_SHIFT 6 +#define VC4_HDMI_TX_PHY_CTL_3_CZ_MASK VC4_MASK(7, 6) +#define VC4_HDMI_TX_PHY_CTL_3_ICP_SHIFT 0 +#define VC4_HDMI_TX_PHY_CTL_3_ICP_MASK VC4_MASK(5, 0) + +#define VC4_HDMI_TX_PHY_PLL_CTL_0_MASH11_MODE BIT(13) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_VC_RANGE_EN BIT(12) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_EMULATE_VC_LOW BIT(11) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_EMULATE_VC_HIGH BIT(10) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_SEL_SHIFT 9 +#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_SEL_MASK 
VC4_MASK(9, 9) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_FB_DIV2 BIT(8) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_POST_DIV2 BIT(7) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_CONT_EN BIT(6) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_ENA_VCO_CLK BIT(5) + +#define VC4_HDMI_TX_PHY_PLL_CTL_1_CPP_SHIFT 16 +#define VC4_HDMI_TX_PHY_PLL_CTL_1_CPP_MASK VC4_MASK(27, 16) +#define VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_DELAY_SHIFT 14 +#define VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_DELAY_MASK VC4_MASK(15, 14) +#define VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_ENABLE BIT(13) +#define VC4_HDMI_TX_PHY_PLL_CTL_1_POST_RST_SEL_SHIFT 11 +#define VC4_HDMI_TX_PHY_PLL_CTL_1_POST_RST_SEL_MASK VC4_MASK(12, 11) + +#define VC4_HDMI_TX_PHY_CLK_DIV_VCO_SHIFT 8 +#define VC4_HDMI_TX_PHY_CLK_DIV_VCO_MASK VC4_MASK(15, 8) + +#define VC4_HDMI_TX_PHY_PLL_CFG_PDIV_SHIFT 0 +#define VC4_HDMI_TX_PHY_PLL_CFG_PDIV_MASK VC4_MASK(3, 0) + +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TXCK_OUT_SEL_MASK VC4_MASK(13, 12) +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TXCK_OUT_SEL_SHIFT 12 +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX2_OUT_SEL_MASK VC4_MASK(9, 8) +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX2_OUT_SEL_SHIFT 8 +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX1_OUT_SEL_MASK VC4_MASK(5, 4) +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX1_OUT_SEL_SHIFT 4 +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX0_OUT_SEL_MASK VC4_MASK(1, 0) +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX0_OUT_SEL_SHIFT 0 + +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT_MASK VC4_MASK(27, 0) +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT_SHIFT 0 + +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT_MASK VC4_MASK(27, 0) +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT_SHIFT 0 + +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_STABLE_THRESHOLD_MASK VC4_MASK(31, 16) +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_STABLE_THRESHOLD_SHIFT 16 +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_HOLD_THRESHOLD_MASK VC4_MASK(15, 0) +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_HOLD_THRESHOLD_SHIFT 0 + +#define VC4_HDMI_RM_CONTROL_EN_FREEZE_COUNTERS BIT(19) +#define VC4_HDMI_RM_CONTROL_EN_LOAD_INTEGRATOR BIT(17) +#define VC4_HDMI_RM_CONTROL_FREE_RUN BIT(4) + +#define VC4_HDMI_RM_OFFSET_ONLY BIT(31) +#define VC4_HDMI_RM_OFFSET_OFFSET_SHIFT 0 +#define VC4_HDMI_RM_OFFSET_OFFSET_MASK VC4_MASK(30, 0) + +#define VC4_HDMI_RM_FORMAT_SHIFT_SHIFT 24 +#define VC4_HDMI_RM_FORMAT_SHIFT_MASK VC4_MASK(25, 24) + +#define OSCILLATOR_FREQUENCY 54000000 + +void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode) +{ + /* PHY should be in reset, like + * vc4_hdmi_encoder_disable() does. 
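The *_SHIFT/*_MASK pairs defined above are consumed through the token-pasting VC4_SET_FIELD() helper from vc4_regs.h, which this file includes. The stand-ins below mirror that idiom (a reconstruction for illustration, not the verbatim kernel macros):

#include <stdio.h>

#define GENMASK32(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))
#define SET_FIELD(value, field) (((value) << field##_SHIFT) & field##_MASK)

#define DEMO_VCO_GAIN_SHIFT 16
#define DEMO_VCO_GAIN_MASK  GENMASK32(19, 16)

int main(void)
{
	/* 0xc placed in bits 19:16 -> 0xc0000 */
	printf("%#x\n", SET_FIELD(0xcu, DEMO_VCO_GAIN));
	return 0;
}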
+ */ + + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16); + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0); +} + +void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi) +{ + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16); +} + +void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi) +{ + HDMI_WRITE(HDMI_TX_PHY_CTL_0, + HDMI_READ(HDMI_TX_PHY_CTL_0) & + ~VC4_HDMI_TX_PHY_RNG_PWRDN); +} + +void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi) +{ + HDMI_WRITE(HDMI_TX_PHY_CTL_0, + HDMI_READ(HDMI_TX_PHY_CTL_0) | + VC4_HDMI_TX_PHY_RNG_PWRDN); +} + +static unsigned long long +phy_get_vco_freq(unsigned long long clock, u8 *vco_sel, u8 *vco_div) +{ + unsigned long long vco_freq = clock; + unsigned int _vco_div = 0; + unsigned int _vco_sel = 0; + + while (vco_freq < 3000000000ULL) { + _vco_div++; + vco_freq = clock * _vco_div * 10; + } + + if (vco_freq > 4500000000ULL) + _vco_sel = 1; + + *vco_sel = _vco_sel; + *vco_div = _vco_div; + + return vco_freq; +} + +static u8 phy_get_cp_current(unsigned long vco_freq) +{ + if (vco_freq < 3700000000ULL) + return 0x1c; + + return 0x18; +} + +static u32 phy_get_rm_offset(unsigned long long vco_freq) +{ + unsigned long long fref = OSCILLATOR_FREQUENCY; + u64 offset = 0; + + /* RM offset is stored as 9.22 format */ + offset = vco_freq * 2; + offset = offset << 22; + do_div(offset, fref); + offset >>= 2; + + return offset; +} + +static u8 phy_get_vco_gain(unsigned long long vco_freq) +{ + if (vco_freq < 3350000000ULL) + return 0xf; + + if (vco_freq < 3700000000ULL) + return 0xc; + + if (vco_freq < 4050000000ULL) + return 0x6; + + if (vco_freq < 4800000000ULL) + return 0x5; + + if (vco_freq < 5200000000ULL) + return 0x7; + + return 0x2; +} + +struct phy_lane_settings { + struct { + u8 preemphasis; + u8 main_driver; + } amplitude; + + u8 res_sel_data; + u8 term_res_sel_data; +}; + +struct phy_settings { + unsigned long long min_rate; + unsigned long long max_rate; + struct phy_lane_settings channel[3]; + struct phy_lane_settings clock; +}; + +static const struct phy_settings vc5_hdmi_phy_settings[] = { + { + 0, 50000000, + { + {{0x0, 0x0A}, 0x12, 0x0}, + {{0x0, 0x0A}, 0x12, 0x0}, + {{0x0, 0x0A}, 0x12, 0x0} + }, + {{0x0, 0x0A}, 0x18, 0x0}, + }, + { + 50000001, 75000000, + { + {{0x0, 0x09}, 0x12, 0x0}, + {{0x0, 0x09}, 0x12, 0x0}, + {{0x0, 0x09}, 0x12, 0x0} + }, + {{0x0, 0x0C}, 0x18, 0x3}, + }, + { + 75000001, 165000000, + { + {{0x0, 0x09}, 0x12, 0x0}, + {{0x0, 0x09}, 0x12, 0x0}, + {{0x0, 0x09}, 0x12, 0x0} + }, + {{0x0, 0x0C}, 0x18, 0x3}, + }, + { + 165000001, 250000000, + { + {{0x0, 0x0F}, 0x12, 0x1}, + {{0x0, 0x0F}, 0x12, 0x1}, + {{0x0, 0x0F}, 0x12, 0x1} + }, + {{0x0, 0x0C}, 0x18, 0x3}, + }, + { + 250000001, 340000000, + { + {{0x2, 0x0D}, 0x12, 0x1}, + {{0x2, 0x0D}, 0x12, 0x1}, + {{0x2, 0x0D}, 0x12, 0x1} + }, + {{0x0, 0x0C}, 0x18, 0xF}, + }, + { + 340000001, 450000000, + { + {{0x0, 0x1B}, 0x12, 0xF}, + {{0x0, 0x1B}, 0x12, 0xF}, + {{0x0, 0x1B}, 0x12, 0xF} + }, + {{0x0, 0x0A}, 0x12, 0xF}, + }, + { + 450000001, 600000000, + { + {{0x0, 0x1C}, 0x12, 0xF}, + {{0x0, 0x1C}, 0x12, 0xF}, + {{0x0, 0x1C}, 0x12, 0xF} + }, + {{0x0, 0x0B}, 0x13, 0xF}, + }, +}; + +static const struct phy_settings *phy_get_settings(unsigned long long tmds_rate) +{ + unsigned int count = ARRAY_SIZE(vc5_hdmi_phy_settings); + unsigned int i; + + for (i = 0; i < count; i++) { + const struct phy_settings *s = &vc5_hdmi_phy_settings[i]; + + if (tmds_rate >= s->min_rate && tmds_rate <= s->max_rate) + return s; + } + + /* + * If the pixel clock exceeds our max setting, try the max + * setting anyway. 
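To make phy_get_vco_freq() and phy_get_rm_offset() above concrete, here is a standalone re-run of the same arithmetic for a 148.5 MHz pixel clock (the TMDS bit clock being 10x the pixel clock is what drives the *10 factor):

#include <stdint.h>
#include <stdio.h>

static uint64_t vco_freq(uint64_t clock, unsigned *sel, unsigned *div)
{
	uint64_t vco = clock;
	unsigned d = 0;

	while (vco < 3000000000ULL)   /* keep the VCO at 3 GHz or above */
		vco = clock * ++d * 10;

	*sel = vco > 4500000000ULL;
	*div = d;
	return vco;
}

static uint32_t rm_offset(uint64_t vco)
{
	/* vco/fref in 9.22 fixed point; the *2 ... >>2 dance nets 2^21 */
	return (uint32_t)((((vco * 2) << 22) / 54000000ULL) >> 2);
}

int main(void)
{
	unsigned sel, div;
	uint64_t vco = vco_freq(148500000ULL, &sel, &div);

	/* expect: div=3, vco=4455000000, sel=0, offset=0xa500000 */
	printf("div=%u vco=%llu sel=%u offset=%#x\n",
	       div, (unsigned long long)vco, sel, (unsigned)rm_offset(vco));
	return 0;
}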
+ */ + return &vc5_hdmi_phy_settings[count - 1]; +} + +static const struct phy_lane_settings * +phy_get_channel_settings(enum vc4_hdmi_phy_channel chan, + unsigned long long tmds_rate) +{ + const struct phy_settings *settings = phy_get_settings(tmds_rate); + + if (chan == PHY_LANE_CK) + return &settings->clock; + + return &settings->channel[chan]; +} + +static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi) +{ + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0x0f); + HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10)); +} + +void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode) +{ + const struct phy_lane_settings *chan0_settings, *chan1_settings, *chan2_settings, *clock_settings; + const struct vc4_hdmi_variant *variant = vc4_hdmi->variant; + unsigned long long pixel_freq = mode->clock * 1000; + unsigned long long vco_freq; + unsigned char word_sel; + u8 vco_sel, vco_div; + + vco_freq = phy_get_vco_freq(pixel_freq, &vco_sel, &vco_div); + + vc5_hdmi_reset_phy(vc4_hdmi); + + HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, + VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN); + + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, + HDMI_READ(HDMI_TX_PHY_RESET_CTL) & + ~VC4_HDMI_TX_PHY_RESET_CTL_TX_0_RESET & + ~VC4_HDMI_TX_PHY_RESET_CTL_TX_1_RESET & + ~VC4_HDMI_TX_PHY_RESET_CTL_TX_2_RESET & + ~VC4_HDMI_TX_PHY_RESET_CTL_TX_CK_RESET); + + HDMI_WRITE(HDMI_RM_CONTROL, + HDMI_READ(HDMI_RM_CONTROL) | + VC4_HDMI_RM_CONTROL_EN_FREEZE_COUNTERS | + VC4_HDMI_RM_CONTROL_EN_LOAD_INTEGRATOR | + VC4_HDMI_RM_CONTROL_FREE_RUN); + + HDMI_WRITE(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1, + (HDMI_READ(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1) & + ~VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT_MASK) | + VC4_SET_FIELD(0, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT)); + + HDMI_WRITE(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2, + (HDMI_READ(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2) & + ~VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT_MASK) | + VC4_SET_FIELD(0, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT)); + + HDMI_WRITE(HDMI_RM_OFFSET, + VC4_SET_FIELD(phy_get_rm_offset(vco_freq), + VC4_HDMI_RM_OFFSET_OFFSET) | + VC4_HDMI_RM_OFFSET_ONLY); + + HDMI_WRITE(HDMI_TX_PHY_CLK_DIV, + VC4_SET_FIELD(vco_div, VC4_HDMI_TX_PHY_CLK_DIV_VCO)); + + HDMI_WRITE(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4, + VC4_SET_FIELD(0xe147, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_HOLD_THRESHOLD) | + VC4_SET_FIELD(0xe14, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_STABLE_THRESHOLD)); + + HDMI_WRITE(HDMI_TX_PHY_PLL_CTL_0, + VC4_HDMI_TX_PHY_PLL_CTL_0_ENA_VCO_CLK | + VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_CONT_EN | + VC4_HDMI_TX_PHY_PLL_CTL_0_MASH11_MODE | + VC4_SET_FIELD(vco_sel, VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_SEL)); + + HDMI_WRITE(HDMI_TX_PHY_PLL_CTL_1, + HDMI_READ(HDMI_TX_PHY_PLL_CTL_1) | + VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_ENABLE | + VC4_SET_FIELD(3, VC4_HDMI_TX_PHY_PLL_CTL_1_POST_RST_SEL) | + VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_DELAY) | + VC4_SET_FIELD(0x8a, VC4_HDMI_TX_PHY_PLL_CTL_1_CPP)); + + HDMI_WRITE(HDMI_RM_FORMAT, + HDMI_READ(HDMI_RM_FORMAT) | + VC4_SET_FIELD(2, VC4_HDMI_RM_FORMAT_SHIFT)); + + HDMI_WRITE(HDMI_TX_PHY_PLL_CFG, + HDMI_READ(HDMI_TX_PHY_PLL_CFG) | + VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_PLL_CFG_PDIV)); + + if (pixel_freq >= 340000000) + word_sel = 3; + else + word_sel = 0; + HDMI_WRITE(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, word_sel); + + HDMI_WRITE(HDMI_TX_PHY_CTL_3, + VC4_SET_FIELD(phy_get_cp_current(vco_freq), + VC4_HDMI_TX_PHY_CTL_3_ICP) | + VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_CTL_3_CP) | + VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_CTL_3_CP1) | + 
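The phy_get_settings() table above is keyed by inclusive TMDS-rate ranges with a clamp-to-last fallback. A trimmed standalone version of the lookup, showing which bucket a given rate selects:

#include <stdio.h>

struct range { unsigned long long min, max; };

static const struct range buckets[] = {
	{ 0, 50000000 }, { 50000001, 75000000 }, { 75000001, 165000000 },
	{ 165000001, 250000000 }, { 250000001, 340000000 },
	{ 340000001, 450000000 }, { 450000001, 600000000 },
};

static unsigned pick(unsigned long long tmds_rate)
{
	unsigned i;

	for (i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i++)
		if (tmds_rate >= buckets[i].min && tmds_rate <= buckets[i].max)
			return i;

	return i - 1;  /* out of range: clamp to the last bucket */
}

int main(void)
{
	/* 148.5 MHz -> bucket 2; 650 MHz (over max) -> clamped to bucket 6 */
	printf("%u %u\n", pick(148500000ULL), pick(650000000ULL));
	return 0;
}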
VC4_SET_FIELD(3, VC4_HDMI_TX_PHY_CTL_3_CZ) | + VC4_SET_FIELD(4, VC4_HDMI_TX_PHY_CTL_3_RP) | + VC4_SET_FIELD(6, VC4_HDMI_TX_PHY_CTL_3_RZ)); + + chan0_settings = + phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_0], + pixel_freq); + chan1_settings = + phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_1], + pixel_freq); + chan2_settings = + phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_2], + pixel_freq); + clock_settings = + phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_CK], + pixel_freq); + + HDMI_WRITE(HDMI_TX_PHY_CTL_0, + VC4_SET_FIELD(chan0_settings->amplitude.preemphasis, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_PREEMP) | + VC4_SET_FIELD(chan0_settings->amplitude.main_driver, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_MAINDRV) | + VC4_SET_FIELD(chan1_settings->amplitude.preemphasis, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_PREEMP) | + VC4_SET_FIELD(chan1_settings->amplitude.main_driver, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_MAINDRV) | + VC4_SET_FIELD(chan2_settings->amplitude.preemphasis, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_PREEMP) | + VC4_SET_FIELD(chan2_settings->amplitude.main_driver, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_MAINDRV) | + VC4_SET_FIELD(clock_settings->amplitude.preemphasis, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_PREEMP) | + VC4_SET_FIELD(clock_settings->amplitude.main_driver, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_MAINDRV)); + + HDMI_WRITE(HDMI_TX_PHY_CTL_1, + HDMI_READ(HDMI_TX_PHY_CTL_1) | + VC4_SET_FIELD(chan0_settings->res_sel_data, + VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA0) | + VC4_SET_FIELD(chan1_settings->res_sel_data, + VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA1) | + VC4_SET_FIELD(chan2_settings->res_sel_data, + VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA2) | + VC4_SET_FIELD(clock_settings->res_sel_data, + VC4_HDMI_TX_PHY_CTL_1_RES_SEL_CK)); + + HDMI_WRITE(HDMI_TX_PHY_CTL_2, + VC4_SET_FIELD(chan0_settings->term_res_sel_data, + VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA0) | + VC4_SET_FIELD(chan1_settings->term_res_sel_data, + VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA1) | + VC4_SET_FIELD(chan2_settings->term_res_sel_data, + VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA2) | + VC4_SET_FIELD(clock_settings->term_res_sel_data, + VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELCK) | + VC4_SET_FIELD(phy_get_vco_gain(vco_freq), + VC4_HDMI_TX_PHY_CTL_2_VCO_GAIN)); + + HDMI_WRITE(HDMI_TX_PHY_CHANNEL_SWAP, + VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_0], + VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX0_OUT_SEL) | + VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_1], + VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX1_OUT_SEL) | + VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_2], + VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX2_OUT_SEL) | + VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_CK], + VC4_HDMI_TX_PHY_CHANNEL_SWAP_TXCK_OUT_SEL)); + + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, + HDMI_READ(HDMI_TX_PHY_RESET_CTL) & + ~(VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB | + VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB)); + + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, + HDMI_READ(HDMI_TX_PHY_RESET_CTL) | + VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB | + VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB); +} + +void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi) +{ + vc5_hdmi_reset_phy(vc4_hdmi); +} + +void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi) +{ + HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, + HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) & + ~VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN); +} + +void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi) +{ + HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, + HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) | + VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN); +} diff --git 
a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h new file mode 100644 index 000000000000..7c6b4818f245 --- /dev/null +++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h @@ -0,0 +1,442 @@ +#ifndef _VC4_HDMI_REGS_H_ +#define _VC4_HDMI_REGS_H_ + +#include "vc4_hdmi.h" + +#define VC4_HDMI_PACKET_STRIDE 0x24 + +enum vc4_hdmi_regs { + VC4_INVALID = 0, + VC4_HDMI, + VC4_HD, + VC5_CEC, + VC5_CSC, + VC5_DVP, + VC5_PHY, + VC5_RAM, + VC5_RM, +}; + +enum vc4_hdmi_field { + HDMI_AUDIO_PACKET_CONFIG, + HDMI_CEC_CNTRL_1, + HDMI_CEC_CNTRL_2, + HDMI_CEC_CNTRL_3, + HDMI_CEC_CNTRL_4, + HDMI_CEC_CNTRL_5, + HDMI_CEC_CPU_CLEAR, + HDMI_CEC_CPU_MASK_CLEAR, + HDMI_CEC_CPU_MASK_SET, + HDMI_CEC_CPU_MASK_STATUS, + HDMI_CEC_CPU_STATUS, + + /* + * Transmit data, first byte is low byte of the 32-bit reg. + * MSB of each byte transmitted first. + */ + HDMI_CEC_RX_DATA_1, + HDMI_CEC_RX_DATA_2, + HDMI_CEC_RX_DATA_3, + HDMI_CEC_RX_DATA_4, + HDMI_CEC_TX_DATA_1, + HDMI_CEC_TX_DATA_2, + HDMI_CEC_TX_DATA_3, + HDMI_CEC_TX_DATA_4, + HDMI_CLOCK_STOP, + HDMI_CORE_REV, + HDMI_CRP_CFG, + HDMI_CSC_12_11, + HDMI_CSC_14_13, + HDMI_CSC_22_21, + HDMI_CSC_24_23, + HDMI_CSC_32_31, + HDMI_CSC_34_33, + HDMI_CSC_CTL, + + /* + * 20-bit fields containing CTS values to be transmitted if + * !EXTERNAL_CTS_EN + */ + HDMI_CTS_0, + HDMI_CTS_1, + HDMI_DVP_CTL, + HDMI_FIFO_CTL, + HDMI_FRAME_COUNT, + HDMI_HORZA, + HDMI_HORZB, + HDMI_HOTPLUG, + HDMI_HOTPLUG_INT, + + /* + * 3 bits per field, where each field maps from that + * corresponding MAI bus channel to the given HDMI channel. + */ + HDMI_MAI_CHANNEL_MAP, + HDMI_MAI_CONFIG, + HDMI_MAI_CTL, + + /* + * Register for DMAing in audio data to be transported over + * the MAI bus to the Falcon core. + */ + HDMI_MAI_DATA, + + /* Format header to be placed on the MAI data. Unused. */ + HDMI_MAI_FMT, + + /* Last received format word on the MAI bus. 
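The "first byte is low byte of the 32-bit reg" layout described above matches the packing loops in vc4_hdmi_cec_adap_transmit() and vc4_cec_read_msg() earlier in this patch. A standalone round-trip of a toy 5-byte CEC message:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t msg[16] = { 0x05, 0x44, 0x41, 0x42, 0x43 };
	uint8_t out[16] = { 0 };
	uint32_t regs[4] = { 0 };
	unsigned i, len = 5;

	for (i = 0; i < len; i += 4)   /* transmit: pack low byte first */
		regs[i / 4] = msg[i] | msg[i + 1] << 8 |
			      msg[i + 2] << 16 | (uint32_t)msg[i + 3] << 24;

	for (i = 0; i < len; i += 4) { /* receive: unpack the same way */
		uint32_t val = regs[i / 4];

		out[i] = val & 0xff;
		out[i + 1] = (val >> 8) & 0xff;
		out[i + 2] = (val >> 16) & 0xff;
		out[i + 3] = (val >> 24) & 0xff;
	}

	printf("%s\n", memcmp(msg, out, len) ? "mismatch" : "match");
	return 0;
}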
*/ + HDMI_MAI_FORMAT, + HDMI_MAI_SMP, + HDMI_MAI_THR, + HDMI_M_CTL, + HDMI_RAM_PACKET_CONFIG, + HDMI_RAM_PACKET_START, + HDMI_RAM_PACKET_STATUS, + HDMI_RM_CONTROL, + HDMI_RM_FORMAT, + HDMI_RM_OFFSET, + HDMI_SCHEDULER_CONTROL, + HDMI_SW_RESET_CONTROL, + HDMI_TX_PHY_CHANNEL_SWAP, + HDMI_TX_PHY_CLK_DIV, + HDMI_TX_PHY_CTL_0, + HDMI_TX_PHY_CTL_1, + HDMI_TX_PHY_CTL_2, + HDMI_TX_PHY_CTL_3, + HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1, + HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2, + HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4, + HDMI_TX_PHY_PLL_CFG, + HDMI_TX_PHY_PLL_CTL_0, + HDMI_TX_PHY_PLL_CTL_1, + HDMI_TX_PHY_POWERDOWN_CTL, + HDMI_TX_PHY_RESET_CTL, + HDMI_TX_PHY_TMDS_CLK_WORD_SEL, + HDMI_VEC_INTERFACE_XBAR, + HDMI_VERTA0, + HDMI_VERTA1, + HDMI_VERTB0, + HDMI_VERTB1, + HDMI_VID_CTL, +}; + +struct vc4_hdmi_register { + char *name; + enum vc4_hdmi_regs reg; + unsigned int offset; +}; + +#define _VC4_REG(_base, _reg, _offset) \ + [_reg] = { \ + .name = #_reg, \ + .reg = _base, \ + .offset = _offset, \ + } + +#define VC4_HD_REG(reg, offset) _VC4_REG(VC4_HD, reg, offset) +#define VC4_HDMI_REG(reg, offset) _VC4_REG(VC4_HDMI, reg, offset) +#define VC5_CEC_REG(reg, offset) _VC4_REG(VC5_CEC, reg, offset) +#define VC5_CSC_REG(reg, offset) _VC4_REG(VC5_CSC, reg, offset) +#define VC5_DVP_REG(reg, offset) _VC4_REG(VC5_DVP, reg, offset) +#define VC5_PHY_REG(reg, offset) _VC4_REG(VC5_PHY, reg, offset) +#define VC5_RAM_REG(reg, offset) _VC4_REG(VC5_RAM, reg, offset) +#define VC5_RM_REG(reg, offset) _VC4_REG(VC5_RM, reg, offset) + +static const struct vc4_hdmi_register vc4_hdmi_fields[] = { + VC4_HD_REG(HDMI_M_CTL, 0x000c), + VC4_HD_REG(HDMI_MAI_CTL, 0x0014), + VC4_HD_REG(HDMI_MAI_THR, 0x0018), + VC4_HD_REG(HDMI_MAI_FMT, 0x001c), + VC4_HD_REG(HDMI_MAI_DATA, 0x0020), + VC4_HD_REG(HDMI_MAI_SMP, 0x002c), + VC4_HD_REG(HDMI_VID_CTL, 0x0038), + VC4_HD_REG(HDMI_CSC_CTL, 0x0040), + VC4_HD_REG(HDMI_CSC_12_11, 0x0044), + VC4_HD_REG(HDMI_CSC_14_13, 0x0048), + VC4_HD_REG(HDMI_CSC_22_21, 0x004c), + VC4_HD_REG(HDMI_CSC_24_23, 0x0050), + VC4_HD_REG(HDMI_CSC_32_31, 0x0054), + VC4_HD_REG(HDMI_CSC_34_33, 0x0058), + VC4_HD_REG(HDMI_FRAME_COUNT, 0x0068), + + VC4_HDMI_REG(HDMI_CORE_REV, 0x0000), + VC4_HDMI_REG(HDMI_SW_RESET_CONTROL, 0x0004), + VC4_HDMI_REG(HDMI_HOTPLUG_INT, 0x0008), + VC4_HDMI_REG(HDMI_HOTPLUG, 0x000c), + VC4_HDMI_REG(HDMI_FIFO_CTL, 0x005c), + VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x0090), + VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0094), + VC4_HDMI_REG(HDMI_MAI_FORMAT, 0x0098), + VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x009c), + VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x00a0), + VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x00a4), + VC4_HDMI_REG(HDMI_CRP_CFG, 0x00a8), + VC4_HDMI_REG(HDMI_CTS_0, 0x00ac), + VC4_HDMI_REG(HDMI_CTS_1, 0x00b0), + VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x00c0), + VC4_HDMI_REG(HDMI_HORZA, 0x00c4), + VC4_HDMI_REG(HDMI_HORZB, 0x00c8), + VC4_HDMI_REG(HDMI_VERTA0, 0x00cc), + VC4_HDMI_REG(HDMI_VERTB0, 0x00d0), + VC4_HDMI_REG(HDMI_VERTA1, 0x00d4), + VC4_HDMI_REG(HDMI_VERTB1, 0x00d8), + VC4_HDMI_REG(HDMI_CEC_CNTRL_1, 0x00e8), + VC4_HDMI_REG(HDMI_CEC_CNTRL_2, 0x00ec), + VC4_HDMI_REG(HDMI_CEC_CNTRL_3, 0x00f0), + VC4_HDMI_REG(HDMI_CEC_CNTRL_4, 0x00f4), + VC4_HDMI_REG(HDMI_CEC_CNTRL_5, 0x00f8), + VC4_HDMI_REG(HDMI_CEC_TX_DATA_1, 0x00fc), + VC4_HDMI_REG(HDMI_CEC_TX_DATA_2, 0x0100), + VC4_HDMI_REG(HDMI_CEC_TX_DATA_3, 0x0104), + VC4_HDMI_REG(HDMI_CEC_TX_DATA_4, 0x0108), + VC4_HDMI_REG(HDMI_CEC_RX_DATA_1, 0x010c), + VC4_HDMI_REG(HDMI_CEC_RX_DATA_2, 0x0110), + VC4_HDMI_REG(HDMI_CEC_RX_DATA_3, 0x0114), + VC4_HDMI_REG(HDMI_CEC_RX_DATA_4, 0x0118), + 
VC4_HDMI_REG(HDMI_TX_PHY_RESET_CTL, 0x02c0), + VC4_HDMI_REG(HDMI_TX_PHY_CTL_0, 0x02c4), + VC4_HDMI_REG(HDMI_CEC_CPU_STATUS, 0x0340), + VC4_HDMI_REG(HDMI_CEC_CPU_CLEAR, 0x0348), + VC4_HDMI_REG(HDMI_CEC_CPU_MASK_STATUS, 0x034c), + VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x0350), + VC4_HDMI_REG(HDMI_CEC_CPU_MASK_CLEAR, 0x0354), + VC4_HDMI_REG(HDMI_RAM_PACKET_START, 0x0400), +}; + +static const struct vc4_hdmi_register vc5_hdmi_hdmi0_fields[] = { + VC4_HD_REG(HDMI_DVP_CTL, 0x0000), + VC4_HD_REG(HDMI_MAI_CTL, 0x0010), + VC4_HD_REG(HDMI_MAI_THR, 0x0014), + VC4_HD_REG(HDMI_MAI_FMT, 0x0018), + VC4_HD_REG(HDMI_MAI_DATA, 0x001c), + VC4_HD_REG(HDMI_MAI_SMP, 0x0020), + VC4_HD_REG(HDMI_VID_CTL, 0x0044), + VC4_HD_REG(HDMI_FRAME_COUNT, 0x0060), + + VC4_HDMI_REG(HDMI_FIFO_CTL, 0x074), + VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x0b8), + VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x0bc), + VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x0c4), + VC4_HDMI_REG(HDMI_CRP_CFG, 0x0c8), + VC4_HDMI_REG(HDMI_CTS_0, 0x0cc), + VC4_HDMI_REG(HDMI_CTS_1, 0x0d0), + VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x0e0), + VC4_HDMI_REG(HDMI_HORZA, 0x0e4), + VC4_HDMI_REG(HDMI_HORZB, 0x0e8), + VC4_HDMI_REG(HDMI_VERTA0, 0x0ec), + VC4_HDMI_REG(HDMI_VERTB0, 0x0f0), + VC4_HDMI_REG(HDMI_VERTA1, 0x0f4), + VC4_HDMI_REG(HDMI_VERTB1, 0x0f8), + VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c), + VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0), + VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8), + + VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc), + VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f0), + + VC5_PHY_REG(HDMI_TX_PHY_RESET_CTL, 0x000), + VC5_PHY_REG(HDMI_TX_PHY_POWERDOWN_CTL, 0x004), + VC5_PHY_REG(HDMI_TX_PHY_CTL_0, 0x008), + VC5_PHY_REG(HDMI_TX_PHY_CTL_1, 0x00c), + VC5_PHY_REG(HDMI_TX_PHY_CTL_2, 0x010), + VC5_PHY_REG(HDMI_TX_PHY_CTL_3, 0x014), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CTL_0, 0x01c), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CTL_1, 0x020), + VC5_PHY_REG(HDMI_TX_PHY_CLK_DIV, 0x028), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CFG, 0x034), + VC5_PHY_REG(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, 0x044), + VC5_PHY_REG(HDMI_TX_PHY_CHANNEL_SWAP, 0x04c), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1, 0x050), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2, 0x054), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4, 0x05c), + + VC5_RM_REG(HDMI_RM_CONTROL, 0x000), + VC5_RM_REG(HDMI_RM_OFFSET, 0x018), + VC5_RM_REG(HDMI_RM_FORMAT, 0x01c), + + VC5_RAM_REG(HDMI_RAM_PACKET_START, 0x000), + + VC5_CEC_REG(HDMI_CEC_CNTRL_1, 0x010), + VC5_CEC_REG(HDMI_CEC_CNTRL_2, 0x014), + VC5_CEC_REG(HDMI_CEC_CNTRL_3, 0x018), + VC5_CEC_REG(HDMI_CEC_CNTRL_4, 0x01c), + VC5_CEC_REG(HDMI_CEC_CNTRL_5, 0x020), + VC5_CEC_REG(HDMI_CEC_TX_DATA_1, 0x028), + VC5_CEC_REG(HDMI_CEC_TX_DATA_2, 0x02c), + VC5_CEC_REG(HDMI_CEC_TX_DATA_3, 0x030), + VC5_CEC_REG(HDMI_CEC_TX_DATA_4, 0x034), + VC5_CEC_REG(HDMI_CEC_RX_DATA_1, 0x038), + VC5_CEC_REG(HDMI_CEC_RX_DATA_2, 0x03c), + VC5_CEC_REG(HDMI_CEC_RX_DATA_3, 0x040), + VC5_CEC_REG(HDMI_CEC_RX_DATA_4, 0x044), + + VC5_CSC_REG(HDMI_CSC_CTL, 0x000), + VC5_CSC_REG(HDMI_CSC_12_11, 0x004), + VC5_CSC_REG(HDMI_CSC_14_13, 0x008), + VC5_CSC_REG(HDMI_CSC_22_21, 0x00c), + VC5_CSC_REG(HDMI_CSC_24_23, 0x010), + VC5_CSC_REG(HDMI_CSC_32_31, 0x014), + VC5_CSC_REG(HDMI_CSC_34_33, 0x018), +}; + +static const struct vc4_hdmi_register vc5_hdmi_hdmi1_fields[] = { + VC4_HD_REG(HDMI_DVP_CTL, 0x0000), + VC4_HD_REG(HDMI_MAI_CTL, 0x0030), + VC4_HD_REG(HDMI_MAI_THR, 0x0034), + VC4_HD_REG(HDMI_MAI_FMT, 0x0038), + VC4_HD_REG(HDMI_MAI_DATA, 0x003c), + VC4_HD_REG(HDMI_MAI_SMP, 0x0040), + VC4_HD_REG(HDMI_VID_CTL, 0x0048), + VC4_HD_REG(HDMI_FRAME_COUNT, 0x0064), + + 
VC4_HDMI_REG(HDMI_FIFO_CTL, 0x074), + VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x0b8), + VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x0bc), + VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x0c4), + VC4_HDMI_REG(HDMI_CRP_CFG, 0x0c8), + VC4_HDMI_REG(HDMI_CTS_0, 0x0cc), + VC4_HDMI_REG(HDMI_CTS_1, 0x0d0), + VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x0e0), + VC4_HDMI_REG(HDMI_HORZA, 0x0e4), + VC4_HDMI_REG(HDMI_HORZB, 0x0e8), + VC4_HDMI_REG(HDMI_VERTA0, 0x0ec), + VC4_HDMI_REG(HDMI_VERTB0, 0x0f0), + VC4_HDMI_REG(HDMI_VERTA1, 0x0f4), + VC4_HDMI_REG(HDMI_VERTB1, 0x0f8), + VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c), + VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0), + VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8), + + VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc), + VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f0), + + VC5_PHY_REG(HDMI_TX_PHY_RESET_CTL, 0x000), + VC5_PHY_REG(HDMI_TX_PHY_POWERDOWN_CTL, 0x004), + VC5_PHY_REG(HDMI_TX_PHY_CTL_0, 0x008), + VC5_PHY_REG(HDMI_TX_PHY_CTL_1, 0x00c), + VC5_PHY_REG(HDMI_TX_PHY_CTL_2, 0x010), + VC5_PHY_REG(HDMI_TX_PHY_CTL_3, 0x014), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CTL_0, 0x01c), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CTL_1, 0x020), + VC5_PHY_REG(HDMI_TX_PHY_CLK_DIV, 0x028), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CFG, 0x034), + VC5_PHY_REG(HDMI_TX_PHY_CHANNEL_SWAP, 0x04c), + VC5_PHY_REG(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, 0x044), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1, 0x050), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2, 0x054), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4, 0x05c), + + VC5_RM_REG(HDMI_RM_CONTROL, 0x000), + VC5_RM_REG(HDMI_RM_OFFSET, 0x018), + VC5_RM_REG(HDMI_RM_FORMAT, 0x01c), + + VC5_RAM_REG(HDMI_RAM_PACKET_START, 0x000), + + VC5_CEC_REG(HDMI_CEC_CNTRL_1, 0x010), + VC5_CEC_REG(HDMI_CEC_CNTRL_2, 0x014), + VC5_CEC_REG(HDMI_CEC_CNTRL_3, 0x018), + VC5_CEC_REG(HDMI_CEC_CNTRL_4, 0x01c), + VC5_CEC_REG(HDMI_CEC_CNTRL_5, 0x020), + VC5_CEC_REG(HDMI_CEC_TX_DATA_1, 0x028), + VC5_CEC_REG(HDMI_CEC_TX_DATA_2, 0x02c), + VC5_CEC_REG(HDMI_CEC_TX_DATA_3, 0x030), + VC5_CEC_REG(HDMI_CEC_TX_DATA_4, 0x034), + VC5_CEC_REG(HDMI_CEC_RX_DATA_1, 0x038), + VC5_CEC_REG(HDMI_CEC_RX_DATA_2, 0x03c), + VC5_CEC_REG(HDMI_CEC_RX_DATA_3, 0x040), + VC5_CEC_REG(HDMI_CEC_RX_DATA_4, 0x044), + + VC5_CSC_REG(HDMI_CSC_CTL, 0x000), + VC5_CSC_REG(HDMI_CSC_12_11, 0x004), + VC5_CSC_REG(HDMI_CSC_14_13, 0x008), + VC5_CSC_REG(HDMI_CSC_22_21, 0x00c), + VC5_CSC_REG(HDMI_CSC_24_23, 0x010), + VC5_CSC_REG(HDMI_CSC_32_31, 0x014), + VC5_CSC_REG(HDMI_CSC_34_33, 0x018), +}; + +static inline +void __iomem *__vc4_hdmi_get_field_base(struct vc4_hdmi *hdmi, + enum vc4_hdmi_regs reg) +{ + switch (reg) { + case VC4_HD: + return hdmi->hd_regs; + + case VC4_HDMI: + return hdmi->hdmicore_regs; + + case VC5_CSC: + return hdmi->csc_regs; + + case VC5_CEC: + return hdmi->cec_regs; + + case VC5_DVP: + return hdmi->dvp_regs; + + case VC5_PHY: + return hdmi->phy_regs; + + case VC5_RAM: + return hdmi->ram_regs; + + case VC5_RM: + return hdmi->rm_regs; + + default: + return NULL; + } + + return NULL; +} + +static inline u32 vc4_hdmi_read(struct vc4_hdmi *hdmi, + enum vc4_hdmi_field reg) +{ + const struct vc4_hdmi_register *field; + const struct vc4_hdmi_variant *variant = hdmi->variant; + void __iomem *base; + + if (reg >= variant->num_registers) { + dev_warn(&hdmi->pdev->dev, + "Invalid register ID %u\n", reg); + return 0; + } + + field = &variant->registers[reg]; + base = __vc4_hdmi_get_field_base(hdmi, field->reg); + if (!base) { + dev_warn(&hdmi->pdev->dev, + "Unknown register ID %u\n", reg); + return 0; + } + + return readl(base + field->offset); +} +#define HDMI_READ(reg) 
vc4_hdmi_read(vc4_hdmi, reg) + +static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi, + enum vc4_hdmi_field reg, + u32 value) +{ + const struct vc4_hdmi_register *field; + const struct vc4_hdmi_variant *variant = hdmi->variant; + void __iomem *base; + + if (reg >= variant->num_registers) { + dev_warn(&hdmi->pdev->dev, + "Invalid register ID %u\n", reg); + return; + } + + field = &variant->registers[reg]; + base = __vc4_hdmi_get_field_base(hdmi, field->reg); + if (!base) + return; + + writel(value, base + field->offset); +} +#define HDMI_WRITE(reg, val) vc4_hdmi_write(vc4_hdmi, reg, val) + +#endif /* _VC4_HDMI_REGS_H_ */ diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 2d2bf59c0503..4d0a833366ce 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -19,6 +19,8 @@ * each CRTC. */ +#include <linux/bitfield.h> +#include <linux/clk.h> #include <linux/component.h> #include <linux/platform_device.h> @@ -160,6 +162,7 @@ static void vc4_hvs_lut_load(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); u32 i; /* The LUT memory is laid out with each HVS channel in order, @@ -168,7 +171,7 @@ static void vc4_hvs_lut_load(struct drm_crtc *crtc) */ HVS_WRITE(SCALER_GAMADDR, SCALER_GAMADDR_AUTOINC | - (vc4_crtc->channel * 3 * crtc->gamma_size)); + (vc4_state->assigned_channel * 3 * crtc->gamma_size)); for (i = 0; i < crtc->gamma_size; i++) HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]); @@ -194,6 +197,135 @@ static void vc4_hvs_update_gamma_lut(struct drm_crtc *crtc) vc4_hvs_lut_load(crtc); } +int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + u32 reg; + int ret; + + if (!vc4->hvs->hvs5) + return output; + + switch (output) { + case 0: + return 0; + + case 1: + return 1; + + case 2: + reg = HVS_READ(SCALER_DISPECTRL); + ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg); + if (ret == 0) + return 2; + + return 0; + + case 3: + reg = HVS_READ(SCALER_DISPCTRL); + ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; + + return ret; + + case 4: + reg = HVS_READ(SCALER_DISPEOLN); + ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; + + return ret; + + case 5: + reg = HVS_READ(SCALER_DISPDITHER); + ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; + + return ret; + + default: + return -EPIPE; + } +} + +static int vc4_hvs_init_channel(struct vc4_dev *vc4, struct drm_crtc *crtc, + struct drm_display_mode *mode, bool oneshot) +{ + struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state); + unsigned int chan = vc4_crtc_state->assigned_channel; + bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE; + u32 dispbkgndx; + u32 dispctrl; + + HVS_WRITE(SCALER_DISPCTRLX(chan), 0); + HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET); + HVS_WRITE(SCALER_DISPCTRLX(chan), 0); + + /* Turn on the scaler, which will wait for vstart to start + * compositing. + * When feeding the transposer, we should operate in oneshot + * mode. + */ + dispctrl = SCALER_DISPCTRLX_ENABLE; + + if (!vc4->hvs->hvs5) + dispctrl |= VC4_SET_FIELD(mode->hdisplay, + SCALER_DISPCTRLX_WIDTH) | + VC4_SET_FIELD(mode->vdisplay, + SCALER_DISPCTRLX_HEIGHT) | + (oneshot ? 
SCALER_DISPCTRLX_ONESHOT : 0); + else + dispctrl |= VC4_SET_FIELD(mode->hdisplay, + SCALER5_DISPCTRLX_WIDTH) | + VC4_SET_FIELD(mode->vdisplay, + SCALER5_DISPCTRLX_HEIGHT) | + (oneshot ? SCALER5_DISPCTRLX_ONESHOT : 0); + + HVS_WRITE(SCALER_DISPCTRLX(chan), dispctrl); + + dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan)); + dispbkgndx &= ~SCALER_DISPBKGND_GAMMA; + dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE; + + HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx | + SCALER_DISPBKGND_AUTOHS | + ((!vc4->hvs->hvs5) ? SCALER_DISPBKGND_GAMMA : 0) | + (interlace ? SCALER_DISPBKGND_INTERLACE : 0)); + + /* Reload the LUT, since the SRAMs would have been disabled if + * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once. + */ + vc4_hvs_lut_load(crtc); + + return 0; +} + +void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int chan) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + + if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)) + return; + + HVS_WRITE(SCALER_DISPCTRLX(chan), + HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET); + HVS_WRITE(SCALER_DISPCTRLX(chan), + HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE); + + /* Once we leave, the scaler should be disabled and its fifo empty. */ + WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET); + + WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)), + SCALER_DISPSTATX_MODE) != + SCALER_DISPSTATX_MODE_DISABLED); + + WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) & + (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) != + SCALER_DISPSTATX_EMPTY); +} + int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -248,12 +380,12 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc) crtc->state->event = NULL; } - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel), vc4_state->mm.start); spin_unlock_irqrestore(&dev->event_lock, flags); } else { - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel), vc4_state->mm.start); } } @@ -263,59 +395,22 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); struct drm_display_mode *mode = &crtc->state->adjusted_mode; bool oneshot = vc4_state->feed_txp; - u32 dispctrl; vc4_hvs_update_dlist(crtc); - - /* Turn on the scaler, which will wait for vstart to start - * compositing. - * When feeding the transposer, we should operate in oneshot - * mode. - */ - dispctrl = SCALER_DISPCTRLX_ENABLE; - dispctrl |= VC4_SET_FIELD(mode->hdisplay, - SCALER_DISPCTRLX_WIDTH) | - VC4_SET_FIELD(mode->vdisplay, - SCALER_DISPCTRLX_HEIGHT) | - (oneshot ? 
SCALER_DISPCTRLX_ONESHOT : 0); - - HVS_WRITE(SCALER_DISPCTRLX(vc4_crtc->channel), dispctrl); + vc4_hvs_init_channel(vc4, crtc, mode, oneshot); } void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct drm_device *dev = crtc->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - u32 chan = vc4_crtc->channel; + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state); + unsigned int chan = vc4_state->assigned_channel; - if (HVS_READ(SCALER_DISPCTRLX(chan)) & - SCALER_DISPCTRLX_ENABLE) { - HVS_WRITE(SCALER_DISPCTRLX(chan), - SCALER_DISPCTRLX_RESET); - - /* While the docs say that reset is self-clearing, it - * seems it doesn't actually. - */ - HVS_WRITE(SCALER_DISPCTRLX(chan), 0); - } - - /* Once we leave, the scaler should be disabled and its fifo empty. */ - - WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET); - - WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)), - SCALER_DISPSTATX_MODE) != - SCALER_DISPSTATX_MODE_DISABLED); - - WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) & - (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) != - SCALER_DISPSTATX_EMPTY); + vc4_hvs_stop_channel(dev, chan); } void vc4_hvs_atomic_flush(struct drm_crtc *crtc, @@ -323,7 +418,6 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); struct drm_plane *plane; struct vc4_plane_state *vc4_plane_state; @@ -365,8 +459,8 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, /* This sets a black background color fill, as is the case * with other DRM drivers. */ - HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), - HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel)) | + HVS_WRITE(SCALER_DISPBKGNDX(vc4_state->assigned_channel), + HVS_READ(SCALER_DISPBKGNDX(vc4_state->assigned_channel)) | SCALER_DISPBKGND_FILL); /* Only update DISPLIST if the CRTC was already running and is not @@ -380,7 +474,7 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, vc4_hvs_update_dlist(crtc); if (crtc->state->color_mgmt_changed) { - u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel)); + u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(vc4_state->assigned_channel)); if (crtc->state->gamma_lut) { vc4_hvs_update_gamma_lut(crtc); @@ -392,7 +486,7 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, */ dispbkgndx &= ~SCALER_DISPBKGND_GAMMA; } - HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), dispbkgndx); + HVS_WRITE(SCALER_DISPBKGNDX(vc4_state->assigned_channel), dispbkgndx); } if (debug_dump_regs) { @@ -401,50 +495,6 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, } } -void vc4_hvs_mode_set_nofb(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); - struct drm_display_mode *mode = &crtc->state->adjusted_mode; - bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE; - - if (vc4_crtc->data->hvs_channel == 2) { - u32 dispctrl; - u32 dsp3_mux; - - /* - * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to - * FIFO X'. - * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'. - * - * DSP3 is connected to FIFO2 unless the transposer is - * enabled. In this case, FIFO 2 is directly accessed by the - * TXP IP, and we need to disable the FIFO2 -> pixelvalve1 - * route. 
- */ - if (vc4_state->feed_txp) - dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX); - else - dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); - - dispctrl = HVS_READ(SCALER_DISPCTRL) & - ~SCALER_DISPCTRL_DSP3_MUX_MASK; - HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux); - } - - HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), - SCALER_DISPBKGND_AUTOHS | - SCALER_DISPBKGND_GAMMA | - (interlace ? SCALER_DISPBKGND_INTERLACE : 0)); - - /* Reload the LUT, since the SRAMs would have been disabled if - * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once. - */ - vc4_hvs_lut_load(crtc); -} - void vc4_hvs_mask_underrun(struct drm_device *dev, int channel) { struct vc4_dev *vc4 = to_vc4_dev(dev); @@ -521,6 +571,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) hvs->pdev = pdev; + if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm2711-hvs")) + hvs->hvs5 = true; + hvs->regs = vc4_ioremap_regs(pdev, 0); if (IS_ERR(hvs->regs)) return PTR_ERR(hvs->regs); @@ -529,7 +582,24 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) hvs->regset.regs = hvs_regs; hvs->regset.nregs = ARRAY_SIZE(hvs_regs); - hvs->dlist = hvs->regs + SCALER_DLIST_START; + if (hvs->hvs5) { + hvs->core_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(hvs->core_clk)) { + dev_err(&pdev->dev, "Couldn't get core clock\n"); + return PTR_ERR(hvs->core_clk); + } + + ret = clk_prepare_enable(hvs->core_clk); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable the core clock\n"); + return ret; + } + } + + if (!hvs->hvs5) + hvs->dlist = hvs->regs + SCALER_DLIST_START; + else + hvs->dlist = hvs->regs + SCALER5_DLIST_START; spin_lock_init(&hvs->mm_lock); @@ -547,7 +617,12 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) * between planes when they don't overlap on the screen, but * for now we just allocate globally. */ - drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024); + if (!hvs->hvs5) + /* 96kB */ + drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024); + else + /* 70k words */ + drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024); /* Upload filter kernels. We only have the one for now, so we * keep it around for the lifetime of the driver. @@ -605,6 +680,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master, { struct drm_device *drm = dev_get_drvdata(master); struct vc4_dev *vc4 = drm->dev_private; + struct vc4_hvs *hvs = vc4->hvs; if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter)) drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter); @@ -612,6 +688,8 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master, drm_mm_takedown(&vc4->hvs->dlist_mm); drm_mm_takedown(&vc4->hvs->lbm_mm); + clk_disable_unprepare(hvs->core_clk); + vc4->hvs = NULL; } @@ -632,6 +710,7 @@ static int vc4_hvs_dev_remove(struct platform_device *pdev) } static const struct of_device_id vc4_hvs_dt_match[] = { + { .compatible = "brcm,bcm2711-hvs" }, { .compatible = "brcm,bcm2835-hvs" }, {} }; diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 08318e69061b..149825ff5df8 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -11,6 +11,8 @@ * crtc, HDMI encoder). 
*/ +#include <linux/clk.h> + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> @@ -144,22 +146,130 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state) VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO)); } +static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + unsigned int i; + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); + u32 dispctrl; + u32 dsp3_mux; + + if (!crtc_state->active) + continue; + + if (vc4_state->assigned_channel != 2) + continue; + + /* + * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to + * FIFO X'. + * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'. + * + * DSP3 is connected to FIFO2 unless the transposer is + * enabled. In this case, FIFO 2 is directly accessed by the + * TXP IP, and we need to disable the FIFO2 -> pixelvalve1 + * route. + */ + if (vc4_state->feed_txp) + dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX); + else + dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); + + dispctrl = HVS_READ(SCALER_DISPCTRL) & + ~SCALER_DISPCTRL_DSP3_MUX_MASK; + HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux); + } +} + +static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + unsigned char dsp2_mux = 0; + unsigned char dsp3_mux = 3; + unsigned char dsp4_mux = 3; + unsigned char dsp5_mux = 3; + unsigned int i; + u32 reg; + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + + if (!crtc_state->active) + continue; + + switch (vc4_crtc->data->hvs_output) { + case 2: + dsp2_mux = (vc4_state->assigned_channel == 2) ? 
0 : 1; + break; + + case 3: + dsp3_mux = vc4_state->assigned_channel; + break; + + case 4: + dsp4_mux = vc4_state->assigned_channel; + break; + + case 5: + dsp5_mux = vc4_state->assigned_channel; + break; + + default: + break; + } + } + + reg = HVS_READ(SCALER_DISPECTRL); + HVS_WRITE(SCALER_DISPECTRL, + (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) | + VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX)); + + reg = HVS_READ(SCALER_DISPCTRL); + HVS_WRITE(SCALER_DISPCTRL, + (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) | + VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX)); + + reg = HVS_READ(SCALER_DISPEOLN); + HVS_WRITE(SCALER_DISPEOLN, + (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) | + VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX)); + + reg = HVS_READ(SCALER_DISPDITHER); + HVS_WRITE(SCALER_DISPDITHER, + (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) | + VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX)); +} + static void vc4_atomic_complete_commit(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc; + struct vc4_hvs *hvs = vc4->hvs; + struct drm_crtc_state *new_crtc_state; + struct drm_crtc *crtc; int i; - for (i = 0; i < dev->mode_config.num_crtc; i++) { - if (!state->crtcs[i].ptr || !state->crtcs[i].commit) + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + struct vc4_crtc_state *vc4_crtc_state; + + if (!new_crtc_state->commit) continue; - vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr); - vc4_hvs_mask_underrun(dev, vc4_crtc->channel); + vc4_crtc_state = to_vc4_crtc_state(new_crtc_state); + vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel); } + if (vc4->hvs->hvs5) + clk_set_min_rate(hvs->core_clk, 500000000); + drm_atomic_helper_wait_for_fences(dev, state, false); drm_atomic_helper_wait_for_dependencies(state); @@ -168,6 +278,11 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state) vc4_ctm_commit(vc4, state); + if (vc4->hvs->hvs5) + vc5_hvs_pv_muxing_commit(vc4, state); + else + vc4_hvs_pv_muxing_commit(vc4, state); + drm_atomic_helper_commit_planes(dev, state, 0); drm_atomic_helper_commit_modeset_enables(dev, state); @@ -182,6 +297,9 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state) drm_atomic_helper_commit_cleanup_done(state); + if (vc4->hvs->hvs5) + clk_set_min_rate(hvs->core_clk, 0); + drm_atomic_state_put(state); up(&vc4->async_modeset); @@ -374,8 +492,11 @@ vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) /* CTM is being enabled or the matrix changed. */ if (new_crtc_state->ctm) { + struct vc4_crtc_state *vc4_crtc_state = + to_vc4_crtc_state(new_crtc_state); + /* fifo is 1-based since 0 disables CTM. */ - int fifo = to_vc4_crtc(crtc)->channel + 1; + int fifo = vc4_crtc_state->assigned_channel + 1; /* Check userland isn't trying to turn on CTM for more * than one CRTC at a time. 
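For illustration, the DSP2-DSP5 mux fields programmed by vc5_hvs_pv_muxing_commit() above are the same fields that vc4_hvs_get_fifo_from_output() decodes on the read side, where a mux value of 3 means the output is disabled and any other value names the FIFO feeding it. A minimal sketch of that decode for output 4, with a hypothetical helper name and assuming the usual vc4 HVS_READ() environment:

static int fifo_for_dsp4_sketch(struct vc4_dev *vc4)
{
	/* FIELD_GET() from <linux/bitfield.h> wants a constant mask,
	 * hence one helper per output; this mirrors case 4 of
	 * vc4_hvs_get_fifo_from_output() above.
	 */
	u32 mux = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK,
			    HVS_READ(SCALER_DISPEOLN));

	/* a mux value of 3 encodes "output 4 disabled" */
	return mux == 3 ? -EPIPE : mux;
}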
@@ -415,6 +536,9 @@ static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state) struct drm_plane *plane; int i; + if (!vc4->load_tracker_available) + return 0; + priv_state = drm_atomic_get_private_obj_state(state, &vc4->load_tracker); if (IS_ERR(priv_state)) @@ -485,10 +609,87 @@ static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = { .atomic_destroy_state = vc4_load_tracker_destroy_state, }; +#define NUM_OUTPUTS 6 +#define NUM_CHANNELS 3 + static int vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { - int ret; + unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0); + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_crtc *crtc; + int i, ret; + + /* + * Since the HVS FIFOs are shared across all the pixelvalves and + * the TXP (and thus all the CRTCs), we need to pull the current + * state of all the enabled CRTCs so that an update to a single + * CRTC still keeps the previous FIFOs enabled and assigned to + * the same CRTCs, instead of evaluating only the CRTC being + * modified. + */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct drm_crtc_state *crtc_state; + + if (!crtc->state->enable) + continue; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct vc4_crtc_state *new_vc4_crtc_state = + to_vc4_crtc_state(new_crtc_state); + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + unsigned int matching_channels; + + if (old_crtc_state->enable && !new_crtc_state->enable) + new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED; + + if (!new_crtc_state->enable) + continue; + + if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) { + unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel); + continue; + } + + /* + * The problem we have to solve here is that we have + * up to 7 encoders, connected to up to 6 CRTCs. + * + * Those CRTCs, depending on the instance, can be + * routed to 1, 2 or 3 HVS FIFOs, and we need to + * change the muxing between FIFOs and outputs in + * the HVS accordingly. + * + * It would be pretty hard to come up with an + * algorithm that would generically solve + * this. However, the current routing trees we support + * allow us to simplify the problem a bit. + * + * Indeed, with the currently supported layouts, if we + * try to assign the FIFOs in ascending CRTC index + * order, we can't fall into the situation where an + * earlier CRTC that had multiple routes is assigned + * one that was the only option for a later CRTC. + * + * If a future layout no longer gives us that + * property, we will need something smarter, but it + * works so far. + */ + matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels; + if (matching_channels) { + unsigned int channel = ffs(matching_channels) - 1; + + new_vc4_crtc_state->assigned_channel = channel; + unassigned_channels &= ~BIT(channel); + } else { + return -EINVAL; + } + } ret = vc4_ctm_atomic_check(dev, state); if (ret < 0) @@ -512,12 +713,18 @@ int vc4_kms_load(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_ctm_state *ctm_state; struct vc4_load_tracker_state *load_state; + bool is_vc5 = of_device_is_compatible(dev->dev->of_node, + "brcm,bcm2711-vc5"); int ret; - /* Start with the load tracker enabled. 
Can be disabled through the - * debugfs load_tracker file. - */ - vc4->load_tracker_enabled = true; + if (!is_vc5) { + vc4->load_tracker_available = true; + + /* Start with the load tracker enabled. Can be + * disabled through the debugfs load_tracker file. + */ + vc4->load_tracker_enabled = true; + } sema_init(&vc4->async_modeset, 1); @@ -531,8 +738,14 @@ int vc4_kms_load(struct drm_device *dev) return ret; } - dev->mode_config.max_width = 2048; - dev->mode_config.max_height = 2048; + if (is_vc5) { + dev->mode_config.max_width = 7680; + dev->mode_config.max_height = 7680; + } else { + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + } + dev->mode_config.funcs = &vc4_mode_funcs; dev->mode_config.preferred_depth = 24; dev->mode_config.async_page_flip = true; @@ -547,14 +760,17 @@ int vc4_kms_load(struct drm_device *dev) drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base, &vc4_ctm_state_funcs); - load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); - if (!load_state) { - drm_atomic_private_obj_fini(&vc4->ctm_manager); - return -ENOMEM; - } + if (vc4->load_tracker_available) { + load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); + if (!load_state) { + drm_atomic_private_obj_fini(&vc4->ctm_manager); + return -ENOMEM; + } - drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base, - &vc4_load_tracker_state_funcs); + drm_atomic_private_obj_init(dev, &vc4->load_tracker, + &load_state->base, + &vc4_load_tracker_state_funcs); + } drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index d040d9f12c6d..6b39cc2ca18d 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -32,45 +32,60 @@ static const struct hvs_format { u32 drm; /* DRM_FORMAT_* */ u32 hvs; /* HVS_FORMAT_* */ u32 pixel_order; + u32 pixel_order_hvs5; } hvs_formats[] = { { - .drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888, + .drm = DRM_FORMAT_XRGB8888, + .hvs = HVS_PIXEL_FORMAT_RGBA8888, .pixel_order = HVS_PIXEL_ORDER_ABGR, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB, }, { - .drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888, + .drm = DRM_FORMAT_ARGB8888, + .hvs = HVS_PIXEL_FORMAT_RGBA8888, .pixel_order = HVS_PIXEL_ORDER_ABGR, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB, }, { - .drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888, + .drm = DRM_FORMAT_ABGR8888, + .hvs = HVS_PIXEL_FORMAT_RGBA8888, .pixel_order = HVS_PIXEL_ORDER_ARGB, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR, }, { - .drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888, + .drm = DRM_FORMAT_XBGR8888, + .hvs = HVS_PIXEL_FORMAT_RGBA8888, .pixel_order = HVS_PIXEL_ORDER_ARGB, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR, }, { - .drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565, + .drm = DRM_FORMAT_RGB565, + .hvs = HVS_PIXEL_FORMAT_RGB565, .pixel_order = HVS_PIXEL_ORDER_XRGB, }, { - .drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565, + .drm = DRM_FORMAT_BGR565, + .hvs = HVS_PIXEL_FORMAT_RGB565, .pixel_order = HVS_PIXEL_ORDER_XBGR, }, { - .drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551, + .drm = DRM_FORMAT_ARGB1555, + .hvs = HVS_PIXEL_FORMAT_RGBA5551, .pixel_order = HVS_PIXEL_ORDER_ABGR, }, { - .drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551, + .drm = DRM_FORMAT_XRGB1555, + .hvs = HVS_PIXEL_FORMAT_RGBA5551, .pixel_order = HVS_PIXEL_ORDER_ABGR, }, { - .drm = DRM_FORMAT_RGB888, .hvs = HVS_PIXEL_FORMAT_RGB888, + .drm = DRM_FORMAT_RGB888, + .hvs = 
HVS_PIXEL_FORMAT_RGB888, .pixel_order = HVS_PIXEL_ORDER_XRGB, }, { - .drm = DRM_FORMAT_BGR888, .hvs = HVS_PIXEL_FORMAT_RGB888, + .drm = DRM_FORMAT_BGR888, + .hvs = HVS_PIXEL_FORMAT_RGB888, .pixel_order = HVS_PIXEL_ORDER_XBGR, }, { @@ -422,10 +437,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst) static u32 vc4_lbm_size(struct drm_plane_state *state) { struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); - /* This is the worst case number. One of the two sizes will - * be used depending on the scaling configuration. - */ - u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w); + u32 pix_per_line; u32 lbm; /* LBM is not needed when there's no vertical scaling. */ @@ -433,6 +445,18 @@ static u32 vc4_lbm_size(struct drm_plane_state *state) vc4_state->y_scaling[1] == VC4_SCALING_NONE) return 0; + /* + * This can be further optimized in the RGB/YUV444 case if the PPF + * decimation factor is between 0.5 and 1.0 by using crtc_w. + * + * It's not an issue though, since in that case src_w[0] is going + * to be greater than or equal to crtc_w. + */ + if (vc4_state->x_scaling[0] == VC4_SCALING_TPZ) + pix_per_line = vc4_state->crtc_w; + else + pix_per_line = vc4_state->src_w[0]; + if (!vc4_state->is_yuv) { if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ) lbm = pix_per_line * 8; @@ -492,6 +516,11 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) struct vc4_plane_state *vc4_state; struct drm_crtc_state *crtc_state; unsigned int vscale_factor; + struct vc4_dev *vc4; + + vc4 = to_vc4_dev(state->plane->dev); + if (!vc4->load_tracker_available) + return; vc4_state = to_vc4_plane_state(state); crtc_state = drm_atomic_get_existing_crtc_state(state->state, @@ -563,7 +592,9 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags); ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm, &vc4_state->lbm, - lbm_size, 32, 0, 0); + lbm_size, + vc4->hvs->hvs5 ? 64 : 32, + 0, 0); spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags); if (ret) @@ -776,35 +807,6 @@ static int vc4_plane_mode_set(struct drm_plane *plane, return -EINVAL; } - /* Control word */ - vc4_dlist_write(vc4_state, - SCALER_CTL0_VALID | - (rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) | - (rotation & DRM_MODE_REFLECT_Y ? SCALER_CTL0_VFLIP : 0) | - VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) | - (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) | - (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) | - VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) | - (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) | - VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) | - VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1)); - - /* Position Word 0: Image Positions and Alpha Value */ - vc4_state->pos0_offset = vc4_state->dlist_count; - vc4_dlist_write(vc4_state, - VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) | - VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) | - VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y)); - - /* Position Word 1: Scaled Image Dimensions. */ - if (!vc4_state->is_unity) { - vc4_dlist_write(vc4_state, - VC4_SET_FIELD(vc4_state->crtc_w, - SCALER_POS1_SCL_WIDTH) | - VC4_SET_FIELD(vc4_state->crtc_h, - SCALER_POS1_SCL_HEIGHT)); - } - /* Don't waste cycles mixing with plane alpha if the set alpha * is opaque or there is no per-pixel alpha information. * In any case we use the alpha property value as the fixed alpha. 
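A worked example of the vc4_lbm_size() change above, with hypothetical numbers: take an RGB plane TPZ-downscaled from 3840 to 1920 pixels wide. x_scaling[0] is VC4_SCALING_TPZ, so pix_per_line is now crtc_w (1920) instead of src_w[0] (3840), and the RGB vertical-TPZ branch reserves

	lbm = pix_per_line * 8 = 1920 * 8 = 15360

which is half of what the previous max(src_w[0], crtc_w) worst case would have requested for the same plane.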
@@ -812,20 +814,120 @@ static int vc4_plane_mode_set(struct drm_plane *plane, mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE && fb->format->has_alpha; - /* Position Word 2: Source Image Size, Alpha */ - vc4_state->pos2_offset = vc4_state->dlist_count; - vc4_dlist_write(vc4_state, - VC4_SET_FIELD(fb->format->has_alpha ? - SCALER_POS2_ALPHA_MODE_PIPELINE : - SCALER_POS2_ALPHA_MODE_FIXED, - SCALER_POS2_ALPHA_MODE) | - (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) | - (fb->format->has_alpha ? SCALER_POS2_ALPHA_PREMULT : 0) | - VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) | - VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT)); + if (!vc4->hvs->hvs5) { + /* Control word */ + vc4_dlist_write(vc4_state, + SCALER_CTL0_VALID | + (rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) | + (rotation & DRM_MODE_REFLECT_Y ? SCALER_CTL0_VFLIP : 0) | + VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) | + (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) | + (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) | + VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) | + (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) | + VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) | + VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1)); + + /* Position Word 0: Image Positions and Alpha Value */ + vc4_state->pos0_offset = vc4_state->dlist_count; + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) | + VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) | + VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y)); + + /* Position Word 1: Scaled Image Dimensions. */ + if (!vc4_state->is_unity) { + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(vc4_state->crtc_w, + SCALER_POS1_SCL_WIDTH) | + VC4_SET_FIELD(vc4_state->crtc_h, + SCALER_POS1_SCL_HEIGHT)); + } + + /* Position Word 2: Source Image Size, Alpha */ + vc4_state->pos2_offset = vc4_state->dlist_count; + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(fb->format->has_alpha ? + SCALER_POS2_ALPHA_MODE_PIPELINE : + SCALER_POS2_ALPHA_MODE_FIXED, + SCALER_POS2_ALPHA_MODE) | + (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) | + (fb->format->has_alpha ? + SCALER_POS2_ALPHA_PREMULT : 0) | + VC4_SET_FIELD(vc4_state->src_w[0], + SCALER_POS2_WIDTH) | + VC4_SET_FIELD(vc4_state->src_h[0], + SCALER_POS2_HEIGHT)); + + /* Position Word 3: Context. Written by the HVS. */ + vc4_dlist_write(vc4_state, 0xc0c0c0c0); - /* Position Word 3: Context. Written by the HVS. */ - vc4_dlist_write(vc4_state, 0xc0c0c0c0); + } else { + u32 hvs_pixel_order = format->pixel_order; + + if (format->pixel_order_hvs5) + hvs_pixel_order = format->pixel_order_hvs5; + + /* Control word */ + vc4_dlist_write(vc4_state, + SCALER_CTL0_VALID | + (hvs_pixel_order << SCALER_CTL0_ORDER_SHIFT) | + (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) | + VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) | + (vc4_state->is_unity ? + SCALER5_CTL0_UNITY : 0) | + VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) | + VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1) | + SCALER5_CTL0_ALPHA_EXPAND | + SCALER5_CTL0_RGB_EXPAND); + + /* Position Word 0: Image Positions and Alpha Value */ + vc4_state->pos0_offset = vc4_state->dlist_count; + vc4_dlist_write(vc4_state, + (rotation & DRM_MODE_REFLECT_Y ? + SCALER5_POS0_VFLIP : 0) | + VC4_SET_FIELD(vc4_state->crtc_x, + SCALER_POS0_START_X) | + (rotation & DRM_MODE_REFLECT_X ? 
+ SCALER5_POS0_HFLIP : 0) | + VC4_SET_FIELD(vc4_state->crtc_y, + SCALER5_POS0_START_Y) + ); + + /* Control Word 2 */ + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(state->alpha >> 4, + SCALER5_CTL2_ALPHA) | + (fb->format->has_alpha ? + SCALER5_CTL2_ALPHA_PREMULT : 0) | + (mix_plane_alpha ? + SCALER5_CTL2_ALPHA_MIX : 0) | + VC4_SET_FIELD(fb->format->has_alpha ? + SCALER5_CTL2_ALPHA_MODE_PIPELINE : + SCALER5_CTL2_ALPHA_MODE_FIXED, + SCALER5_CTL2_ALPHA_MODE) + ); + + /* Position Word 1: Scaled Image Dimensions. */ + if (!vc4_state->is_unity) { + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(vc4_state->crtc_w, + SCALER_POS1_SCL_WIDTH) | + VC4_SET_FIELD(vc4_state->crtc_h, + SCALER_POS1_SCL_HEIGHT)); + } + + /* Position Word 2: Source Image Size */ + vc4_state->pos2_offset = vc4_state->dlist_count; + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(vc4_state->src_w[0], + SCALER5_POS2_WIDTH) | + VC4_SET_FIELD(vc4_state->src_h[0], + SCALER5_POS2_HEIGHT)); + + /* Position Word 3: Context. Written by the HVS. */ + vc4_dlist_write(vc4_state, 0xc0c0c0c0); + } /* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers @@ -1203,6 +1305,10 @@ static bool vc4_format_mod_supported(struct drm_plane *plane, default: return false; } + case DRM_FORMAT_RGBX1010102: + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: case DRM_FORMAT_YUV422: case DRM_FORMAT_YVU422: case DRM_FORMAT_YUV420: @@ -1255,6 +1361,8 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, &vc4_plane_funcs, formats, ARRAY_SIZE(formats), modifiers, type, NULL); + if (ret) + return ERR_PTR(ret); drm_plane_helper_add(plane, &vc4_plane_helper_funcs); @@ -1283,7 +1391,7 @@ int vc4_plane_create_additional_planes(struct drm_device *drm) * modest number of planes to expose, that should hopefully * still cover any sane usecase. 
*/ - for (i = 0; i < 8; i++) { + for (i = 0; i < 16; i++) { struct drm_plane *plane = vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY); diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 324462cc9cd4..be2c32a519b3 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -129,6 +129,8 @@ #define V3D_ERRSTAT 0x00f20 #define PV_CONTROL 0x00 +# define PV5_CONTROL_FIFO_LEVEL_HIGH_MASK VC4_MASK(26, 25) +# define PV5_CONTROL_FIFO_LEVEL_HIGH_SHIFT 25 # define PV_CONTROL_FORMAT_MASK VC4_MASK(23, 21) # define PV_CONTROL_FORMAT_SHIFT 21 # define PV_CONTROL_FORMAT_24 0 @@ -208,6 +210,11 @@ #define PV_HACT_ACT 0x30 +#define PV_MUX_CFG 0x34 +# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_MASK VC4_MASK(5, 2) +# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_SHIFT 2 +# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP 8 + #define SCALER_CHANNELS_COUNT 3 #define SCALER_DISPCTRL 0x00000000 @@ -286,9 +293,19 @@ #define SCALER_DISPID 0x00000008 #define SCALER_DISPECTRL 0x0000000c +# define SCALER_DISPECTRL_DSP2_MUX_SHIFT 31 +# define SCALER_DISPECTRL_DSP2_MUX_MASK VC4_MASK(31, 31) + #define SCALER_DISPPROF 0x00000010 + #define SCALER_DISPDITHER 0x00000014 +# define SCALER_DISPDITHER_DSP5_MUX_SHIFT 30 +# define SCALER_DISPDITHER_DSP5_MUX_MASK VC4_MASK(31, 30) + #define SCALER_DISPEOLN 0x00000018 +# define SCALER_DISPEOLN_DSP4_MUX_SHIFT 30 +# define SCALER_DISPEOLN_DSP4_MUX_MASK VC4_MASK(31, 30) + #define SCALER_DISPLIST0 0x00000020 #define SCALER_DISPLIST1 0x00000024 #define SCALER_DISPLIST2 0x00000028 @@ -327,6 +344,20 @@ # define SCALER_DISPCTRLX_HEIGHT_MASK VC4_MASK(11, 0) # define SCALER_DISPCTRLX_HEIGHT_SHIFT 0 +# define SCALER5_DISPCTRLX_WIDTH_MASK VC4_MASK(28, 16) +# define SCALER5_DISPCTRLX_WIDTH_SHIFT 16 +/* Generates a single frame when VSTART is seen and stops at the last + * pixel read from the FIFO. + */ +# define SCALER5_DISPCTRLX_ONESHOT BIT(15) +/* Processes a single context in the dlist and then task switch, + * instead of an entire line. + */ +# define SCALER5_DISPCTRLX_ONECTX_MASK VC4_MASK(14, 13) +# define SCALER5_DISPCTRLX_ONECTX_SHIFT 13 +# define SCALER5_DISPCTRLX_HEIGHT_MASK VC4_MASK(12, 0) +# define SCALER5_DISPCTRLX_HEIGHT_SHIFT 0 + #define SCALER_DISPBKGND0 0x00000044 # define SCALER_DISPBKGND_AUTOHS BIT(31) # define SCALER_DISPBKGND_INTERLACE BIT(30) @@ -460,32 +491,18 @@ #define SCALER_DLIST_START 0x00002000 #define SCALER_DLIST_SIZE 0x00004000 -#define VC4_HDMI_CORE_REV 0x000 +#define SCALER5_DLIST_START 0x00004000 -#define VC4_HDMI_SW_RESET_CONTROL 0x004 # define VC4_HDMI_SW_RESET_FORMAT_DETECT BIT(1) # define VC4_HDMI_SW_RESET_HDMI BIT(0) -#define VC4_HDMI_HOTPLUG_INT 0x008 - -#define VC4_HDMI_HOTPLUG 0x00c # define VC4_HDMI_HOTPLUG_CONNECTED BIT(0) -/* 3 bits per field, where each field maps from that corresponding MAI - * bus channel to the given HDMI channel. - */ -#define VC4_HDMI_MAI_CHANNEL_MAP 0x090 - -#define VC4_HDMI_MAI_CONFIG 0x094 # define VC4_HDMI_MAI_CONFIG_FORMAT_REVERSE BIT(27) # define VC4_HDMI_MAI_CONFIG_BIT_REVERSE BIT(26) # define VC4_HDMI_MAI_CHANNEL_MASK_MASK VC4_MASK(15, 0) # define VC4_HDMI_MAI_CHANNEL_MASK_SHIFT 0 -/* Last received format word on the MAI bus. 
*/ -#define VC4_HDMI_MAI_FORMAT 0x098 - -#define VC4_HDMI_AUDIO_PACKET_CONFIG 0x09c # define VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_SAMPLE_FLAT BIT(29) # define VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_INACTIVE_CHANNELS BIT(24) # define VC4_HDMI_AUDIO_PACKET_FORCE_SAMPLE_PRESENT BIT(19) @@ -499,12 +516,8 @@ # define VC4_HDMI_AUDIO_PACKET_CEA_MASK_MASK VC4_MASK(7, 0) # define VC4_HDMI_AUDIO_PACKET_CEA_MASK_SHIFT 0 -#define VC4_HDMI_RAM_PACKET_CONFIG 0x0a0 # define VC4_HDMI_RAM_PACKET_ENABLE BIT(16) -#define VC4_HDMI_RAM_PACKET_STATUS 0x0a4 - -#define VC4_HDMI_CRP_CFG 0x0a8 /* When set, the CTS_PERIOD counts based on MAI bus sync pulse instead * of pixel clock. */ @@ -518,23 +531,12 @@ # define VC4_HDMI_CRP_CFG_N_MASK VC4_MASK(19, 0) # define VC4_HDMI_CRP_CFG_N_SHIFT 0 -/* 20-bit fields containing CTS values to be transmitted if !EXTERNAL_CTS_EN */ -#define VC4_HDMI_CTS_0 0x0ac -#define VC4_HDMI_CTS_1 0x0b0 -/* 20-bit fields containing number of clocks to send CTS0/1 before - * switching to the other one. - */ -#define VC4_HDMI_CTS_PERIOD_0 0x0b4 -#define VC4_HDMI_CTS_PERIOD_1 0x0b8 - -#define VC4_HDMI_HORZA 0x0c4 # define VC4_HDMI_HORZA_VPOS BIT(14) # define VC4_HDMI_HORZA_HPOS BIT(13) /* Horizontal active pixels (hdisplay). */ # define VC4_HDMI_HORZA_HAP_MASK VC4_MASK(12, 0) # define VC4_HDMI_HORZA_HAP_SHIFT 0 -#define VC4_HDMI_HORZB 0x0c8 /* Horizontal pack porch (htotal - hsync_end). */ # define VC4_HDMI_HORZB_HBP_MASK VC4_MASK(29, 20) # define VC4_HDMI_HORZB_HBP_SHIFT 20 @@ -545,7 +547,6 @@ # define VC4_HDMI_HORZB_HFP_MASK VC4_MASK(9, 0) # define VC4_HDMI_HORZB_HFP_SHIFT 0 -#define VC4_HDMI_FIFO_CTL 0x05c # define VC4_HDMI_FIFO_CTL_RECENTER_DONE BIT(14) # define VC4_HDMI_FIFO_CTL_USE_EMPTY BIT(13) # define VC4_HDMI_FIFO_CTL_ON_VB BIT(7) @@ -558,15 +559,12 @@ # define VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N BIT(0) # define VC4_HDMI_FIFO_VALID_WRITE_MASK 0xefff -#define VC4_HDMI_SCHEDULER_CONTROL 0x0c0 # define VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT BIT(15) # define VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS BIT(5) # define VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT BIT(3) # define VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE BIT(1) # define VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI BIT(0) -#define VC4_HDMI_VERTA0 0x0cc -#define VC4_HDMI_VERTA1 0x0d4 /* Vertical sync pulse (vsync_end - vsync_start). */ # define VC4_HDMI_VERTA_VSP_MASK VC4_MASK(24, 20) # define VC4_HDMI_VERTA_VSP_SHIFT 20 @@ -577,8 +575,6 @@ # define VC4_HDMI_VERTA_VAL_MASK VC4_MASK(12, 0) # define VC4_HDMI_VERTA_VAL_SHIFT 0 -#define VC4_HDMI_VERTB0 0x0d0 -#define VC4_HDMI_VERTB1 0x0d8 /* Vertical sync pulse offset (for interlaced) */ # define VC4_HDMI_VERTB_VSPO_MASK VC4_MASK(21, 9) # define VC4_HDMI_VERTB_VSPO_SHIFT 9 @@ -586,7 +582,6 @@ # define VC4_HDMI_VERTB_VBP_MASK VC4_MASK(8, 0) # define VC4_HDMI_VERTB_VBP_SHIFT 0 -#define VC4_HDMI_CEC_CNTRL_1 0x0e8 /* Set when the transmission has ended. */ # define VC4_HDMI_CEC_TX_EOM BIT(31) /* If set, transmission was acked on the 1st or 2nd attempt (only one @@ -627,7 +622,6 @@ /* Set these fields to how many bit clock cycles get to that many * microseconds. 
*/ -#define VC4_HDMI_CEC_CNTRL_2 0x0ec # define VC4_HDMI_CEC_CNT_TO_1500_US_MASK VC4_MASK(30, 24) # define VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT 24 # define VC4_HDMI_CEC_CNT_TO_1300_US_MASK VC4_MASK(23, 17) @@ -639,7 +633,6 @@ # define VC4_HDMI_CEC_CNT_TO_400_US_MASK VC4_MASK(4, 0) # define VC4_HDMI_CEC_CNT_TO_400_US_SHIFT 0 -#define VC4_HDMI_CEC_CNTRL_3 0x0f0 # define VC4_HDMI_CEC_CNT_TO_2750_US_MASK VC4_MASK(31, 24) # define VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT 24 # define VC4_HDMI_CEC_CNT_TO_2400_US_MASK VC4_MASK(23, 16) @@ -649,7 +642,6 @@ # define VC4_HDMI_CEC_CNT_TO_1700_US_MASK VC4_MASK(7, 0) # define VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT 0 -#define VC4_HDMI_CEC_CNTRL_4 0x0f4 # define VC4_HDMI_CEC_CNT_TO_4300_US_MASK VC4_MASK(31, 24) # define VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT 24 # define VC4_HDMI_CEC_CNT_TO_3900_US_MASK VC4_MASK(23, 16) @@ -659,7 +651,6 @@ # define VC4_HDMI_CEC_CNT_TO_3500_US_MASK VC4_MASK(7, 0) # define VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT 0 -#define VC4_HDMI_CEC_CNTRL_5 0x0f8 # define VC4_HDMI_CEC_TX_SW_RESET BIT(27) # define VC4_HDMI_CEC_RX_SW_RESET BIT(26) # define VC4_HDMI_CEC_PAD_SW_RESET BIT(25) @@ -672,39 +663,11 @@ # define VC4_HDMI_CEC_CNT_TO_4500_US_MASK VC4_MASK(7, 0) # define VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT 0 -/* Transmit data, first byte is low byte of the 32-bit reg. MSB of - * each byte transmitted first. - */ -#define VC4_HDMI_CEC_TX_DATA_1 0x0fc -#define VC4_HDMI_CEC_TX_DATA_2 0x100 -#define VC4_HDMI_CEC_TX_DATA_3 0x104 -#define VC4_HDMI_CEC_TX_DATA_4 0x108 -#define VC4_HDMI_CEC_RX_DATA_1 0x10c -#define VC4_HDMI_CEC_RX_DATA_2 0x110 -#define VC4_HDMI_CEC_RX_DATA_3 0x114 -#define VC4_HDMI_CEC_RX_DATA_4 0x118 - -#define VC4_HDMI_TX_PHY_RESET_CTL 0x2c0 - -#define VC4_HDMI_TX_PHY_CTL0 0x2c4 # define VC4_HDMI_TX_PHY_RNG_PWRDN BIT(25) -/* Interrupt status bits */ -#define VC4_HDMI_CPU_STATUS 0x340 -#define VC4_HDMI_CPU_SET 0x344 -#define VC4_HDMI_CPU_CLEAR 0x348 # define VC4_HDMI_CPU_CEC BIT(6) # define VC4_HDMI_CPU_HOTPLUG BIT(0) -#define VC4_HDMI_CPU_MASK_STATUS 0x34c -#define VC4_HDMI_CPU_MASK_SET 0x350 -#define VC4_HDMI_CPU_MASK_CLEAR 0x354 - -#define VC4_HDMI_GCP(x) (0x400 + ((x) * 0x4)) -#define VC4_HDMI_RAM_PACKET(x) (0x400 + ((x) * 0x24)) -#define VC4_HDMI_PACKET_STRIDE 0x24 - -#define VC4_HD_M_CTL 0x00c /* Debug: Current receive value on the CEC pad. */ # define VC4_HD_CECRXD BIT(9) /* Debug: Override CEC output to 0. */ @@ -714,7 +677,6 @@ # define VC4_HD_M_SW_RST BIT(2) # define VC4_HD_M_ENABLE BIT(0) -#define VC4_HD_MAI_CTL 0x014 /* Set when audio stream is received at a slower rate than the * sampling period, so MAI fifo goes empty. Write 1 to clear. */ @@ -739,7 +701,6 @@ /* Single-shot reset bit. Read value is undefined. */ # define VC4_HD_MAI_CTL_RESET BIT(0) -#define VC4_HD_MAI_THR 0x018 # define VC4_HD_MAI_THR_PANICHIGH_MASK VC4_MASK(29, 24) # define VC4_HD_MAI_THR_PANICHIGH_SHIFT 24 # define VC4_HD_MAI_THR_PANICLOW_MASK VC4_MASK(21, 16) @@ -749,31 +710,23 @@ # define VC4_HD_MAI_THR_DREQLOW_MASK VC4_MASK(5, 0) # define VC4_HD_MAI_THR_DREQLOW_SHIFT 0 -/* Format header to be placed on the MAI data. Unused. */ -#define VC4_HD_MAI_FMT 0x01c - -/* Register for DMAing in audio data to be transported over the MAI - * bus to the Falcon core. - */ -#define VC4_HD_MAI_DATA 0x020 - /* Divider from HDMI HSM clock to MAI serial clock. Sampling period * converges to N / (M + 1) cycles. 
*/ -#define VC4_HD_MAI_SMP 0x02c # define VC4_HD_MAI_SMP_N_MASK VC4_MASK(31, 8) # define VC4_HD_MAI_SMP_N_SHIFT 8 # define VC4_HD_MAI_SMP_M_MASK VC4_MASK(7, 0) # define VC4_HD_MAI_SMP_M_SHIFT 0 -#define VC4_HD_VID_CTL 0x038 # define VC4_HD_VID_CTL_ENABLE BIT(31) # define VC4_HD_VID_CTL_UNDERFLOW_ENABLE BIT(30) # define VC4_HD_VID_CTL_FRAME_COUNTER_RESET BIT(29) # define VC4_HD_VID_CTL_VSYNC_LOW BIT(28) # define VC4_HD_VID_CTL_HSYNC_LOW BIT(27) +# define VC4_HD_VID_CTL_CLRSYNC BIT(24) +# define VC4_HD_VID_CTL_CLRRGB BIT(23) +# define VC4_HD_VID_CTL_BLANKPIX BIT(18) -#define VC4_HD_CSC_CTL 0x040 # define VC4_HD_CSC_CTL_ORDER_MASK VC4_MASK(7, 5) # define VC4_HD_CSC_CTL_ORDER_SHIFT 5 # define VC4_HD_CSC_CTL_ORDER_RGB 0 @@ -791,14 +744,7 @@ # define VC4_HD_CSC_CTL_RGB2YCC BIT(1) # define VC4_HD_CSC_CTL_ENABLE BIT(0) -#define VC4_HD_CSC_12_11 0x044 -#define VC4_HD_CSC_14_13 0x048 -#define VC4_HD_CSC_22_21 0x04c -#define VC4_HD_CSC_24_23 0x050 -#define VC4_HD_CSC_32_31 0x054 -#define VC4_HD_CSC_34_33 0x058 - -#define VC4_HD_FRAME_COUNT 0x068 +# define VC4_DVP_HT_CLOCK_STOP_PIXEL BIT(1) /* HVS display list information. */ #define HVS_BOOTLOADER_DLIST_END 32 @@ -825,6 +771,8 @@ enum hvs_pixel_format { HVS_PIXEL_FORMAT_PALETTE = 13, HVS_PIXEL_FORMAT_YUV444_RGB = 14, HVS_PIXEL_FORMAT_AYUV444_RGB = 15, + HVS_PIXEL_FORMAT_RGBA1010102 = 16, + HVS_PIXEL_FORMAT_YCBCR_10BIT = 17, }; /* Note: the LSB is the rightmost character shown. Only valid for @@ -879,6 +827,10 @@ enum hvs_pixel_format { #define SCALER_CTL0_RGBA_EXPAND_MSB 2 #define SCALER_CTL0_RGBA_EXPAND_ROUND 3 +#define SCALER5_CTL0_ALPHA_EXPAND BIT(12) + +#define SCALER5_CTL0_RGB_EXPAND BIT(11) + #define SCALER_CTL0_SCL1_MASK VC4_MASK(10, 8) #define SCALER_CTL0_SCL1_SHIFT 8 @@ -896,10 +848,13 @@ enum hvs_pixel_format { /* Set to indicate no scaling. 
*/ #define SCALER_CTL0_UNITY BIT(4) +#define SCALER5_CTL0_UNITY BIT(15) #define SCALER_CTL0_PIXEL_FORMAT_MASK VC4_MASK(3, 0) #define SCALER_CTL0_PIXEL_FORMAT_SHIFT 0 +#define SCALER5_CTL0_PIXEL_FORMAT_MASK VC4_MASK(4, 0) + #define SCALER_POS0_FIXED_ALPHA_MASK VC4_MASK(31, 24) #define SCALER_POS0_FIXED_ALPHA_SHIFT 24 @@ -909,12 +864,48 @@ enum hvs_pixel_format { #define SCALER_POS0_START_X_MASK VC4_MASK(11, 0) #define SCALER_POS0_START_X_SHIFT 0 +#define SCALER5_POS0_START_Y_MASK VC4_MASK(27, 16) +#define SCALER5_POS0_START_Y_SHIFT 16 + +#define SCALER5_POS0_START_X_MASK VC4_MASK(13, 0) +#define SCALER5_POS0_START_X_SHIFT 0 + +#define SCALER5_POS0_VFLIP BIT(31) +#define SCALER5_POS0_HFLIP BIT(15) + +#define SCALER5_CTL2_ALPHA_MODE_MASK VC4_MASK(31, 30) +#define SCALER5_CTL2_ALPHA_MODE_SHIFT 30 +#define SCALER5_CTL2_ALPHA_MODE_PIPELINE 0 +#define SCALER5_CTL2_ALPHA_MODE_FIXED 1 +#define SCALER5_CTL2_ALPHA_MODE_FIXED_NONZERO 2 +#define SCALER5_CTL2_ALPHA_MODE_FIXED_OVER_0x07 3 + +#define SCALER5_CTL2_ALPHA_PREMULT BIT(29) + +#define SCALER5_CTL2_ALPHA_MIX BIT(28) + +#define SCALER5_CTL2_ALPHA_LOC BIT(25) + +#define SCALER5_CTL2_MAP_SEL_MASK VC4_MASK(18, 17) +#define SCALER5_CTL2_MAP_SEL_SHIFT 17 + +#define SCALER5_CTL2_GAMMA BIT(16) + +#define SCALER5_CTL2_ALPHA_MASK VC4_MASK(15, 4) +#define SCALER5_CTL2_ALPHA_SHIFT 4 + #define SCALER_POS1_SCL_HEIGHT_MASK VC4_MASK(27, 16) #define SCALER_POS1_SCL_HEIGHT_SHIFT 16 #define SCALER_POS1_SCL_WIDTH_MASK VC4_MASK(11, 0) #define SCALER_POS1_SCL_WIDTH_SHIFT 0 +#define SCALER5_POS1_SCL_HEIGHT_MASK VC4_MASK(28, 16) +#define SCALER5_POS1_SCL_HEIGHT_SHIFT 16 + +#define SCALER5_POS1_SCL_WIDTH_MASK VC4_MASK(12, 0) +#define SCALER5_POS1_SCL_WIDTH_SHIFT 0 + #define SCALER_POS2_ALPHA_MODE_MASK VC4_MASK(31, 30) #define SCALER_POS2_ALPHA_MODE_SHIFT 30 #define SCALER_POS2_ALPHA_MODE_PIPELINE 0 @@ -930,6 +921,12 @@ enum hvs_pixel_format { #define SCALER_POS2_WIDTH_MASK VC4_MASK(11, 0) #define SCALER_POS2_WIDTH_SHIFT 0 +#define SCALER5_POS2_HEIGHT_MASK VC4_MASK(28, 16) +#define SCALER5_POS2_HEIGHT_SHIFT 16 + +#define SCALER5_POS2_WIDTH_MASK VC4_MASK(12, 0) +#define SCALER5_POS2_WIDTH_SHIFT 0 + /* Color Space Conversion words. 
Some values are S2.8 signed * integers, except that the 2 integer bits map as {0x0: 0, 0x1: 1, * 0x2: 2, 0x3: -1} diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index a7c3af0005a0..849dcafbfff1 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -436,7 +436,6 @@ static const struct drm_crtc_helper_funcs vc4_txp_crtc_helper_funcs = { .atomic_flush = vc4_hvs_atomic_flush, .atomic_enable = vc4_txp_atomic_enable, .atomic_disable = vc4_txp_atomic_disable, - .mode_set_nofb = vc4_hvs_mode_set_nofb, }; static irqreturn_t vc4_txp_interrupt(int irq, void *data) @@ -452,7 +451,8 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data) } static const struct vc4_crtc_data vc4_txp_crtc_data = { - .hvs_channel = 2, + .hvs_available_channels = BIT(2), + .hvs_output = 2, }; static int vc4_txp_bind(struct device *dev, struct device *master, void *data) diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 313339bbff90..cb884c890065 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -321,7 +321,7 @@ static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj) { struct drm_vgem_gem_object *bo = to_vgem_bo(obj); - return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT); + return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT); } static struct drm_gem_object* vgem_prime_import(struct drm_device *dev, @@ -401,16 +401,8 @@ static int vgem_prime_mmap(struct drm_gem_object *obj, return 0; } -static void vgem_release(struct drm_device *dev) -{ - struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm); - - platform_device_unregister(vgem->platform); -} - static struct drm_driver vgem_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER, - .release = vgem_release, .open = vgem_open, .postclose = vgem_postclose, .gem_free_object_unlocked = vgem_gem_free_object, @@ -442,48 +434,49 @@ static struct drm_driver vgem_driver = { static int __init vgem_init(void) { int ret; + struct platform_device *pdev; - vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL); - if (!vgem_device) - return -ENOMEM; + pdev = platform_device_register_simple("vgem", -1, NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); - vgem_device->platform = - platform_device_register_simple("vgem", -1, NULL, 0); - if (IS_ERR(vgem_device->platform)) { - ret = PTR_ERR(vgem_device->platform); - goto out_free; + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + ret = -ENOMEM; + goto out_unregister; } - dma_coerce_mask_and_coherent(&vgem_device->platform->dev, + dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - ret = drm_dev_init(&vgem_device->drm, &vgem_driver, - &vgem_device->platform->dev); - if (ret) - goto out_unregister; - drmm_add_final_kfree(&vgem_device->drm, vgem_device); + + vgem_device = devm_drm_dev_alloc(&pdev->dev, &vgem_driver, + struct vgem_device, drm); + if (IS_ERR(vgem_device)) { + ret = PTR_ERR(vgem_device); + goto out_devres; + } + vgem_device->platform = pdev; /* Final step: expose the device/driver to userspace */ ret = drm_dev_register(&vgem_device->drm, 0); if (ret) - goto out_put; + goto out_devres; return 0; -out_put: - drm_dev_put(&vgem_device->drm); - platform_device_unregister(vgem_device->platform); - return ret; +out_devres: + devres_release_group(&pdev->dev, NULL); out_unregister: - platform_device_unregister(vgem_device->platform); -out_free: - kfree(vgem_device); + platform_device_unregister(pdev); return ret; } 
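The vgem_init() rework above funnels every failure path through one devres group: anything devm-allocated against the platform device after devres_open_group() succeeds is torn down by a single devres_release_group() call, whether on the error path or later in vgem_exit(). A minimal sketch of that pattern, with a hypothetical probe function and buffer, assuming only the standard devres API:

static int devres_group_sketch(struct platform_device *pdev)
{
	void *buf;

	/* open an anonymous group; an id is generated internally */
	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	buf = devm_kzalloc(&pdev->dev, 64, GFP_KERNEL);
	if (!buf) {
		/* a NULL id selects the most recently opened group */
		devres_release_group(&pdev->dev, NULL);
		return -ENOMEM;
	}

	/* on success the group stays open; release it on teardown */
	return 0;
}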
static void __exit vgem_exit(void) { + struct platform_device *pdev = vgem_device->platform; + drm_dev_unregister(&vgem_device->drm); - drm_dev_put(&vgem_device->drm); + devres_release_group(&pdev->dev, NULL); + platform_device_unregister(pdev); } module_init(vgem_init); diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c index 3221520f61f0..d5b0c543bd6d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c @@ -48,6 +48,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data) virtio_add_bool(m, "virgl", vgdev->has_virgl_3d); virtio_add_bool(m, "edid", vgdev->has_edid); virtio_add_bool(m, "indirect", vgdev->has_indirect); + virtio_add_bool(m, "resource uuid", vgdev->has_resource_assign_uuid); virtio_add_int(m, "cap sets", vgdev->num_capsets); virtio_add_int(m, "scanouts", vgdev->num_scanouts); return 0; diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index effea07abe62..f84b7e61311b 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -325,11 +325,14 @@ static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) +int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) { - int i; + int i, ret; + + ret = drmm_mode_config_init(vgdev->ddev); + if (ret) + return ret; - drm_mode_config_init(vgdev->ddev); vgdev->ddev->mode_config.quirk_addfb_prefer_host_byte_order = true; vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs; @@ -343,6 +346,7 @@ void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) vgdev_output_init(vgdev, i); drm_mode_config_reset(vgdev->ddev); + return 0; } void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev) @@ -351,5 +355,4 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev) for (i = 0 ; i < vgdev->num_scanouts; ++i) kfree(vgdev->outputs[i].edid); - drm_mode_config_cleanup(vgdev->ddev); } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index a52b7a39f286..55c34b4fc3e9 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -352,7 +352,7 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev, struct virtio_gpu_object_array *objs); /* virtgpu_display.c */ -void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); +int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); /* virtgpu_plane.c */ diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 75d0dc2f6d28..eed57a931309 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -80,8 +80,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev, vgdev->capsets[i].id > 0, 5 * HZ); if (ret == 0) { DRM_ERROR("timed out waiting for cap set %d\n", i); + spin_lock(&vgdev->display_info_lock); kfree(vgdev->capsets); vgdev->capsets = NULL; + spin_unlock(&vgdev->display_info_lock); return; } DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n", @@ -103,7 +105,7 @@ int virtio_gpu_init(struct drm_device *dev) /* this will expand later */ struct virtqueue *vqs[2]; u32 num_scanouts, num_capsets; - int ret; + int ret = 0; if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1)) return -ENODEV; @@ -184,7 +186,11 @@ 
int virtio_gpu_init(struct drm_device *dev) num_capsets, &num_capsets); DRM_INFO("number of cap sets: %d\n", num_capsets); - virtio_gpu_modeset_init(vgdev); + ret = virtio_gpu_modeset_init(vgdev); + if (ret) { + DRM_ERROR("modeset init failed\n"); + goto err_scanouts; + } virtio_device_ready(vgdev->vdev); diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 842f8b61aa89..00d6b95e259d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -72,9 +72,8 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) if (shmem->pages) { if (shmem->mapped) { - dma_unmap_sg(vgdev->vdev->dev.parent, - shmem->pages->sgl, shmem->mapped, - DMA_TO_DEVICE); + dma_unmap_sgtable(vgdev->vdev->dev.parent, + shmem->pages, DMA_TO_DEVICE, 0); shmem->mapped = 0; } @@ -164,13 +163,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, } if (use_dma_api) { - shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent, - shmem->pages->sgl, - shmem->pages->nents, - DMA_TO_DEVICE); - *nents = shmem->mapped; + ret = dma_map_sgtable(vgdev->vdev->dev.parent, + shmem->pages, DMA_TO_DEVICE, 0); + if (ret) + return ret; + *nents = shmem->mapped = shmem->pages->nents; } else { - *nents = shmem->pages->nents; + *nents = shmem->pages->orig_nents; } *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry), @@ -180,13 +179,20 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, return -ENOMEM; } - for_each_sg(shmem->pages->sgl, sg, *nents, si) { - (*ents)[si].addr = cpu_to_le64(use_dma_api - ? sg_dma_address(sg) - : sg_phys(sg)); - (*ents)[si].length = cpu_to_le32(sg->length); - (*ents)[si].padding = 0; + if (use_dma_api) { + for_each_sgtable_dma_sg(shmem->pages, sg, si) { + (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg)); + (*ents)[si].length = cpu_to_le32(sg_dma_len(sg)); + (*ents)[si].padding = 0; + } + } else { + for_each_sgtable_sg(shmem->pages, sg, si) { + (*ents)[si].addr = cpu_to_le64(sg_phys(sg)); + (*ents)[si].length = cpu_to_le32(sg->length); + (*ents)[si].padding = 0; + } } + return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index c93c2db35aaf..07945ca238e2 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -302,7 +302,7 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents) return NULL; } - for_each_sg(sgt->sgl, sg, *sg_ents, i) { + for_each_sgtable_sg(sgt, sg, i) { pg = vmalloc_to_page(data); if (!pg) { sg_free_table(sgt); @@ -320,13 +320,13 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents) return sgt; } -static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf, - struct virtio_gpu_fence *fence, - int elemcnt, - struct scatterlist **sgs, - int outcnt, - int incnt) +static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct virtio_gpu_fence *fence, + int elemcnt, + struct scatterlist **sgs, + int outcnt, + int incnt) { struct virtqueue *vq = vgdev->ctrlq.vq; int ret, idx; @@ -335,7 +335,7 @@ static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev, if (fence && vbuf->objs) virtio_gpu_array_unlock_resv(vbuf->objs); free_vbuf(vgdev, vbuf); - return; + return -1; } if (vgdev->has_indirect) @@ -373,15 +373,16 @@ again: spin_unlock(&vgdev->ctrlq.qlock); drm_dev_exit(idx); + return 0; } -static void 
virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf, - struct virtio_gpu_fence *fence) +static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct virtio_gpu_fence *fence) { struct scatterlist *sgs[3], vcmd, vout, vresp; struct sg_table *sgt = NULL; - int elemcnt = 0, outcnt = 0, incnt = 0; + int elemcnt = 0, outcnt = 0, incnt = 0, ret; /* set up vcmd */ sg_init_one(&vcmd, vbuf->buf, vbuf->size); @@ -398,7 +399,7 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, if (!sgt) { if (fence && vbuf->objs) virtio_gpu_array_unlock_resv(vbuf->objs); - return; + return -1; } elemcnt += sg_ents; @@ -419,13 +420,14 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, incnt++; } - virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt, - incnt); + ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt, + incnt); if (sgt) { sg_free_table(sgt); kfree(sgt); } + return ret; } void virtio_gpu_notify(struct virtio_gpu_device *vgdev) @@ -444,10 +446,10 @@ void virtio_gpu_notify(struct virtio_gpu_device *vgdev) virtqueue_notify(vgdev->ctrlq.vq); } -static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) +static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) { - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL); + return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL); } static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, @@ -534,6 +536,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, { struct virtio_gpu_resource_unref *cmd_p; struct virtio_gpu_vbuffer *vbuf; + int ret; cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p), virtio_gpu_cmd_unref_cb); @@ -543,7 +546,9 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); vbuf->resp_cb_data = bo; - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + if (ret < 0) + virtio_gpu_cleanup_object(bo); } void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev, @@ -603,9 +608,8 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) - dma_sync_sg_for_device(vgdev->vdev->dev.parent, - shmem->pages->sgl, shmem->pages->nents, - DMA_TO_DEVICE); + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, + shmem->pages, DMA_TO_DEVICE); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); @@ -684,9 +688,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev, int i = le32_to_cpu(cmd->capset_index); spin_lock(&vgdev->display_info_lock); - vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); - vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); - vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); + if (vgdev->capsets) { + vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); + vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); + vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); + } else { + DRM_ERROR("invalid capset memory."); + } spin_unlock(&vgdev->display_info_lock); wake_up(&vgdev->resp_wq); } @@ -1019,9 +1027,8 @@ void 
virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) - dma_sync_sg_for_device(vgdev->vdev->dev.parent, - shmem->pages->sgl, shmem->pages->nents, - DMA_TO_DEVICE); + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, + shmem->pages, DMA_TO_DEVICE); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile index 0b767d7efa24..333d3cead0e3 100644 --- a/drivers/gpu/drm/vkms/Makefile +++ b/drivers/gpu/drm/vkms/Makefile @@ -1,4 +1,11 @@ # SPDX-License-Identifier: GPL-2.0-only -vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o vkms_composer.o +vkms-y := \ + vkms_drv.o \ + vkms_plane.o \ + vkms_output.o \ + vkms_crtc.o \ + vkms_gem.o \ + vkms_composer.o \ + vkms_writeback.o obj-$(CONFIG_DRM_VKMS) += vkms.o diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index eaecc5a6c5db..33c031f27c2c 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -9,31 +9,41 @@ #include "vkms_drv.h" +static u32 get_pixel_from_buffer(int x, int y, const u8 *buffer, + const struct vkms_composer *composer) +{ + u32 pixel; + int src_offset = composer->offset + (y * composer->pitch) + + (x * composer->cpp); + + pixel = *(u32 *)&buffer[src_offset]; + + return pixel; +} + /** * compute_crc - Compute CRC value on output frame * - * @vaddr_out: address to final framebuffer + * @vaddr: address to final framebuffer * @composer: framebuffer's metadata * * returns CRC value computed using crc32 on the visible portion of * the final framebuffer at vaddr_out */ -static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer) +static uint32_t compute_crc(const u8 *vaddr, + const struct vkms_composer *composer) { - int i, j, src_offset; + int x, y; + u32 crc = 0, pixel = 0; int x_src = composer->src.x1 >> 16; int y_src = composer->src.y1 >> 16; int h_src = drm_rect_height(&composer->src) >> 16; int w_src = drm_rect_width(&composer->src) >> 16; - u32 crc = 0; - - for (i = y_src; i < y_src + h_src; ++i) { - for (j = x_src; j < x_src + w_src; ++j) { - src_offset = composer->offset - + (i * composer->pitch) - + (j * composer->cpp); - crc = crc32_le(crc, vaddr_out + src_offset, - sizeof(u32)); + + for (y = y_src; y < y_src + h_src; ++y) { + for (x = x_src; x < x_src + w_src; ++x) { + pixel = get_pixel_from_buffer(x, y, vaddr, composer); + crc = crc32_le(crc, (void *)&pixel, sizeof(u32)); } } @@ -131,35 +141,31 @@ static void compose_cursor(struct vkms_composer *cursor_composer, primary_composer, cursor_composer); } -static uint32_t _vkms_get_crc(struct vkms_composer *primary_composer, - struct vkms_composer *cursor_composer) +static int compose_planes(void **vaddr_out, + struct vkms_composer *primary_composer, + struct vkms_composer *cursor_composer) { struct drm_framebuffer *fb = &primary_composer->fb; struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0); struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj); - void *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL); - u32 crc = 0; - if (!vaddr_out) { - DRM_ERROR("Failed to allocate memory for output frame."); - return 0; + if (!*vaddr_out) { + *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL); + if (!*vaddr_out) { + DRM_ERROR("Cannot allocate memory for output frame."); + return -ENOMEM; + } } - if (WARN_ON(!vkms_obj->vaddr)) { - kfree(vaddr_out); - 
return crc; - } + if (WARN_ON(!vkms_obj->vaddr)) + return -EINVAL; - memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size); + memcpy(*vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size); if (cursor_composer) - compose_cursor(cursor_composer, primary_composer, vaddr_out); - - crc = compute_crc(vaddr_out, primary_composer); - - kfree(vaddr_out); + compose_cursor(cursor_composer, primary_composer, *vaddr_out); - return crc; + return 0; } /** @@ -180,14 +186,17 @@ void vkms_composer_worker(struct work_struct *work) struct vkms_output *out = drm_crtc_to_vkms_output(crtc); struct vkms_composer *primary_composer = NULL; struct vkms_composer *cursor_composer = NULL; + bool crc_pending, wb_pending; + void *vaddr_out = NULL; u32 crc32 = 0; u64 frame_start, frame_end; - bool crc_pending; + int ret; spin_lock_irq(&out->composer_lock); frame_start = crtc_state->frame_start; frame_end = crtc_state->frame_end; crc_pending = crtc_state->crc_pending; + wb_pending = crtc_state->wb_pending; crtc_state->frame_start = 0; crtc_state->frame_end = 0; crtc_state->crc_pending = false; @@ -206,8 +215,29 @@ void vkms_composer_worker(struct work_struct *work) if (crtc_state->num_active_planes == 2) cursor_composer = crtc_state->active_planes[1]->composer; - if (primary_composer) - crc32 = _vkms_get_crc(primary_composer, cursor_composer); + if (!primary_composer) + return; + + if (wb_pending) + vaddr_out = crtc_state->active_writeback; + + ret = compose_planes(&vaddr_out, primary_composer, cursor_composer); + if (ret) { + if (ret == -EINVAL && !wb_pending) + kfree(vaddr_out); + return; + } + + crc32 = compute_crc(vaddr_out, primary_composer); + + if (wb_pending) { + drm_writeback_signal_completion(&out->wb_connector, 0); + spin_lock_irq(&out->composer_lock); + crtc_state->wb_pending = false; + spin_unlock_irq(&out->composer_lock); + } else { + kfree(vaddr_out); + } /* * The worker can fall behind the vblank hrtimer, make sure we catch up. 
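/*
 * Buffer ownership in the reworked composer above, made explicit: on the
 * CRC-only path compose_planes() allocates a scratch frame that the
 * worker must kfree(), while on the writeback path the worker passes in
 * the vmap'd job buffer and must not free it. A minimal standalone
 * sketch of that rule; compose_planes_stub() stands in for the real
 * function:
 */
#include <linux/slab.h>
#include <linux/types.h>

static int compose_planes_stub(void **vaddr_out, size_t size)
{
	if (!*vaddr_out) {
		*vaddr_out = kzalloc(size, GFP_KERNEL); /* CRC-only scratch */
		if (!*vaddr_out)
			return -ENOMEM;
	}
	return 0; /* writeback path: buffer was caller-provided */
}

static void worker_sketch(void *wb_buffer, size_t size)
{
	void *vaddr_out = wb_buffer; /* NULL when only a CRC is wanted */
	bool wb_pending = wb_buffer != NULL;

	if (compose_planes_stub(&vaddr_out, size))
		return;
	/* ... compute the CRC over vaddr_out here ... */
	if (!wb_pending)
		kfree(vaddr_out); /* scratch belongs to the worker */
}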
@@ -256,7 +286,7 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name, return 0; } -static void vkms_set_composer(struct vkms_output *out, bool enabled) +void vkms_set_composer(struct vkms_output *out, bool enabled) { bool old_enabled; diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 83dd5567de8b..cb0b6230c22c 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -61,9 +61,6 @@ static void vkms_release(struct drm_device *dev) { struct vkms_device *vkms = container_of(dev, struct vkms_device, drm); - platform_device_unregister(vkms->platform); - drm_atomic_helper_shutdown(&vkms->drm); - drm_mode_config_cleanup(&vkms->drm); destroy_workqueue(vkms->output.composer_workq); } @@ -144,30 +141,31 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev) static int __init vkms_init(void) { int ret; + struct platform_device *pdev; - vkms_device = kzalloc(sizeof(*vkms_device), GFP_KERNEL); - if (!vkms_device) - return -ENOMEM; + pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); - vkms_device->platform = - platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); - if (IS_ERR(vkms_device->platform)) { - ret = PTR_ERR(vkms_device->platform); - goto out_free; + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + ret = -ENOMEM; + goto out_unregister; } - ret = drm_dev_init(&vkms_device->drm, &vkms_driver, - &vkms_device->platform->dev); - if (ret) - goto out_unregister; - drmm_add_final_kfree(&vkms_device->drm, vkms_device); + vkms_device = devm_drm_dev_alloc(&pdev->dev, &vkms_driver, + struct vkms_device, drm); + if (IS_ERR(vkms_device)) { + ret = PTR_ERR(vkms_device); + goto out_devres; + } + vkms_device->platform = pdev; ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev, DMA_BIT_MASK(64)); if (ret) { DRM_ERROR("Could not initialize DMA support\n"); - goto out_put; + goto out_devres; } vkms_device->drm.irq_enabled = true; @@ -175,39 +173,41 @@ static int __init vkms_init(void) ret = drm_vblank_init(&vkms_device->drm, 1); if (ret) { DRM_ERROR("Failed to vblank\n"); - goto out_put; + goto out_devres; } ret = vkms_modeset_init(vkms_device); if (ret) - goto out_put; + goto out_devres; ret = drm_dev_register(&vkms_device->drm, 0); if (ret) - goto out_put; + goto out_devres; return 0; -out_put: - drm_dev_put(&vkms_device->drm); - platform_device_unregister(vkms_device->platform); - return ret; +out_devres: + devres_release_group(&pdev->dev, NULL); out_unregister: - platform_device_unregister(vkms_device->platform); -out_free: - kfree(vkms_device); + platform_device_unregister(pdev); return ret; } static void __exit vkms_exit(void) { + struct platform_device *pdev; + if (!vkms_device) { DRM_INFO("vkms_device is NULL.\n"); return; } + pdev = vkms_device->platform; + drm_dev_unregister(&vkms_device->drm); - drm_dev_put(&vkms_device->drm); + drm_atomic_helper_shutdown(&vkms_device->drm); + devres_release_group(&pdev->dev, NULL); + platform_device_unregister(pdev); } module_init(vkms_init); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index f4036bb0b9a8..380a8f27e156 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -8,6 +8,7 @@ #include <drm/drm.h> #include <drm/drm_gem.h> #include <drm/drm_encoder.h> +#include <drm/drm_writeback.h> #define XRES_MIN 20 #define YRES_MIN 20 @@ -52,9 +53,11 @@ struct vkms_crtc_state { int num_active_planes; /* stack of active planes for crc 
computation, should be in z order */ struct vkms_plane_state **active_planes; + void *active_writeback; - /* below three are protected by vkms_output.composer_lock */ + /* below four are protected by vkms_output.composer_lock */ bool crc_pending; + bool wb_pending; u64 frame_start; u64 frame_end; }; @@ -63,6 +66,7 @@ struct vkms_output { struct drm_crtc crtc; struct drm_encoder encoder; struct drm_connector connector; + struct drm_writeback_connector wb_connector; struct hrtimer vblank_hrtimer; ktime_t period_ns; struct drm_pending_vblank_event *event; @@ -143,5 +147,9 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name, /* Composer Support */ void vkms_composer_worker(struct work_struct *work); +void vkms_set_composer(struct vkms_output *out, bool enabled); + +/* Writeback */ +int vkms_enable_writeback_connector(struct vkms_device *vkmsdev); #endif /* _VKMS_DRV_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 85afb77e97f0..4a1848b0318f 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -80,6 +80,10 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index) goto err_attach; } + ret = vkms_enable_writeback_connector(vkmsdev); + if (ret) + DRM_ERROR("Failed to init writeback connector\n"); + drm_mode_config_reset(dev); return 0; diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c new file mode 100644 index 000000000000..094fa4aa061d --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_writeback.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "vkms_drv.h" +#include <drm/drm_fourcc.h> +#include <drm/drm_writeback.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> + +static const u32 vkms_wb_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +static const struct drm_connector_funcs vkms_wb_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_framebuffer *fb; + const struct drm_display_mode *mode = &crtc_state->mode; + + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + return 0; + + fb = conn_state->writeback_job->fb; + if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) { + DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n", + fb->width, fb->height); + return -EINVAL; + } + + if (fb->format->format != vkms_wb_formats[0]) { + struct drm_format_name_buf format_name; + + DRM_DEBUG_KMS("Invalid pixel format %s\n", + drm_get_format_name(fb->format->format, + &format_name)); + return -EINVAL; + } + + return 0; +} + +static const struct drm_encoder_helper_funcs vkms_wb_encoder_helper_funcs = { + .atomic_check = vkms_wb_encoder_atomic_check, +}; + +static int vkms_wb_connector_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + return drm_add_modes_noedid(connector, dev->mode_config.max_width, + dev->mode_config.max_height); +} + +static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector, + struct drm_writeback_job *job) +{ + struct vkms_gem_object *vkms_obj; + struct 
drm_gem_object *gem_obj; + int ret; + + if (!job->fb) + return 0; + + gem_obj = drm_gem_fb_get_obj(job->fb, 0); + ret = vkms_gem_vmap(gem_obj); + if (ret) { + DRM_ERROR("vmap failed: %d\n", ret); + return ret; + } + + vkms_obj = drm_gem_to_vkms_gem(gem_obj); + job->priv = vkms_obj->vaddr; + + return 0; +} + +static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + struct drm_gem_object *gem_obj; + struct vkms_device *vkmsdev; + + if (!job->fb) + return; + + gem_obj = drm_gem_fb_get_obj(job->fb, 0); + vkms_gem_vunmap(gem_obj); + + vkmsdev = drm_device_to_vkms_device(gem_obj->dev); + vkms_set_composer(&vkmsdev->output, false); +} + +static void vkms_wb_atomic_commit(struct drm_connector *conn, + struct drm_connector_state *state) +{ + struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev); + struct vkms_output *output = &vkmsdev->output; + struct drm_writeback_connector *wb_conn = &output->wb_connector; + struct drm_connector_state *conn_state = wb_conn->base.state; + struct vkms_crtc_state *crtc_state = output->composer_state; + + if (!conn_state) + return; + + vkms_set_composer(&vkmsdev->output, true); + + spin_lock_irq(&output->composer_lock); + crtc_state->active_writeback = conn_state->writeback_job->priv; + crtc_state->wb_pending = true; + spin_unlock_irq(&output->composer_lock); + drm_writeback_queue_job(wb_conn, state); +} + +static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = { + .get_modes = vkms_wb_connector_get_modes, + .prepare_writeback_job = vkms_wb_prepare_job, + .cleanup_writeback_job = vkms_wb_cleanup_job, + .atomic_commit = vkms_wb_atomic_commit, +}; + +int vkms_enable_writeback_connector(struct vkms_device *vkmsdev) +{ + struct drm_writeback_connector *wb = &vkmsdev->output.wb_connector; + + vkmsdev->output.wb_connector.encoder.possible_crtcs = 1; + drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs); + + return drm_writeback_connector_init(&vkmsdev->drm, wb, + &vkms_wb_connector_funcs, + &vkms_wb_encoder_helper_funcs, + vkms_wb_formats, + ARRAY_SIZE(vkms_wb_formats)); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index 1629427d5734..e8d66182cd7b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -464,14 +464,14 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT)) dma_resv_assert_held(src->base.resv); - if (dst->ttm->state == tt_unpopulated) { - ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx); + if (!ttm_tt_is_populated(dst->ttm)) { + ret = dst->bdev->driver->ttm_tt_populate(dst->bdev, dst->ttm, &ctx); if (ret) return ret; } - if (src->ttm->state == tt_unpopulated) { - ret = src->ttm->bdev->driver->ttm_tt_populate(src->ttm, &ctx); + if (!ttm_tt_is_populated(src->ttm)) { + ret = src->bdev->driver->ttm_tt_populate(src->bdev, src->ttm, &ctx); if (ret) return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 3229451d0706..813f1b148094 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -354,10 +354,12 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin) pl.fpfn = 0; pl.lpfn = 0; - pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB - | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; + pl.mem_type = bo->mem.mem_type; + pl.flags = bo->mem.placement; if (pin) pl.flags |= TTM_PL_FLAG_NO_EVICT; + else + pl.flags &= 
~TTM_PL_FLAG_NO_EVICT; memset(&placement, 0, sizeof(placement)); placement.num_placement = 1; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index e60012886065..31e3e5c9f362 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -626,9 +626,8 @@ static int vmw_vram_manager_init(struct vmw_private *dev_priv) #ifdef CONFIG_TRANSPARENT_HUGEPAGE ret = vmw_thp_init(dev_priv); #else - ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, - TTM_PL_FLAG_CACHED, TTM_PL_FLAG_CACHED, - false, dev_priv->vram_size >> PAGE_SHIFT); + ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false, + dev_priv->vram_size >> PAGE_SHIFT); #endif ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); return ret; @@ -882,8 +881,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) DRM_ERROR("Failed initializing TTM buffer object driver.\n"); goto out_no_bdev; } - ttm_manager_type(&dev_priv->bdev, TTM_PL_SYSTEM)->available_caching = - TTM_PL_FLAG_CACHED; /* * Enable VRAM, but initially don't use it until SVGA is enabled and diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 871ad738dadb..1523b51a7284 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -82,9 +82,7 @@ VMWGFX_NUM_GB_SCREEN_TARGET) #define VMW_PL_GMR (TTM_PL_PRIV + 0) -#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0) #define VMW_PL_MOB (TTM_PL_PRIV + 1) -#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1) #define VMW_RES_CONTEXT ttm_driver_type0 #define VMW_RES_SURFACE ttm_driver_type1 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index bb76acb5b0fc..db64c3a90285 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -112,8 +112,6 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) man = &gman->manager; man->func = &vmw_gmrid_manager_func; - man->available_caching = TTM_PL_FLAG_CACHED; - man->default_caching = TTM_PL_FLAG_CACHED; /* TODO: This is most likely not correct */ man->use_tt = true; ttm_resource_manager_init(man, 0); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index 3c00a9e7cfcc..c158e672b762 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -26,6 +26,8 @@ static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man) return container_of(man, struct vmw_thp_manager, manager); } +static const struct ttm_resource_manager_func vmw_thp_func; + static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node, unsigned long align_pages, const struct ttm_place *place, @@ -123,25 +125,21 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man, int vmw_thp_init(struct vmw_private *dev_priv) { - struct ttm_resource_manager *man; struct vmw_thp_manager *rman; rman = kzalloc(sizeof(*rman), GFP_KERNEL); if (!rman) return -ENOMEM; - man = &rman->manager; - man->available_caching = TTM_PL_FLAG_CACHED; - man->default_caching = TTM_PL_FLAG_CACHED; - - ttm_resource_manager_init(man, + ttm_resource_manager_init(&rman->manager, dev_priv->vram_size >> PAGE_SHIFT); - drm_mm_init(&rman->mm, 0, man->size); + rman->manager.func = &vmw_thp_func; + drm_mm_init(&rman->mm, 0, rman->manager.size); spin_lock_init(&rman->lock); ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager); - 
ttm_resource_manager_set_used(man, true); + ttm_resource_manager_set_used(&rman->manager, true); return 0; } @@ -176,7 +174,7 @@ static void vmw_thp_debug(struct ttm_resource_manager *man, spin_unlock(&rman->lock); } -const struct ttm_resource_manager_func vmw_thp_func = { +static const struct ttm_resource_manager_func vmw_thp_func = { .alloc = vmw_thp_get_node, .free = vmw_thp_put_node, .debug = vmw_thp_debug diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index c7f10b2c93d2..7f0310441da1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -33,49 +33,57 @@ static const struct ttm_place vram_placement_flags = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + .mem_type = TTM_PL_VRAM, + .flags = TTM_PL_FLAG_CACHED }; static const struct ttm_place vram_ne_placement_flags = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT + .mem_type = TTM_PL_VRAM, + .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; static const struct ttm_place sys_placement_flags = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED + .mem_type = TTM_PL_SYSTEM, + .flags = TTM_PL_FLAG_CACHED }; static const struct ttm_place sys_ne_placement_flags = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT + .mem_type = TTM_PL_SYSTEM, + .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; static const struct ttm_place gmr_placement_flags = { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_GMR, + .flags = TTM_PL_FLAG_CACHED }; static const struct ttm_place gmr_ne_placement_flags = { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT + .mem_type = VMW_PL_GMR, + .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; static const struct ttm_place mob_placement_flags = { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_MOB, + .flags = TTM_PL_FLAG_CACHED }; static const struct ttm_place mob_ne_placement_flags = { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT + .mem_type = VMW_PL_MOB, + .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; struct ttm_placement vmw_vram_placement = { @@ -89,11 +97,13 @@ static const struct ttm_place vram_gmr_placement_flags[] = { { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + .mem_type = TTM_PL_VRAM, + .flags = TTM_PL_FLAG_CACHED }, { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_GMR, + .flags = TTM_PL_FLAG_CACHED } }; @@ -101,11 +111,13 @@ static const struct ttm_place gmr_vram_placement_flags[] = { { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_GMR, + .flags = TTM_PL_FLAG_CACHED }, { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + .mem_type = TTM_PL_VRAM, + .flags = TTM_PL_FLAG_CACHED } }; @@ -120,12 +132,14 @@ static const struct ttm_place vram_gmr_ne_placement_flags[] = { { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | + .mem_type = TTM_PL_VRAM, + .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }, { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | + .mem_type = VMW_PL_GMR, + .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT } }; @@ -169,19 +183,23 @@ static const struct ttm_place 
evictable_placement_flags[] = { { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED + .mem_type = TTM_PL_SYSTEM, + .flags = TTM_PL_FLAG_CACHED }, { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + .mem_type = TTM_PL_VRAM, + .flags = TTM_PL_FLAG_CACHED }, { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_GMR, + .flags = TTM_PL_FLAG_CACHED }, { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_MOB, + .flags = TTM_PL_FLAG_CACHED } }; @@ -189,15 +207,18 @@ static const struct ttm_place nonfixed_placement_flags[] = { { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED + .mem_type = TTM_PL_SYSTEM, + .flags = TTM_PL_FLAG_CACHED }, { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_GMR, + .flags = TTM_PL_FLAG_CACHED }, { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_MOB, + .flags = TTM_PL_FLAG_CACHED } }; @@ -246,6 +267,7 @@ struct vmw_ttm_tt { struct vmw_sg_table vsgt; uint64_t sg_alloc_size; bool mapped; + bool bound; }; const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt); @@ -362,8 +384,7 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt) { struct device *dev = vmw_tt->dev_priv->dev->dev; - dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents, - DMA_BIDIRECTIONAL); + dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0); vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents; } @@ -383,16 +404,8 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt) static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt) { struct device *dev = vmw_tt->dev_priv->dev->dev; - int ret; - - ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents, - DMA_BIDIRECTIONAL); - if (unlikely(ret == 0)) - return -ENOMEM; - - vmw_tt->sgt.nents = ret; - return 0; + return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0); } /** @@ -449,10 +462,10 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) if (unlikely(ret != 0)) goto out_sg_alloc_fail; - if (vsgt->num_pages > vmw_tt->sgt.nents) { + if (vsgt->num_pages > vmw_tt->sgt.orig_nents) { uint64_t over_alloc = sgl_size * (vsgt->num_pages - - vmw_tt->sgt.nents); + vmw_tt->sgt.orig_nents); ttm_mem_global_free(glob, over_alloc); vmw_tt->sg_alloc_size -= over_alloc; @@ -539,11 +552,18 @@ const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) } -static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) +static int vmw_ttm_bind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); - int ret; + int ret = 0; + + if (!bo_mem) + return -EINVAL; + + if (vmw_be->bound) + return 0; ret = vmw_ttm_map_dma(vmw_be); if (unlikely(ret != 0)) @@ -554,8 +574,9 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) switch (bo_mem->mem_type) { case VMW_PL_GMR: - return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, + ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, ttm->num_pages, vmw_be->gmr_id); + break; case VMW_PL_MOB: if (unlikely(vmw_be->mob == NULL)) { vmw_be->mob = @@ -564,20 +585,26 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) return -ENOMEM; } - return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob, + ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob, &vmw_be->vsgt, ttm->num_pages, vmw_be->gmr_id); + break; default: BUG(); } - return 
0; + vmw_be->bound = true; + return ret; } -static void vmw_ttm_unbind(struct ttm_tt *ttm) +static void vmw_ttm_unbind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); + if (!vmw_be->bound) + return; + switch (vmw_be->mem_type) { case VMW_PL_GMR: vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); @@ -591,14 +618,17 @@ static void vmw_ttm_unbind(struct ttm_tt *ttm) if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind) vmw_ttm_unmap_dma(vmw_be); + vmw_be->bound = false; } -static void vmw_ttm_destroy(struct ttm_tt *ttm) +static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); + vmw_ttm_unbind(bdev, ttm); + ttm_tt_destroy_common(bdev, ttm); vmw_ttm_unmap_dma(vmw_be); if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) ttm_dma_tt_fini(&vmw_be->dma_ttm); @@ -612,7 +642,8 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm) } -static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) +static int vmw_ttm_populate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); @@ -620,7 +651,7 @@ static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); int ret; - if (ttm->state != tt_unpopulated) + if (ttm_tt_is_populated(ttm)) return 0; if (dev_priv->map_mode == vmw_dma_alloc_coherent) { @@ -640,7 +671,8 @@ static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) return ret; } -static void vmw_ttm_unpopulate(struct ttm_tt *ttm) +static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); @@ -664,12 +696,6 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm) ttm_pool_unpopulate(ttm); } -static struct ttm_backend_func vmw_ttm_func = { - .bind = vmw_ttm_bind, - .unbind = vmw_ttm_unbind, - .destroy = vmw_ttm_destroy, -}; - static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { @@ -680,7 +706,6 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo, if (!vmw_be) return NULL; - vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev); vmw_be->mob = NULL; @@ -721,8 +746,8 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resourc case VMW_PL_MOB: return 0; case TTM_PL_VRAM: - mem->bus.offset = mem->start << PAGE_SHIFT; - mem->bus.base = dev_priv->vram_start; + mem->bus.offset = (mem->start << PAGE_SHIFT) + + dev_priv->vram_start; mem->bus.is_iomem = true; break; default: @@ -766,6 +791,9 @@ struct ttm_bo_driver vmw_bo_driver = { .ttm_tt_create = &vmw_ttm_tt_create, .ttm_tt_populate = &vmw_ttm_populate, .ttm_tt_unpopulate = &vmw_ttm_unpopulate, + .ttm_tt_bind = &vmw_ttm_bind, + .ttm_tt_unbind = &vmw_ttm_unbind, + .ttm_tt_destroy = &vmw_ttm_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = vmw_evict_flags, .move = NULL, @@ -796,7 +824,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv, ret = ttm_bo_reserve(bo, false, true, NULL); BUG_ON(ret != 0); - ret = vmw_ttm_populate(bo->ttm, &ctx); + ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx); if (likely(ret == 0)) { struct vmw_ttm_tt *vmw_tt = container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); 
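A recurring theme across the virtio-gpu, vmwgfx, host1x, xen and vsp1 hunks in this series is the move from open-coded dma_map_sg()/dma_unmap_sg() to the sgtable helpers, which keep nents (DMA side) and orig_nents (CPU side) consistent inside the sg_table and report failure as an error code instead of a zero count. A minimal before/after sketch on an already-populated sg_table; the function names are illustrative:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Old style: caller must track the mapped count itself. */
static int map_old(struct device *dev, struct sg_table *sgt)
{
	int nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents,
			       DMA_BIDIRECTIONAL);
	if (nents == 0)
		return -ENOMEM;
	sgt->nents = nents;
	return 0;
}

/* New style: the helper updates sgt->nents and returns an errno. */
static int map_new(struct device *dev, struct sg_table *sgt)
{
	int ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;
	/* Iterate DMA entries with for_each_sgtable_dma_sg()/sg_dma_len() */
	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	return 0;
}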
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index 534daf37c97e..2f464ef2d53e 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -180,7 +180,8 @@ struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj) if (!xen_obj->pages) return ERR_PTR(-ENOMEM); - return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages); + return drm_prime_pages_to_sg(gem_obj->dev, + xen_obj->pages, xen_obj->num_pages); } struct drm_gem_object * @@ -217,7 +218,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, return ERR_PTR(ret); DRM_DEBUG("Imported buffer of size %zu with nents %u\n", - size, sgt->nents); + size, sgt->orig_nents); return &xen_obj->base; } diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index a455cfc1bee5..98bd48f13fd1 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -242,12 +242,6 @@ static const u32 scaling_factors_565[] = { ZYNQMP_DISP_AV_BUF_5BIT_SF, }; -static const u32 scaling_factors_666[] = { - ZYNQMP_DISP_AV_BUF_6BIT_SF, - ZYNQMP_DISP_AV_BUF_6BIT_SF, - ZYNQMP_DISP_AV_BUF_6BIT_SF, -}; - static const u32 scaling_factors_888[] = { ZYNQMP_DISP_AV_BUF_8BIT_SF, ZYNQMP_DISP_AV_BUF_8BIT_SF, diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index 26328c76305b..8e69303aad3f 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -111,7 +111,7 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) /* Initialize mode config, vblank and the KMS poll helper. */ ret = drmm_mode_config_init(drm); if (ret < 0) - goto err_dev_put; + return ret; drm->mode_config.funcs = &zynqmp_dpsub_mode_config_funcs; drm->mode_config.min_width = 0; @@ -121,7 +121,7 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) ret = drm_vblank_init(drm, 1); if (ret) - goto err_dev_put; + return ret; drm->irq_enabled = 1; @@ -154,8 +154,6 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) err_poll_fini: drm_kms_helper_poll_fini(drm); -err_dev_put: - drm_dev_put(drm); return ret; } @@ -208,27 +206,16 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev) int ret; /* Allocate private data. */ - dpsub = kzalloc(sizeof(*dpsub), GFP_KERNEL); - if (!dpsub) - return -ENOMEM; + dpsub = devm_drm_dev_alloc(&pdev->dev, &zynqmp_dpsub_drm_driver, + struct zynqmp_dpsub, drm); + if (IS_ERR(dpsub)) + return PTR_ERR(dpsub); dpsub->dev = &pdev->dev; platform_set_drvdata(pdev, dpsub); dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT)); - /* - * Initialize the DRM device early, as the DRM core mandates usage of - * the managed memory helpers tied to the DRM device. - */ - ret = drm_dev_init(&dpsub->drm, &zynqmp_dpsub_drm_driver, &pdev->dev); - if (ret < 0) { - kfree(dpsub); - return ret; - } - - drmm_add_final_kfree(&dpsub->drm, dpsub); - /* Try the reserved memory. Proceed if there's none. 
*/ of_reserved_mem_device_init(&pdev->dev); @@ -286,8 +273,6 @@ static int zynqmp_dpsub_remove(struct platform_device *pdev) clk_disable_unprepare(dpsub->apb_clk); of_reserved_mem_device_release(&pdev->dev); - drm_dev_put(drm); - return 0; } diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index 89b6c14b7392..82d0a60ba3f7 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -170,11 +170,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); - if (!err) { - err = -ENOMEM; + err = dma_map_sgtable(dev, sgt, dir, 0); + if (err) goto unpin; - } job->unpins[job->num_unpins].dev = dev; job->unpins[job->num_unpins].dir = dir; @@ -228,7 +226,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) } if (host->domain) { - for_each_sg(sgt->sgl, sg, sgt->nents, j) + for_each_sgtable_sg(sgt, sg, j) gather_size += sg->length; gather_size = iova_align(&host->iova, gather_size); @@ -240,9 +238,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto put; } - err = iommu_map_sg(host->domain, + err = iommu_map_sgtable(host->domain, iova_dma_addr(&host->iova, alloc), - sgt->sgl, sgt->nents, IOMMU_READ); + sgt, IOMMU_READ); if (err == 0) { __free_iova(&host->iova, alloc); err = -EINVAL; @@ -252,12 +250,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) job->unpins[job->num_unpins].size = gather_size; phys_addr = iova_dma_addr(&host->iova, alloc); } else if (sgt) { - err = dma_map_sg(host->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); - if (!err) { - err = -ENOMEM; + err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0); + if (err) goto put; - } job->unpins[job->num_unpins].dir = DMA_TO_DEVICE; job->unpins[job->num_unpins].dev = host->dev; @@ -660,8 +655,7 @@ void host1x_job_unpin(struct host1x_job *job) } if (unpin->dev && sgt) - dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents, - unpin->dir); + dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0); host1x_bo_unpin(dev, unpin->bo, sgt); host1x_bo_put(unpin->bo); diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index a4a45d68a6ef..86d5e3f4b1ff 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -912,8 +912,8 @@ int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt) * skip cache sync. This will need to be revisited when support for * non-coherent buffers will be added to the DU driver. 
*/ - return dma_map_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents, - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + return dma_map_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); } EXPORT_SYMBOL_GPL(vsp1_du_map_sg); @@ -921,8 +921,8 @@ void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); - dma_unmap_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents, - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + dma_unmap_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); } EXPORT_SYMBOL_GPL(vsp1_du_unmap_sg); diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c index 7116127358ee..116aca36f7dd 100644 --- a/drivers/phy/cadence/phy-cadence-torrent.c +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -1852,6 +1852,10 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) cdns_phy->phys[node].num_lanes, cdns_phy->max_bit_rate / 1000, cdns_phy->max_bit_rate % 1000); + + gphy->attrs.bus_width = cdns_phy->phys[node].num_lanes; + gphy->attrs.max_link_rate = cdns_phy->max_bit_rate; + gphy->attrs.mode = PHY_MODE_DP; } else { dev_err(dev, "Driver supports only PHY_TYPE_DP\n"); ret = -ENOTSUPP; diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig index dee757c957f2..50c5e9306e19 100644 --- a/drivers/phy/mediatek/Kconfig +++ b/drivers/phy/mediatek/Kconfig @@ -35,3 +35,10 @@ config PHY_MTK_XSPHY Enable this to support the SuperSpeedPlus XS-PHY transceiver for USB3.1 GEN2 controllers on MediaTek chips. The driver supports multiple USB2.0, USB3.1 GEN2 ports. + +config PHY_MTK_HDMI + tristate "MediaTek HDMI-PHY Driver" + depends on ARCH_MEDIATEK && OF + select GENERIC_PHY + help + Support HDMI PHY for Mediatek SoCs. 
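With the MediaTek HDMI PHY relocated under drivers/phy and the Cadence Torrent PHY now publishing bus_width, max_link_rate and mode attributes, display drivers consume these PHYs through the generic PHY framework rather than by reaching into driver internals. A hedged sketch of the consumer side; the "hdmi" lookup name and the direct attrs access are assumptions for illustration, not taken from the diffs above:

#include <linux/device.h>
#include <linux/phy/phy.h>

static int example_attach_phy(struct device *dev)
{
	struct phy *phy;

	phy = devm_phy_get(dev, "hdmi"); /* con_id is an assumption */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* Attributes like those the Torrent DP PHY sets above. */
	dev_info(dev, "lanes=%d max link rate=%u\n",
		 phy_get_bus_width(phy), phy->attrs.max_link_rate);

	return phy_power_on(phy);
}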
diff --git a/drivers/phy/mediatek/Makefile b/drivers/phy/mediatek/Makefile index 08a8e6a97b1e..6325e38709ed 100644 --- a/drivers/phy/mediatek/Makefile +++ b/drivers/phy/mediatek/Makefile @@ -6,3 +6,8 @@ obj-$(CONFIG_PHY_MTK_TPHY) += phy-mtk-tphy.o obj-$(CONFIG_PHY_MTK_UFS) += phy-mtk-ufs.o obj-$(CONFIG_PHY_MTK_XSPHY) += phy-mtk-xsphy.o + +phy-mtk-hdmi-drv-y := phy-mtk-hdmi.o +phy-mtk-hdmi-drv-y += phy-mtk-hdmi-mt2701.o +phy-mtk-hdmi-drv-y += phy-mtk-hdmi-mt8173.o +obj-$(CONFIG_PHY_MTK_HDMI) += phy-mtk-hdmi-drv.o diff --git a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c index d3cc4022e988..b74c65a1762c 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c +++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c @@ -4,7 +4,7 @@ * Author: Chunhui Dai <chunhui.dai@mediatek.com> */ -#include "mtk_hdmi_phy.h" +#include "phy-mtk-hdmi.h" #define HDMI_CON0 0x00 #define RG_HDMITX_DRV_IBIAS 0 @@ -237,8 +237,8 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) } struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = { - .tz_disabled = true, .flags = CLK_SET_RATE_GATE, + .pll_default_off = true, .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops, .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds, .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds, diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c index 827b93786fac..6cdfdf5a698a 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c +++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c @@ -4,7 +4,7 @@ * Author: Jie Qiu <jie.qiu@mediatek.com> */ -#include "mtk_hdmi_phy.h" +#include "phy-mtk-hdmi.h" #define HDMI_CON0 0x00 #define RG_HDMITX_PLL_EN BIT(31) diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/phy/mediatek/phy-mtk-hdmi.c index 5223498502c4..47c029d4b270 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c +++ b/drivers/phy/mediatek/phy-mtk-hdmi.c @@ -4,7 +4,7 @@ * Author: Jie Qiu <jie.qiu@mediatek.com> */ -#include "mtk_hdmi_phy.h" +#include "phy-mtk-hdmi.h" static int mtk_hdmi_phy_power_on(struct phy *phy); static int mtk_hdmi_phy_power_off(struct phy *phy); @@ -184,6 +184,9 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev) return PTR_ERR(phy_provider); } + if (hdmi_phy->conf->pll_default_off) + hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy); + return of_clk_add_provider(dev->of_node, of_clk_src_simple_get, hdmi_phy->pll); } @@ -205,6 +208,7 @@ struct platform_driver mtk_hdmi_phy_driver = { .of_match_table = mtk_hdmi_phy_match, }, }; +module_platform_driver(mtk_hdmi_phy_driver); MODULE_DESCRIPTION("MediaTek HDMI PHY Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h b/drivers/phy/mediatek/phy-mtk-hdmi.h index 2d8b3182470d..dcf9bb13699b 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h +++ b/drivers/phy/mediatek/phy-mtk-hdmi.h @@ -20,8 +20,8 @@ struct mtk_hdmi_phy; struct mtk_hdmi_phy_conf { - bool tz_disabled; unsigned long flags; + bool pll_default_off; const struct clk_ops *hdmi_phy_clk_ops; void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy); void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy); @@ -50,7 +50,6 @@ void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset, u32 val, u32 mask); struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw); -extern struct platform_driver mtk_hdmi_phy_driver; extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf; extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf; diff --git a/drivers/pwm/pwm-crc.c 
b/drivers/pwm/pwm-crc.c index 272eeb071147..ecfdfac0c2d9 100644 --- a/drivers/pwm/pwm-crc.c +++ b/drivers/pwm/pwm-crc.c @@ -21,8 +21,8 @@ #define PWM_MAX_LEVEL 0xFF -#define PWM_BASE_CLK 6000000 /* 6 MHz */ -#define PWM_MAX_PERIOD_NS 21333 /* 46.875KHz */ +#define PWM_BASE_CLK_MHZ 6 /* 6 MHz */ +#define PWM_MAX_PERIOD_NS 5461334 /* 183 Hz */ /** * struct crystalcove_pwm - Crystal Cove PWM controller @@ -39,59 +39,121 @@ static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *pc) return container_of(pc, struct crystalcove_pwm, chip); } -static int crc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm) +static int crc_pwm_calc_clk_div(int period_ns) { - struct crystalcove_pwm *crc_pwm = to_crc_pwm(c); + int clk_div; - regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 1); + clk_div = PWM_BASE_CLK_MHZ * period_ns / (256 * NSEC_PER_USEC); + /* clk_div 1 - 128, maps to register values 0-127 */ + if (clk_div > 0) + clk_div--; - return 0; -} - -static void crc_pwm_disable(struct pwm_chip *c, struct pwm_device *pwm) -{ - struct crystalcove_pwm *crc_pwm = to_crc_pwm(c); - - regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0); + return clk_div; } -static int crc_pwm_config(struct pwm_chip *c, struct pwm_device *pwm, - int duty_ns, int period_ns) +static int crc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) { - struct crystalcove_pwm *crc_pwm = to_crc_pwm(c); + struct crystalcove_pwm *crc_pwm = to_crc_pwm(chip); struct device *dev = crc_pwm->chip.dev; - int level; + int err; - if (period_ns > PWM_MAX_PERIOD_NS) { + if (state->period > PWM_MAX_PERIOD_NS) { dev_err(dev, "un-supported period_ns\n"); return -EINVAL; } - if (pwm_get_period(pwm) != period_ns) { - int clk_div; + if (state->polarity != PWM_POLARITY_NORMAL) + return -EOPNOTSUPP; - /* changing the clk divisor, need to disable fisrt */ - crc_pwm_disable(c, pwm); - clk_div = PWM_BASE_CLK * period_ns / NSEC_PER_SEC; + if (pwm_is_enabled(pwm) && !state->enabled) { + err = regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0); + if (err) { + dev_err(dev, "Error writing BACKLIGHT_EN %d\n", err); + return err; + } + } - regmap_write(crc_pwm->regmap, PWM0_CLK_DIV, - clk_div | PWM_OUTPUT_ENABLE); + if (pwm_get_duty_cycle(pwm) != state->duty_cycle || + pwm_get_period(pwm) != state->period) { + u64 level = state->duty_cycle * PWM_MAX_LEVEL; + + do_div(level, state->period); + + err = regmap_write(crc_pwm->regmap, PWM0_DUTY_CYCLE, level); + if (err) { + dev_err(dev, "Error writing PWM0_DUTY_CYCLE %d\n", err); + return err; + } + } + + if (pwm_is_enabled(pwm) && state->enabled && + pwm_get_period(pwm) != state->period) { + /* changing the clk divisor, clear PWM_OUTPUT_ENABLE first */ + err = regmap_write(crc_pwm->regmap, PWM0_CLK_DIV, 0); + if (err) { + dev_err(dev, "Error writing PWM0_CLK_DIV %d\n", err); + return err; + } + } - /* enable back */ - crc_pwm_enable(c, pwm); + if (pwm_get_period(pwm) != state->period || + pwm_is_enabled(pwm) != state->enabled) { + int clk_div = crc_pwm_calc_clk_div(state->period); + int pwm_output_enable = state->enabled ? 
PWM_OUTPUT_ENABLE : 0; + + err = regmap_write(crc_pwm->regmap, PWM0_CLK_DIV, + clk_div | pwm_output_enable); + if (err) { + dev_err(dev, "Error writing PWM0_CLK_DIV %d\n", err); + return err; + } } - /* change the pwm duty cycle */ - level = duty_ns * PWM_MAX_LEVEL / period_ns; - regmap_write(crc_pwm->regmap, PWM0_DUTY_CYCLE, level); + if (!pwm_is_enabled(pwm) && state->enabled) { + err = regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 1); + if (err) { + dev_err(dev, "Error writing BACKLIGHT_EN %d\n", err); + return err; + } + } return 0; } +static void crc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct crystalcove_pwm *crc_pwm = to_crc_pwm(chip); + struct device *dev = crc_pwm->chip.dev; + unsigned int clk_div, clk_div_reg, duty_cycle_reg; + int error; + + error = regmap_read(crc_pwm->regmap, PWM0_CLK_DIV, &clk_div_reg); + if (error) { + dev_err(dev, "Error reading PWM0_CLK_DIV %d\n", error); + return; + } + + error = regmap_read(crc_pwm->regmap, PWM0_DUTY_CYCLE, &duty_cycle_reg); + if (error) { + dev_err(dev, "Error reading PWM0_DUTY_CYCLE %d\n", error); + return; + } + + clk_div = (clk_div_reg & ~PWM_OUTPUT_ENABLE) + 1; + + state->period = + DIV_ROUND_UP(clk_div * NSEC_PER_USEC * 256, PWM_BASE_CLK_MHZ); + state->duty_cycle = + DIV_ROUND_UP_ULL(duty_cycle_reg * state->period, PWM_MAX_LEVEL); + state->polarity = PWM_POLARITY_NORMAL; + state->enabled = !!(clk_div_reg & PWM_OUTPUT_ENABLE); +} + static const struct pwm_ops crc_pwm_ops = { - .config = crc_pwm_config, - .enable = crc_pwm_enable, - .disable = crc_pwm_disable, + .apply = crc_pwm_apply, + .get_state = crc_pwm_get_state, }; static int crystalcove_pwm_probe(struct platform_device *pdev) diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c index 48f34d20aecd..c6502cf7a7af 100644 --- a/drivers/pwm/pwm-lpss-platform.c +++ b/drivers/pwm/pwm-lpss-platform.c @@ -89,7 +89,6 @@ static int pwm_lpss_prepare(struct device *dev) static const struct dev_pm_ops pwm_lpss_platform_pm_ops = { .prepare = pwm_lpss_prepare, - SET_SYSTEM_SLEEP_PM_OPS(pwm_lpss_suspend, pwm_lpss_resume) }; static const struct acpi_device_id pwm_lpss_acpi_match[] = { diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 9d965ffe66d1..3444c56b4bed 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -85,7 +85,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, unsigned long long on_time_div; unsigned long c = lpwm->info->clk_rate, base_unit_range; unsigned long long base_unit, freq = NSEC_PER_SEC; - u32 orig_ctrl, ctrl; + u32 ctrl; do_div(freq, period_ns); @@ -93,26 +93,25 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, * The equation is: * base_unit = round(base_unit_range * freq / c) */ - base_unit_range = BIT(lpwm->info->base_unit_bits) - 1; + base_unit_range = BIT(lpwm->info->base_unit_bits); freq *= base_unit_range; base_unit = DIV_ROUND_CLOSEST_ULL(freq, c); + /* base_unit must not be 0 and we also want to avoid overflowing it */ + base_unit = clamp_val(base_unit, 1, base_unit_range - 1); on_time_div = 255ULL * duty_ns; do_div(on_time_div, period_ns); on_time_div = 255ULL - on_time_div; - orig_ctrl = ctrl = pwm_lpss_read(pwm); + ctrl = pwm_lpss_read(pwm); ctrl &= ~PWM_ON_TIME_DIV_MASK; - ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT); - base_unit &= base_unit_range; + ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT); ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT; ctrl |= on_time_div; - 
if (orig_ctrl != ctrl) { - pwm_lpss_write(pwm, ctrl); - pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE); - } + pwm_lpss_write(pwm, ctrl); + pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE); } static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond) @@ -121,41 +120,47 @@ static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond) pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); } +static int pwm_lpss_prepare_enable(struct pwm_lpss_chip *lpwm, + struct pwm_device *pwm, + const struct pwm_state *state) +{ + int ret; + + ret = pwm_lpss_is_updating(pwm); + if (ret) + return ret; + + pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); + pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false); + ret = pwm_lpss_wait_for_update(pwm); + if (ret) + return ret; + + pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true); + return 0; +} + static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { struct pwm_lpss_chip *lpwm = to_lpwm(chip); - int ret; + int ret = 0; if (state->enabled) { if (!pwm_is_enabled(pwm)) { pm_runtime_get_sync(chip->dev); - ret = pwm_lpss_is_updating(pwm); - if (ret) { - pm_runtime_put(chip->dev); - return ret; - } - pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); - pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false); - ret = pwm_lpss_wait_for_update(pwm); - if (ret) { + ret = pwm_lpss_prepare_enable(lpwm, pwm, state); + if (ret) pm_runtime_put(chip->dev); - return ret; - } - pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true); } else { - ret = pwm_lpss_is_updating(pwm); - if (ret) - return ret; - pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); - return pwm_lpss_wait_for_update(pwm); + ret = pwm_lpss_prepare_enable(lpwm, pwm, state); } } else if (pwm_is_enabled(pwm)) { pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE); pm_runtime_put(chip->dev); } - return 0; + return ret; } static void pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm, @@ -255,30 +260,6 @@ int pwm_lpss_remove(struct pwm_lpss_chip *lpwm) } EXPORT_SYMBOL_GPL(pwm_lpss_remove); -int pwm_lpss_suspend(struct device *dev) -{ - struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev); - int i; - - for (i = 0; i < lpwm->info->npwm; i++) - lpwm->saved_ctrl[i] = readl(lpwm->regs + i * PWM_SIZE + PWM); - - return 0; -} -EXPORT_SYMBOL_GPL(pwm_lpss_suspend); - -int pwm_lpss_resume(struct device *dev) -{ - struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev); - int i; - - for (i = 0; i < lpwm->info->npwm; i++) - writel(lpwm->saved_ctrl[i], lpwm->regs + i * PWM_SIZE + PWM); - - return 0; -} -EXPORT_SYMBOL_GPL(pwm_lpss_resume); - MODULE_DESCRIPTION("PWM driver for Intel LPSS"); MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h index 7909fa12fca2..70db7e389d66 100644 --- a/drivers/pwm/pwm-lpss.h +++ b/drivers/pwm/pwm-lpss.h @@ -19,7 +19,6 @@ struct pwm_lpss_chip { struct pwm_chip chip; void __iomem *regs; const struct pwm_lpss_boardinfo *info; - u32 saved_ctrl[MAX_PWMS]; }; struct pwm_lpss_boardinfo { @@ -37,7 +36,5 @@ struct pwm_lpss_boardinfo { struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, const struct pwm_lpss_boardinfo *info); int pwm_lpss_remove(struct pwm_lpss_chip *lpwm); -int pwm_lpss_suspend(struct device *dev); -int pwm_lpss_resume(struct device *dev); #endif /* __PWM_LPSS_H */ diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 
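The pwm-lpss rework above rounds the base unit to the nearest integer instead of truncating, and clamps it into its register field. A standalone C sketch of that arithmetic, using an assumed 19.2 MHz input clock and 16 base-unit bits (the real values come from lpwm->info and vary by SoC):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t clk_rate = 19200000;   /* assumed lpwm->info->clk_rate */
		unsigned base_unit_bits = 16;   /* assumed lpwm->info->base_unit_bits */
		uint64_t period_ns = 5000000;   /* 5 ms period */
		uint64_t duty_ns = 1250000;     /* 25% duty cycle */

		uint64_t base_unit_range = 1ULL << base_unit_bits;
		uint64_t freq = 1000000000ULL / period_ns;  /* NSEC_PER_SEC / period */

		/* base_unit = round(base_unit_range * freq / c), as in the patch */
		uint64_t base_unit = (freq * base_unit_range + clk_rate / 2) / clk_rate;
		/* must not be 0 and must not overflow the field, so clamp */
		if (base_unit < 1)
			base_unit = 1;
		if (base_unit > base_unit_range - 1)
			base_unit = base_unit_range - 1;

		/* the on-time divisor is the inverted 8-bit duty ratio */
		uint64_t on_time_div = 255 - 255 * duty_ns / period_ns;

		printf("base_unit=%llu on_time_div=%llu\n",
		       (unsigned long long)base_unit,
		       (unsigned long long)on_time_div);
		return 0;
	}

The clamp matters because a base unit of 0 would never advance the counter, while an overflowing value would spill into neighbouring bits of the control register.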
b2c9dd4f0cb5..e36578258b5b 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1775,25 +1775,6 @@ config PXA3XX_GCU If you compile this as a module, it will be called pxa3xx_gcu. -config FB_MBX - tristate "2700G LCD framebuffer support" - depends on FB && ARCH_PXA - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - help - Framebuffer driver for the Intel 2700G (Marathon) Graphics - Accelerator - -config FB_MBX_DEBUG - bool "Enable debugging info via debugfs" - depends on FB_MBX && DEBUG_FS - help - Enable this if you want debugging information using the debug - filesystem (debugfs) - - If unsure, say N. - config FB_FSL_DIU tristate "Freescale DIU framebuffer support" depends on FB && FSL_SOC diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index cad4fb64442a..2ff8849ffde6 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile @@ -31,7 +31,6 @@ obj-$(CONFIG_FB_VIA) += via/ obj-$(CONFIG_FB_KYRO) += kyro/ obj-$(CONFIG_FB_SAVAGE) += savage/ obj-$(CONFIG_FB_GEODE) += geode/ -obj-$(CONFIG_FB_MBX) += mbx/ obj-$(CONFIG_FB_NEOMAGIC) += neofb.o obj-$(CONFIG_FB_3DFX) += tdfxfb.o obj-$(CONFIG_FB_CONTROL) += controlfb.o diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c index 11ab9a153860..edf169d0816e 100644 --- a/drivers/video/fbdev/arkfb.c +++ b/drivers/video/fbdev/arkfb.c @@ -1085,12 +1085,11 @@ static void ark_pci_remove(struct pci_dev *dev) } -#ifdef CONFIG_PM /* PCI suspend */ -static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state) +static int __maybe_unused ark_pci_suspend(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct arkfb_info *par = info->par; dev_info(info->device, "suspend\n"); @@ -1098,7 +1097,7 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state) console_lock(); mutex_lock(&(par->open_lock)); - if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) { + if (par->ref_count == 0) { mutex_unlock(&(par->open_lock)); console_unlock(); return 0; @@ -1106,10 +1105,6 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state) fb_set_suspend(info, 1); - pci_save_state(dev); - pci_disable_device(dev); - pci_set_power_state(dev, pci_choose_state(dev, state)); - mutex_unlock(&(par->open_lock)); console_unlock(); @@ -1119,9 +1114,9 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state) /* PCI resume */ -static int ark_pci_resume (struct pci_dev* dev) +static int __maybe_unused ark_pci_resume(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct arkfb_info *par = info->par; dev_info(info->device, "resume\n"); @@ -1132,14 +1127,6 @@ static int ark_pci_resume (struct pci_dev* dev) if (par->ref_count == 0) goto fail; - pci_set_power_state(dev, PCI_D0); - pci_restore_state(dev); - - if (pci_enable_device(dev)) - goto fail; - - pci_set_master(dev); - arkfb_set_par(info); fb_set_suspend(info, 0); @@ -1148,10 +1135,17 @@ fail: console_unlock(); return 0; } -#else -#define ark_pci_suspend NULL -#define ark_pci_resume NULL -#endif /* CONFIG_PM */ + +static const struct dev_pm_ops ark_pci_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = ark_pci_suspend, + .resume = ark_pci_resume, + .freeze = NULL, + .thaw = ark_pci_resume, + .poweroff = ark_pci_suspend, + .restore = ark_pci_resume, +#endif +}; /* List of boards that we are trying to support */ @@ -1168,8 +1162,7 @@ static struct 
pci_driver arkfb_pci_driver = { .id_table = ark_devices, .probe = ark_pci_probe, .remove = ark_pci_remove, - .suspend = ark_pci_suspend, - .resume = ark_pci_resume, + .driver.pm = &ark_pci_pm_ops, }; /* Cleanup */ diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c index 6fae6ad6cb77..e6a48689c294 100644 --- a/drivers/video/fbdev/aty/aty128fb.c +++ b/drivers/video/fbdev/aty/aty128fb.c @@ -162,10 +162,22 @@ static char * const r128_family[] = { static int aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void aty128_remove(struct pci_dev *pdev); -static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state); -static int aty128_pci_resume(struct pci_dev *pdev); +static int aty128_pci_suspend_late(struct device *dev, pm_message_t state); +static int __maybe_unused aty128_pci_suspend(struct device *dev); +static int __maybe_unused aty128_pci_hibernate(struct device *dev); +static int __maybe_unused aty128_pci_freeze(struct device *dev); +static int __maybe_unused aty128_pci_resume(struct device *dev); static int aty128_do_resume(struct pci_dev *pdev); +static const struct dev_pm_ops aty128_pci_pm_ops = { + .suspend = aty128_pci_suspend, + .resume = aty128_pci_resume, + .freeze = aty128_pci_freeze, + .thaw = aty128_pci_resume, + .poweroff = aty128_pci_hibernate, + .restore = aty128_pci_resume, +}; + /* supported Rage128 chipsets */ static const struct pci_device_id aty128_pci_tbl[] = { { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LE, @@ -272,8 +284,7 @@ static struct pci_driver aty128fb_driver = { .id_table = aty128_pci_tbl, .probe = aty128_probe, .remove = aty128_remove, - .suspend = aty128_pci_suspend, - .resume = aty128_pci_resume, + .driver.pm = &aty128_pci_pm_ops, }; /* packed BIOS settings */ @@ -2316,7 +2327,6 @@ static int aty128fb_ioctl(struct fb_info *info, u_int cmd, u_long arg) static void aty128_set_suspend(struct aty128fb_par *par, int suspend) { u32 pmgt; - struct pci_dev *pdev = par->pdev; if (!par->pdev->pm_cap) return; @@ -2343,23 +2353,15 @@ static void aty128_set_suspend(struct aty128fb_par *par, int suspend) aty_st_le32(BUS_CNTL1, 0x00000010); aty_st_le32(MEM_POWER_MISC, 0x0c830000); msleep(100); - - /* Switch PCI power management to D2 */ - pci_set_power_state(pdev, PCI_D2); } } -static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int aty128_pci_suspend_late(struct device *dev, pm_message_t state) { + struct pci_dev *pdev = to_pci_dev(dev); struct fb_info *info = pci_get_drvdata(pdev); struct aty128fb_par *par = info->par; - /* Because we may change PCI D state ourselves, we need to - * first save the config space content so the core can - * restore it properly on resume. - */ - pci_save_state(pdev); - /* We don't do anything but D2, for now we return 0, but * we may want to change that. How do we know if the BIOS * can properly take care of D3 ? 
Also, with swsusp, we @@ -2418,6 +2420,21 @@ static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } +static int __maybe_unused aty128_pci_suspend(struct device *dev) +{ + return aty128_pci_suspend_late(dev, PMSG_SUSPEND); +} + +static int __maybe_unused aty128_pci_hibernate(struct device *dev) +{ + return aty128_pci_suspend_late(dev, PMSG_HIBERNATE); +} + +static int __maybe_unused aty128_pci_freeze(struct device *dev) +{ + return aty128_pci_suspend_late(dev, PMSG_FREEZE); +} + static int aty128_do_resume(struct pci_dev *pdev) { struct fb_info *info = pci_get_drvdata(pdev); @@ -2464,12 +2481,12 @@ static int aty128_do_resume(struct pci_dev *pdev) return 0; } -static int aty128_pci_resume(struct pci_dev *pdev) +static int __maybe_unused aty128_pci_resume(struct device *dev) { int rc; console_lock(); - rc = aty128_do_resume(pdev); + rc = aty128_do_resume(to_pci_dev(dev)); console_unlock(); return rc; diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h index a7833bc98225..551372f9b9aa 100644 --- a/drivers/video/fbdev/aty/atyfb.h +++ b/drivers/video/fbdev/aty/atyfb.h @@ -287,8 +287,8 @@ static inline void aty_st_8(int regindex, u8 val, const struct atyfb_par *par) #endif } -#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \ -defined (CONFIG_FB_ATY_GENERIC_LCD) || defined (CONFIG_FB_ATY_BACKLIGHT) +#if defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) || \ +defined (CONFIG_FB_ATY_BACKLIGHT) extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par); extern u32 aty_ld_lcd(int index, const struct atyfb_par *par); #endif diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index ad9cfe34c9ff..c8feff0ee8da 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -132,8 +132,8 @@ #define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args) #define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args) -#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \ -defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT) +#if defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_GENERIC_LCD) || \ +defined(CONFIG_FB_ATY_BACKLIGHT) static const u32 lt_lcd_regs[] = { CNFG_PANEL_LG, LCD_GEN_CNTL_LG, @@ -175,7 +175,7 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par) return aty_ld_le32(LCD_DATA, par); } } -#endif /* defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */ +#endif /* defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */ #ifdef CONFIG_FB_ATY_GENERIC_LCD /* @@ -1989,7 +1989,7 @@ static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma) -#if defined(CONFIG_PM) && defined(CONFIG_PCI) +#if defined(CONFIG_PCI) #ifdef CONFIG_PPC_PMAC /* Power management routines. Those are used for PowerBook sleep. @@ -2050,8 +2050,9 @@ static int aty_power_mgmt(int sleep, struct atyfb_par *par) } #endif /* CONFIG_PPC_PMAC */ -static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int atyfb_pci_suspend_late(struct device *dev, pm_message_t state) { + struct pci_dev *pdev = to_pci_dev(dev); struct fb_info *info = pci_get_drvdata(pdev); struct atyfb_par *par = (struct atyfb_par *) info->par; @@ -2077,7 +2078,6 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state) * first save the config space content so the core can * restore it properly on resume. 
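The same conversion pattern repeats across the fbdev drivers in this series (arkfb, aty128fb, atyfb, radeonfb, and later gxfb, lxfb, i740fb, cyber2000fb): the legacy pci_driver .suspend/.resume hooks become a dev_pm_ops, with a single pm_message_t-taking body and thin __maybe_unused wrappers for each sleep transition. A condensed sketch of that shape, with a hypothetical driver name:

	#include <linux/device.h>
	#include <linux/pm.h>

	/* hypothetical driver, shown only to illustrate the conversion shape */
	static int examplefb_suspend_late(struct device *dev, pm_message_t mesg)
	{
		/* quiesce the device; mesg records why we are suspending */
		return 0;
	}

	static int __maybe_unused examplefb_suspend(struct device *dev)
	{
		return examplefb_suspend_late(dev, PMSG_SUSPEND);
	}

	static int __maybe_unused examplefb_freeze(struct device *dev)
	{
		return examplefb_suspend_late(dev, PMSG_FREEZE);
	}

	static int __maybe_unused examplefb_hibernate(struct device *dev)
	{
		return examplefb_suspend_late(dev, PMSG_HIBERNATE);
	}

	static int __maybe_unused examplefb_resume(struct device *dev)
	{
		return 0;
	}

	static const struct dev_pm_ops examplefb_pm_ops = {
		.suspend  = examplefb_suspend,
		.resume   = examplefb_resume,
		.freeze   = examplefb_freeze,
		.thaw     = examplefb_resume,
		.poweroff = examplefb_hibernate,
		.restore  = examplefb_resume,
	};

	/* in struct pci_driver: .driver.pm = &examplefb_pm_ops; the PCI core
	 * now does pci_save_state()/pci_set_power_state() itself */

The wrappers exist because several of these drivers behave differently per event (i740fb, for instance, used to skip work on PM_EVENT_FREEZE); passing the PMSG_* value down keeps a single body per driver.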
*/ - pci_save_state(pdev); #ifdef CONFIG_PPC_PMAC /* Set chip to "suspend" mode */ @@ -2089,8 +2089,6 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state) console_unlock(); return -EIO; } -#else - pci_set_power_state(pdev, pci_choose_state(pdev, state)); #endif console_unlock(); @@ -2100,6 +2098,21 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } +static int __maybe_unused atyfb_pci_suspend(struct device *dev) +{ + return atyfb_pci_suspend_late(dev, PMSG_SUSPEND); +} + +static int __maybe_unused atyfb_pci_hibernate(struct device *dev) +{ + return atyfb_pci_suspend_late(dev, PMSG_HIBERNATE); +} + +static int __maybe_unused atyfb_pci_freeze(struct device *dev) +{ + return atyfb_pci_suspend_late(dev, PMSG_FREEZE); +} + static void aty_resume_chip(struct fb_info *info) { struct atyfb_par *par = info->par; @@ -2114,8 +2127,9 @@ static void aty_resume_chip(struct fb_info *info) aty_ld_le32(BUS_CNTL, par) | BUS_APER_REG_DIS, par); } -static int atyfb_pci_resume(struct pci_dev *pdev) +static int __maybe_unused atyfb_pci_resume(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct fb_info *info = pci_get_drvdata(pdev); struct atyfb_par *par = (struct atyfb_par *) info->par; @@ -2157,7 +2171,18 @@ static int atyfb_pci_resume(struct pci_dev *pdev) return 0; } -#endif /* defined(CONFIG_PM) && defined(CONFIG_PCI) */ +static const struct dev_pm_ops atyfb_pci_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = atyfb_pci_suspend, + .resume = atyfb_pci_resume, + .freeze = atyfb_pci_freeze, + .thaw = atyfb_pci_resume, + .poweroff = atyfb_pci_hibernate, + .restore = atyfb_pci_resume, +#endif /* CONFIG_PM_SLEEP */ +}; + +#endif /* defined(CONFIG_PCI) */ /* Backlight */ #ifdef CONFIG_FB_ATY_BACKLIGHT @@ -3796,10 +3821,7 @@ static struct pci_driver atyfb_driver = { .id_table = atyfb_pci_tbl, .probe = atyfb_pci_probe, .remove = atyfb_pci_remove, -#ifdef CONFIG_PM - .suspend = atyfb_pci_suspend, - .resume = atyfb_pci_resume, -#endif /* CONFIG_PM */ + .driver.pm = &atyfb_pci_pm_ops, }; #endif /* CONFIG_PCI */ diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c index 3fe509cb9b87..2fe690150420 100644 --- a/drivers/video/fbdev/aty/radeon_base.c +++ b/drivers/video/fbdev/aty/radeon_base.c @@ -2307,7 +2307,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev, ret = radeon_kick_out_firmware_fb(pdev); if (ret) - return ret; + goto err_release_fb; /* request the mem regions */ ret = pci_request_region(pdev, 0, "radeonfb framebuffer"); @@ -2555,16 +2555,18 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev) framebuffer_release(info); } +#ifdef CONFIG_PM +#define RADEONFB_PCI_PM_OPS (&radeonfb_pci_pm_ops) +#else +#define RADEONFB_PCI_PM_OPS NULL +#endif static struct pci_driver radeonfb_driver = { .name = "radeonfb", .id_table = radeonfb_pci_table, .probe = radeonfb_pci_register, .remove = radeonfb_pci_unregister, -#ifdef CONFIG_PM - .suspend = radeonfb_pci_suspend, - .resume = radeonfb_pci_resume, -#endif /* CONFIG_PM */ + .driver.pm = RADEONFB_PCI_PM_OPS, }; #ifndef MODULE diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c index f3d8123d7f36..b5fbd5329652 100644 --- a/drivers/video/fbdev/aty/radeon_pm.c +++ b/drivers/video/fbdev/aty/radeon_pm.c @@ -1431,7 +1431,6 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo) mdelay( 15); } -#if defined(CONFIG_PM) #if defined(CONFIG_X86) || defined(CONFIG_PPC_PMAC) static void 
radeon_pm_reset_pad_ctlr_strength(struct radeonfb_info *rinfo) { @@ -2210,7 +2209,6 @@ static void radeon_reinitialize_M9P(struct radeonfb_info *rinfo) radeon_pm_m10_enable_lvds_spread_spectrum(rinfo); } #endif -#endif #if 0 /* Not ready yet */ static void radeon_reinitialize_QW(struct radeonfb_info *rinfo) @@ -2613,8 +2611,9 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend) } } -int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) +static int radeonfb_pci_suspend_late(struct device *dev, pm_message_t mesg) { + struct pci_dev *pdev = to_pci_dev(dev); struct fb_info *info = pci_get_drvdata(pdev); struct radeonfb_info *rinfo = info->par; @@ -2662,11 +2661,6 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) pmac_suspend_agp_for_card(pdev); #endif /* CONFIG_PPC_PMAC */ - /* It's unclear whether or when the generic code will do that, so let's - * do it ourselves. We save state before we do any power management - */ - pci_save_state(pdev); - /* If we support wakeup from poweroff, we save all regs we can including cfg * space */ @@ -2691,7 +2685,6 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) msleep(20); OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_DIGON)); } - pci_disable_device(pdev); } /* If we support D2, we go to it (should be fixed later with a flag forcing * D3 only for some laptops) @@ -2707,6 +2700,21 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) return 0; } +static int radeonfb_pci_suspend(struct device *dev) +{ + return radeonfb_pci_suspend_late(dev, PMSG_SUSPEND); +} + +static int radeonfb_pci_hibernate(struct device *dev) +{ + return radeonfb_pci_suspend_late(dev, PMSG_HIBERNATE); +} + +static int radeonfb_pci_freeze(struct device *dev) +{ + return radeonfb_pci_suspend_late(dev, PMSG_FREEZE); +} + static int radeon_check_power_loss(struct radeonfb_info *rinfo) { return rinfo->save_regs[4] != INPLL(CLK_PIN_CNTL) || @@ -2714,8 +2722,9 @@ static int radeon_check_power_loss(struct radeonfb_info *rinfo) rinfo->save_regs[3] != INPLL(SCLK_CNTL); } -int radeonfb_pci_resume(struct pci_dev *pdev) +static int radeonfb_pci_resume(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct fb_info *info = pci_get_drvdata(pdev); struct radeonfb_info *rinfo = info->par; int rc = 0; @@ -2797,6 +2806,15 @@ int radeonfb_pci_resume(struct pci_dev *pdev) return rc; } +const struct dev_pm_ops radeonfb_pci_pm_ops = { + .suspend = radeonfb_pci_suspend, + .resume = radeonfb_pci_resume, + .freeze = radeonfb_pci_freeze, + .thaw = radeonfb_pci_resume, + .poweroff = radeonfb_pci_hibernate, + .restore = radeonfb_pci_resume, +}; + #ifdef CONFIG_PPC__disabled static void radeonfb_early_resume(void *data) { diff --git a/drivers/video/fbdev/aty/radeonfb.h b/drivers/video/fbdev/aty/radeonfb.h index 131b34dd65af..93f403cbb415 100644 --- a/drivers/video/fbdev/aty/radeonfb.h +++ b/drivers/video/fbdev/aty/radeonfb.h @@ -483,8 +483,7 @@ extern void radeon_delete_i2c_busses(struct radeonfb_info *rinfo); extern int radeon_probe_i2c_connector(struct radeonfb_info *rinfo, int conn, u8 **out_edid); /* PM Functions */ -extern int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t state); -extern int radeonfb_pci_resume(struct pci_dev *pdev); +extern const struct dev_pm_ops radeonfb_pci_pm_ops; extern void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlist, int force_sleep); extern void radeonfb_pm_exit(struct radeonfb_info *rinfo); diff --git a/drivers/video/fbdev/core/fbcon.c 
b/drivers/video/fbdev/core/fbcon.c index 66167830fefd..ae4de3bfd2d7 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1912,7 +1912,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, vc->vc_video_erase_char, vc->vc_size_row * count); return true; - break; case SCROLL_WRAP_MOVE: if (b - t - count > 3 * vc->vc_rows >> 2) { @@ -2003,7 +2002,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, vc->vc_video_erase_char, vc->vc_size_row * count); return true; - break; case SCROLL_WRAP_MOVE: if (b - t - count > 3 * vc->vc_rows >> 2) { diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index cc69649dce95..8268bbee8cae 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1006,6 +1006,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) return 0; } + /* bitfill_aligned() assumes that it's at least 8x8 */ + if (var->xres < 8 || var->yres < 8) + return -EINVAL; + ret = info->fbops->fb_check_var(var, info); if (ret) diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c index 42d37bed518a..d45355b9a58c 100644 --- a/drivers/video/fbdev/cyber2000fb.c +++ b/drivers/video/fbdev/cyber2000fb.c @@ -1810,7 +1810,7 @@ static void cyberpro_pci_remove(struct pci_dev *dev) } } -static int cyberpro_pci_suspend(struct pci_dev *dev, pm_message_t state) +static int __maybe_unused cyberpro_pci_suspend(struct device *dev) { return 0; } @@ -1818,9 +1818,9 @@ static int cyberpro_pci_suspend(struct pci_dev *dev, pm_message_t state) /* * Re-initialise the CyberPro hardware */ -static int cyberpro_pci_resume(struct pci_dev *dev) +static int __maybe_unused cyberpro_pci_resume(struct device *dev) { - struct cfb_info *cfb = pci_get_drvdata(dev); + struct cfb_info *cfb = dev_get_drvdata(dev); if (cfb) { cyberpro_pci_enable_mmio(cfb); @@ -1846,12 +1846,15 @@ static struct pci_device_id cyberpro_pci_table[] = { MODULE_DEVICE_TABLE(pci, cyberpro_pci_table); +static SIMPLE_DEV_PM_OPS(cyberpro_pci_pm_ops, + cyberpro_pci_suspend, + cyberpro_pci_resume); + static struct pci_driver cyberpro_driver = { .name = "CyberPro", .probe = cyberpro_pci_probe, .remove = cyberpro_pci_remove, - .suspend = cyberpro_pci_suspend, - .resume = cyberpro_pci_resume, + .driver.pm = &cyberpro_pci_pm_ops, .id_table = cyberpro_pci_table }; diff --git a/drivers/video/fbdev/geode/gxfb.h b/drivers/video/fbdev/geode/gxfb.h index d2e9c5c8e294..792c111c21e4 100644 --- a/drivers/video/fbdev/geode/gxfb.h +++ b/drivers/video/fbdev/geode/gxfb.h @@ -21,7 +21,6 @@ struct gxfb_par { void __iomem *dc_regs; void __iomem *vid_regs; void __iomem *gp_regs; -#ifdef CONFIG_PM int powered_down; /* register state, for power management functionality */ @@ -36,7 +35,6 @@ struct gxfb_par { uint64_t fp[FP_REG_COUNT]; uint32_t pal[DC_PAL_COUNT]; -#endif }; unsigned int gx_frame_buffer_size(void); @@ -49,11 +47,8 @@ void gx_set_dclk_frequency(struct fb_info *info); void gx_configure_display(struct fb_info *info); int gx_blank_display(struct fb_info *info, int blank_mode); -#ifdef CONFIG_PM int gx_powerdown(struct fb_info *info); int gx_powerup(struct fb_info *info); -#endif - /* Graphics Processor registers (table 6-23 from the data book) */ enum gp_registers { diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c index d38a148d4746..44089b331f91 100644 --- a/drivers/video/fbdev/geode/gxfb_core.c +++ b/drivers/video/fbdev/geode/gxfb_core.c @@ 
-322,17 +322,14 @@ static struct fb_info *gxfb_init_fbinfo(struct device *dev) return info; } -#ifdef CONFIG_PM -static int gxfb_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused gxfb_suspend(struct device *dev) { - struct fb_info *info = pci_get_drvdata(pdev); + struct fb_info *info = dev_get_drvdata(dev); - if (state.event == PM_EVENT_SUSPEND) { - console_lock(); - gx_powerdown(info); - fb_set_suspend(info, 1); - console_unlock(); - } + console_lock(); + gx_powerdown(info); + fb_set_suspend(info, 1); + console_unlock(); /* there's no point in setting PCI states; we emulate PCI, so * we don't end up getting power savings anyways */ @@ -340,9 +337,9 @@ static int gxfb_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -static int gxfb_resume(struct pci_dev *pdev) +static int __maybe_unused gxfb_resume(struct device *dev) { - struct fb_info *info = pci_get_drvdata(pdev); + struct fb_info *info = dev_get_drvdata(dev); int ret; console_lock(); @@ -356,7 +353,6 @@ static int gxfb_resume(struct pci_dev *pdev) console_unlock(); return 0; } -#endif static int gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -467,15 +463,23 @@ static const struct pci_device_id gxfb_id_table[] = { MODULE_DEVICE_TABLE(pci, gxfb_id_table); +static const struct dev_pm_ops gxfb_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = gxfb_suspend, + .resume = gxfb_resume, + .freeze = NULL, + .thaw = gxfb_resume, + .poweroff = NULL, + .restore = gxfb_resume, +#endif +}; + static struct pci_driver gxfb_driver = { .name = "gxfb", .id_table = gxfb_id_table, .probe = gxfb_probe, .remove = gxfb_remove, -#ifdef CONFIG_PM - .suspend = gxfb_suspend, - .resume = gxfb_resume, -#endif + .driver.pm = &gxfb_pm_ops, }; #ifndef MODULE diff --git a/drivers/video/fbdev/geode/lxfb.h b/drivers/video/fbdev/geode/lxfb.h index ef24bf6d49dc..d37b32dbcd68 100644 --- a/drivers/video/fbdev/geode/lxfb.h +++ b/drivers/video/fbdev/geode/lxfb.h @@ -29,7 +29,6 @@ struct lxfb_par { void __iomem *gp_regs; void __iomem *dc_regs; void __iomem *vp_regs; -#ifdef CONFIG_PM int powered_down; /* register state, for power mgmt functionality */ @@ -50,7 +49,6 @@ struct lxfb_par { uint32_t hcoeff[DC_HFILT_COUNT * 2]; uint32_t vcoeff[DC_VFILT_COUNT]; uint32_t vp_coeff[VP_COEFF_SIZE / 4]; -#endif }; static inline unsigned int lx_get_pitch(unsigned int xres, int bpp) @@ -64,11 +62,8 @@ int lx_blank_display(struct fb_info *, int); void lx_set_palette_reg(struct fb_info *, unsigned int, unsigned int, unsigned int, unsigned int); -#ifdef CONFIG_PM int lx_powerdown(struct fb_info *info); int lx_powerup(struct fb_info *info); -#endif - /* Graphics Processor registers (table 6-29 from the data book) */ enum gp_registers { diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c index adc2d9c2395e..66c81262d18f 100644 --- a/drivers/video/fbdev/geode/lxfb_core.c +++ b/drivers/video/fbdev/geode/lxfb_core.c @@ -443,17 +443,14 @@ static struct fb_info *lxfb_init_fbinfo(struct device *dev) return info; } -#ifdef CONFIG_PM -static int lxfb_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused lxfb_suspend(struct device *dev) { - struct fb_info *info = pci_get_drvdata(pdev); + struct fb_info *info = dev_get_drvdata(dev); - if (state.event == PM_EVENT_SUSPEND) { - console_lock(); - lx_powerdown(info); - fb_set_suspend(info, 1); - console_unlock(); - } + console_lock(); + lx_powerdown(info); + fb_set_suspend(info, 1); + console_unlock(); /* there's no point in setting PCI 
states; we emulate PCI, so * we don't end up getting power savings anyways */ @@ -461,9 +458,9 @@ static int lxfb_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -static int lxfb_resume(struct pci_dev *pdev) +static int __maybe_unused lxfb_resume(struct device *dev) { - struct fb_info *info = pci_get_drvdata(pdev); + struct fb_info *info = dev_get_drvdata(dev); int ret; console_lock(); @@ -477,10 +474,6 @@ static int lxfb_resume(struct pci_dev *pdev) console_unlock(); return 0; } -#else -#define lxfb_suspend NULL -#define lxfb_resume NULL -#endif static int lxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -600,13 +593,23 @@ static struct pci_device_id lxfb_id_table[] = { MODULE_DEVICE_TABLE(pci, lxfb_id_table); +static const struct dev_pm_ops lxfb_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = lxfb_suspend, + .resume = lxfb_resume, + .freeze = NULL, + .thaw = lxfb_resume, + .poweroff = NULL, + .restore = lxfb_resume, +#endif +}; + static struct pci_driver lxfb_driver = { .name = "lxfb", .id_table = lxfb_id_table, .probe = lxfb_probe, .remove = lxfb_remove, - .suspend = lxfb_suspend, - .resume = lxfb_resume, + .driver.pm = &lxfb_pm_ops, }; #ifndef MODULE diff --git a/drivers/video/fbdev/geode/lxfb_ops.c b/drivers/video/fbdev/geode/lxfb_ops.c index 5be8bc62844c..b3a041fce570 100644 --- a/drivers/video/fbdev/geode/lxfb_ops.c +++ b/drivers/video/fbdev/geode/lxfb_ops.c @@ -580,8 +580,6 @@ int lx_blank_display(struct fb_info *info, int blank_mode) return 0; } -#ifdef CONFIG_PM - static void lx_save_regs(struct lxfb_par *par) { uint32_t filt; @@ -837,5 +835,3 @@ int lx_powerup(struct fb_info *info) par->powered_down = 0; return 0; } - -#endif diff --git a/drivers/video/fbdev/geode/suspend_gx.c b/drivers/video/fbdev/geode/suspend_gx.c index 1110a527c35c..8c49d4e98772 100644 --- a/drivers/video/fbdev/geode/suspend_gx.c +++ b/drivers/video/fbdev/geode/suspend_gx.c @@ -11,8 +11,6 @@ #include "gxfb.h" -#ifdef CONFIG_PM - static void gx_save_regs(struct gxfb_par *par) { int i; @@ -259,5 +257,3 @@ int gx_powerup(struct fb_info *info) par->powered_down = 0; return 0; } - -#endif diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c index e6f35f8feefc..52cce0db8bd3 100644 --- a/drivers/video/fbdev/i740fb.c +++ b/drivers/video/fbdev/i740fb.c @@ -1175,16 +1175,11 @@ static void i740fb_remove(struct pci_dev *dev) } } -#ifdef CONFIG_PM -static int i740fb_suspend(struct pci_dev *dev, pm_message_t state) +static int __maybe_unused i740fb_suspend(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct i740fb_par *par = info->par; - /* don't disable console during hibernation and wakeup from it */ - if (state.event == PM_EVENT_FREEZE || state.event == PM_EVENT_PRETHAW) - return 0; - console_lock(); mutex_lock(&(par->open_lock)); @@ -1197,19 +1192,15 @@ static int i740fb_suspend(struct pci_dev *dev, pm_message_t state) fb_set_suspend(info, 1); - pci_save_state(dev); - pci_disable_device(dev); - pci_set_power_state(dev, pci_choose_state(dev, state)); - mutex_unlock(&(par->open_lock)); console_unlock(); return 0; } -static int i740fb_resume(struct pci_dev *dev) +static int __maybe_unused i740fb_resume(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct i740fb_par *par = info->par; console_lock(); @@ -1218,11 +1209,6 @@ static int i740fb_resume(struct pci_dev *dev) if (par->ref_count == 0) goto fail; - pci_set_power_state(dev, 
PCI_D0); - pci_restore_state(dev); - if (pci_enable_device(dev)) - goto fail; - i740fb_set_par(info); fb_set_suspend(info, 0); @@ -1231,10 +1217,17 @@ fail: console_unlock(); return 0; } -#else -#define i740fb_suspend NULL -#define i740fb_resume NULL -#endif /* CONFIG_PM */ + +static const struct dev_pm_ops i740fb_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = i740fb_suspend, + .resume = i740fb_resume, + .freeze = NULL, + .thaw = i740fb_resume, + .poweroff = i740fb_suspend, + .restore = i740fb_resume, +#endif /* CONFIG_PM_SLEEP */ +}; #define I740_ID_PCI 0x00d1 #define I740_ID_AGP 0x7800 @@ -1251,8 +1244,7 @@ static struct pci_driver i740fb_driver = { .id_table = i740fb_id_table, .probe = i740fb_probe, .remove = i740fb_remove, - .suspend = i740fb_suspend, - .resume = i740fb_resume, + .driver.pm = &i740fb_pm_ops, }; #ifndef MODULE diff --git a/drivers/video/fbdev/kyro/STG4000InitDevice.c b/drivers/video/fbdev/kyro/STG4000InitDevice.c index 1d3f2080aa6f..21875d3c2dc2 100644 --- a/drivers/video/fbdev/kyro/STG4000InitDevice.c +++ b/drivers/video/fbdev/kyro/STG4000InitDevice.c @@ -120,7 +120,7 @@ u32 ProgramClock(u32 refClock, { u32 R = 0, F = 0, OD = 0, ODIndex = 0; u32 ulBestR = 0, ulBestF = 0, ulBestOD = 0; - u32 ulBestVCO = 0, ulBestClk = 0, ulBestScore = 0; + u32 ulBestClk = 0, ulBestScore = 0; u32 ulScore, ulPhaseScore, ulVcoScore; u32 ulTmp = 0, ulVCO; u32 ulScaleClockReq, ulMinClock, ulMaxClock; @@ -189,7 +189,6 @@ u32 ProgramClock(u32 refClock, ulScore = ulPhaseScore + ulVcoScore; if (!ulBestScore) { - ulBestVCO = ulVCO; ulBestOD = OD; ulBestF = F; ulBestR = R; @@ -206,7 +205,6 @@ u32 ProgramClock(u32 refClock, but we shall keep this code in case new restrictions come into play --------------------------------------------------------------------------*/ if ((ulScore >= ulBestScore) && (OD > 0)) { - ulBestVCO = ulVCO; ulBestOD = OD; ulBestF = F; ulBestR = R; @@ -244,7 +242,6 @@ int SetCoreClockPLL(volatile STG4000REG __iomem *pSTGReg, struct pci_dev *pDev) { u32 F, R, P; u16 core_pll = 0, sub; - u32 ulCoreClock; u32 tmp; u32 ulChipSpeed; @@ -282,7 +279,7 @@ int SetCoreClockPLL(volatile STG4000REG __iomem *pSTGReg, struct pci_dev *pDev) if (ulChipSpeed == 0) return -EINVAL; - ulCoreClock = ProgramClock(REF_FREQ, CORE_PLL_FREQ, &F, &R, &P); + ProgramClock(REF_FREQ, CORE_PLL_FREQ, &F, &R, &P); core_pll |= ((P) | ((F - 2) << 2) | ((R - 2) << 11)); diff --git a/drivers/video/fbdev/mbx/Makefile b/drivers/video/fbdev/mbx/Makefile deleted file mode 100644 index 3e8e7ff41f18..000000000000 --- a/drivers/video/fbdev/mbx/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# Makefile for the 2700G controller driver. 
- -obj-y += mbxfb.o diff --git a/drivers/video/fbdev/mbx/mbxdebugfs.c b/drivers/video/fbdev/mbx/mbxdebugfs.c deleted file mode 100644 index 09af721638fb..000000000000 --- a/drivers/video/fbdev/mbx/mbxdebugfs.c +++ /dev/null @@ -1,232 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/debugfs.h> -#include <linux/slab.h> - -#define BIG_BUFFER_SIZE (1024) - -static char big_buffer[BIG_BUFFER_SIZE]; - -struct mbxfb_debugfs_data { - struct dentry *dir; - struct dentry *sysconf; - struct dentry *clock; - struct dentry *display; - struct dentry *gsctl; - struct dentry *sdram; - struct dentry *misc; -}; - -static ssize_t write_file_dummy(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - return count; -} - -static ssize_t sysconf_read_file(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - char * s = big_buffer; - - s += sprintf(s, "SYSCFG = %08x\n", readl(SYSCFG)); - s += sprintf(s, "PFBASE = %08x\n", readl(PFBASE)); - s += sprintf(s, "PFCEIL = %08x\n", readl(PFCEIL)); - s += sprintf(s, "POLLFLAG = %08x\n", readl(POLLFLAG)); - s += sprintf(s, "SYSRST = %08x\n", readl(SYSRST)); - - return simple_read_from_buffer(userbuf, count, ppos, - big_buffer, s-big_buffer); -} - - -static ssize_t gsctl_read_file(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - char * s = big_buffer; - - s += sprintf(s, "GSCTRL = %08x\n", readl(GSCTRL)); - s += sprintf(s, "VSCTRL = %08x\n", readl(VSCTRL)); - s += sprintf(s, "GBBASE = %08x\n", readl(GBBASE)); - s += sprintf(s, "VBBASE = %08x\n", readl(VBBASE)); - s += sprintf(s, "GDRCTRL = %08x\n", readl(GDRCTRL)); - s += sprintf(s, "VCMSK = %08x\n", readl(VCMSK)); - s += sprintf(s, "GSCADR = %08x\n", readl(GSCADR)); - s += sprintf(s, "VSCADR = %08x\n", readl(VSCADR)); - s += sprintf(s, "VUBASE = %08x\n", readl(VUBASE)); - s += sprintf(s, "VVBASE = %08x\n", readl(VVBASE)); - s += sprintf(s, "GSADR = %08x\n", readl(GSADR)); - s += sprintf(s, "VSADR = %08x\n", readl(VSADR)); - s += sprintf(s, "HCCTRL = %08x\n", readl(HCCTRL)); - s += sprintf(s, "HCSIZE = %08x\n", readl(HCSIZE)); - s += sprintf(s, "HCPOS = %08x\n", readl(HCPOS)); - s += sprintf(s, "HCBADR = %08x\n", readl(HCBADR)); - s += sprintf(s, "HCCKMSK = %08x\n", readl(HCCKMSK)); - s += sprintf(s, "GPLUT = %08x\n", readl(GPLUT)); - - return simple_read_from_buffer(userbuf, count, ppos, - big_buffer, s-big_buffer); -} - -static ssize_t display_read_file(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - char * s = big_buffer; - - s += sprintf(s, "DSCTRL = %08x\n", readl(DSCTRL)); - s += sprintf(s, "DHT01 = %08x\n", readl(DHT01)); - s += sprintf(s, "DHT02 = %08x\n", readl(DHT02)); - s += sprintf(s, "DHT03 = %08x\n", readl(DHT03)); - s += sprintf(s, "DVT01 = %08x\n", readl(DVT01)); - s += sprintf(s, "DVT02 = %08x\n", readl(DVT02)); - s += sprintf(s, "DVT03 = %08x\n", readl(DVT03)); - s += sprintf(s, "DBCOL = %08x\n", readl(DBCOL)); - s += sprintf(s, "BGCOLOR = %08x\n", readl(BGCOLOR)); - s += sprintf(s, "DINTRS = %08x\n", readl(DINTRS)); - s += sprintf(s, "DINTRE = %08x\n", readl(DINTRE)); - s += sprintf(s, "DINTRCNT = %08x\n", readl(DINTRCNT)); - s += sprintf(s, "DSIG = %08x\n", readl(DSIG)); - s += sprintf(s, "DMCTRL = %08x\n", readl(DMCTRL)); - s += sprintf(s, "CLIPCTRL = %08x\n", readl(CLIPCTRL)); - s += sprintf(s, "SPOCTRL = %08x\n", readl(SPOCTRL)); - s += sprintf(s, "SVCTRL = %08x\n", readl(SVCTRL)); - s += sprintf(s, "DLSTS = %08x\n", readl(DLSTS)); - s += sprintf(s, "DLLCTRL = %08x\n", readl(DLLCTRL)); - s 
+= sprintf(s, "DVLNUM = %08x\n", readl(DVLNUM)); - s += sprintf(s, "DUCTRL = %08x\n", readl(DUCTRL)); - s += sprintf(s, "DVECTRL = %08x\n", readl(DVECTRL)); - s += sprintf(s, "DHDET = %08x\n", readl(DHDET)); - s += sprintf(s, "DVDET = %08x\n", readl(DVDET)); - s += sprintf(s, "DODMSK = %08x\n", readl(DODMSK)); - s += sprintf(s, "CSC01 = %08x\n", readl(CSC01)); - s += sprintf(s, "CSC02 = %08x\n", readl(CSC02)); - s += sprintf(s, "CSC03 = %08x\n", readl(CSC03)); - s += sprintf(s, "CSC04 = %08x\n", readl(CSC04)); - s += sprintf(s, "CSC05 = %08x\n", readl(CSC05)); - - return simple_read_from_buffer(userbuf, count, ppos, - big_buffer, s-big_buffer); -} - -static ssize_t clock_read_file(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - char * s = big_buffer; - - s += sprintf(s, "SYSCLKSRC = %08x\n", readl(SYSCLKSRC)); - s += sprintf(s, "PIXCLKSRC = %08x\n", readl(PIXCLKSRC)); - s += sprintf(s, "CLKSLEEP = %08x\n", readl(CLKSLEEP)); - s += sprintf(s, "COREPLL = %08x\n", readl(COREPLL)); - s += sprintf(s, "DISPPLL = %08x\n", readl(DISPPLL)); - s += sprintf(s, "PLLSTAT = %08x\n", readl(PLLSTAT)); - s += sprintf(s, "VOVRCLK = %08x\n", readl(VOVRCLK)); - s += sprintf(s, "PIXCLK = %08x\n", readl(PIXCLK)); - s += sprintf(s, "MEMCLK = %08x\n", readl(MEMCLK)); - s += sprintf(s, "M24CLK = %08x\n", readl(M24CLK)); - s += sprintf(s, "MBXCLK = %08x\n", readl(MBXCLK)); - s += sprintf(s, "SDCLK = %08x\n", readl(SDCLK)); - s += sprintf(s, "PIXCLKDIV = %08x\n", readl(PIXCLKDIV)); - - return simple_read_from_buffer(userbuf, count, ppos, - big_buffer, s-big_buffer); -} - -static ssize_t sdram_read_file(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - char * s = big_buffer; - - s += sprintf(s, "LMRST = %08x\n", readl(LMRST)); - s += sprintf(s, "LMCFG = %08x\n", readl(LMCFG)); - s += sprintf(s, "LMPWR = %08x\n", readl(LMPWR)); - s += sprintf(s, "LMPWRSTAT = %08x\n", readl(LMPWRSTAT)); - s += sprintf(s, "LMCEMR = %08x\n", readl(LMCEMR)); - s += sprintf(s, "LMTYPE = %08x\n", readl(LMTYPE)); - s += sprintf(s, "LMTIM = %08x\n", readl(LMTIM)); - s += sprintf(s, "LMREFRESH = %08x\n", readl(LMREFRESH)); - s += sprintf(s, "LMPROTMIN = %08x\n", readl(LMPROTMIN)); - s += sprintf(s, "LMPROTMAX = %08x\n", readl(LMPROTMAX)); - s += sprintf(s, "LMPROTCFG = %08x\n", readl(LMPROTCFG)); - s += sprintf(s, "LMPROTERR = %08x\n", readl(LMPROTERR)); - - return simple_read_from_buffer(userbuf, count, ppos, - big_buffer, s-big_buffer); -} - -static ssize_t misc_read_file(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - char * s = big_buffer; - - s += sprintf(s, "LCD_CONFIG = %08x\n", readl(LCD_CONFIG)); - s += sprintf(s, "ODFBPWR = %08x\n", readl(ODFBPWR)); - s += sprintf(s, "ODFBSTAT = %08x\n", readl(ODFBSTAT)); - s += sprintf(s, "ID = %08x\n", readl(ID)); - - return simple_read_from_buffer(userbuf, count, ppos, - big_buffer, s-big_buffer); -} - - -static const struct file_operations sysconf_fops = { - .read = sysconf_read_file, - .write = write_file_dummy, - .open = simple_open, - .llseek = default_llseek, -}; - -static const struct file_operations clock_fops = { - .read = clock_read_file, - .write = write_file_dummy, - .open = simple_open, - .llseek = default_llseek, -}; - -static const struct file_operations display_fops = { - .read = display_read_file, - .write = write_file_dummy, - .open = simple_open, - .llseek = default_llseek, -}; - -static const struct file_operations gsctl_fops = { - .read = gsctl_read_file, - .write = write_file_dummy, - .open = 
simple_open, - .llseek = default_llseek, -}; - -static const struct file_operations sdram_fops = { - .read = sdram_read_file, - .write = write_file_dummy, - .open = simple_open, - .llseek = default_llseek, -}; - -static const struct file_operations misc_fops = { - .read = misc_read_file, - .write = write_file_dummy, - .open = simple_open, - .llseek = default_llseek, -}; - -static void mbxfb_debugfs_init(struct fb_info *fbi) -{ - struct mbxfb_info *mfbi = fbi->par; - struct dentry *dir; - - dir = debugfs_create_dir("mbxfb", NULL); - mfbi->debugfs_dir = dir; - - debugfs_create_file("sysconf", 0444, dir, fbi, &sysconf_fops); - debugfs_create_file("clock", 0444, dir, fbi, &clock_fops); - debugfs_create_file("display", 0444, dir, fbi, &display_fops); - debugfs_create_file("gsctl", 0444, dir, fbi, &gsctl_fops); - debugfs_create_file("sdram", 0444, dir, fbi, &sdram_fops); - debugfs_create_file("misc", 0444, dir, fbi, &misc_fops); -} - -static void mbxfb_debugfs_remove(struct fb_info *fbi) -{ - struct mbxfb_info *mfbi = fbi->par; - - debugfs_remove_recursive(mfbi->debugfs_dir); -} diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c deleted file mode 100644 index 6dc287c819cb..000000000000 --- a/drivers/video/fbdev/mbx/mbxfb.c +++ /dev/null @@ -1,1053 +0,0 @@ -/* - * linux/drivers/video/mbx/mbxfb.c - * - * Copyright (C) 2006-2007 8D Technologies inc - * Raphael Assenat <raph@8d.com> - * - Added video overlay support - * - Various improvements - * - * Copyright (C) 2006 Compulab, Ltd. - * Mike Rapoport <mike@compulab.co.il> - * - Creation of driver - * - * Based on pxafb.c - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive for - * more details. - * - * Intel 2700G (Marathon) Graphics Accelerator Frame Buffer Driver - * - */ - -#include <linux/delay.h> -#include <linux/fb.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/uaccess.h> -#include <linux/io.h> - -#include <video/mbxfb.h> - -#include "regs.h" -#include "reg_bits.h" - -static void __iomem *virt_base_2700; - -#define write_reg(val, reg) do { writel((val), (reg)); } while(0) - -/* Without this delay, the graphics appears somehow scaled and - * there is a lot of jitter in scanlines. This delay is probably - * needed only after setting some specific register(s) somewhere, - * not all over the place... 
*/ -#define write_reg_dly(val, reg) do { writel((val), reg); udelay(1000); } while(0) - -#define MIN_XRES 16 -#define MIN_YRES 16 -#define MAX_XRES 2048 -#define MAX_YRES 2048 - -#define MAX_PALETTES 16 - -/* FIXME: take care of different chip revisions with different sizes - of ODFB */ -#define MEMORY_OFFSET 0x60000 - -struct mbxfb_info { - struct device *dev; - - struct resource *fb_res; - struct resource *fb_req; - - struct resource *reg_res; - struct resource *reg_req; - - void __iomem *fb_virt_addr; - unsigned long fb_phys_addr; - - void __iomem *reg_virt_addr; - unsigned long reg_phys_addr; - - int (*platform_probe) (struct fb_info * fb); - int (*platform_remove) (struct fb_info * fb); - - u32 pseudo_palette[MAX_PALETTES]; -#ifdef CONFIG_FB_MBX_DEBUG - struct dentry *debugfs_dir; -#endif - -}; - -static const struct fb_var_screeninfo mbxfb_default = { - .xres = 640, - .yres = 480, - .xres_virtual = 640, - .yres_virtual = 480, - .bits_per_pixel = 16, - .red = {11, 5, 0}, - .green = {5, 6, 0}, - .blue = {0, 5, 0}, - .activate = FB_ACTIVATE_TEST, - .height = -1, - .width = -1, - .pixclock = 40000, - .left_margin = 48, - .right_margin = 16, - .upper_margin = 33, - .lower_margin = 10, - .hsync_len = 96, - .vsync_len = 2, - .vmode = FB_VMODE_NONINTERLACED, - .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, -}; - -static const struct fb_fix_screeninfo mbxfb_fix = { - .id = "MBX", - .type = FB_TYPE_PACKED_PIXELS, - .visual = FB_VISUAL_TRUECOLOR, - .xpanstep = 0, - .ypanstep = 0, - .ywrapstep = 0, - .accel = FB_ACCEL_NONE, -}; - -struct pixclock_div { - u8 m; - u8 n; - u8 p; -}; - -static unsigned int mbxfb_get_pixclock(unsigned int pixclock_ps, - struct pixclock_div *div) -{ - u8 m, n, p; - unsigned int err = 0; - unsigned int min_err = ~0x0; - unsigned int clk; - unsigned int best_clk = 0; - unsigned int ref_clk = 13000; /* FIXME: take from platform data */ - unsigned int pixclock; - - /* convert pixclock to KHz */ - pixclock = PICOS2KHZ(pixclock_ps); - - /* PLL output freq = (ref_clk * M) / (N * 2^P) - * - * M: 1 to 63 - * N: 1 to 7 - * P: 0 to 7 - */ - - /* RAPH: When N==1, the resulting pixel clock appears to - * get divided by 2. Preventing N=1 by starting the following - * loop at 2 prevents this. Is this a bug with my chip - * revision or something I don't understand? */ - for (m = 1; m < 64; m++) { - for (n = 2; n < 8; n++) { - for (p = 0; p < 8; p++) { - clk = (ref_clk * m) / (n * (1 << p)); - err = (clk > pixclock) ?
(clk - pixclock) : - (pixclock - clk); - if (err < min_err) { - min_err = err; - best_clk = clk; - div->m = m; - div->n = n; - div->p = p; - } - } - } - } - return KHZ2PICOS(best_clk); -} - -static int mbxfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, - u_int trans, struct fb_info *info) -{ - u32 val, ret = 1; - - if (regno < MAX_PALETTES) { - u32 *pal = info->pseudo_palette; - - val = (red & 0xf800) | ((green & 0xfc00) >> 5) | - ((blue & 0xf800) >> 11); - pal[regno] = val; - ret = 0; - } - - return ret; -} - -static int mbxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) -{ - struct pixclock_div div; - - var->pixclock = mbxfb_get_pixclock(var->pixclock, &div); - - if (var->xres < MIN_XRES) - var->xres = MIN_XRES; - if (var->yres < MIN_YRES) - var->yres = MIN_YRES; - if (var->xres > MAX_XRES) - return -EINVAL; - if (var->yres > MAX_YRES) - return -EINVAL; - var->xres_virtual = max(var->xres_virtual, var->xres); - var->yres_virtual = max(var->yres_virtual, var->yres); - - switch (var->bits_per_pixel) { - /* 8 bits-per-pixel is not supported yet */ - case 8: - return -EINVAL; - case 16: - var->green.length = (var->green.length == 5) ? 5 : 6; - var->red.length = 5; - var->blue.length = 5; - var->transp.length = 6 - var->green.length; - var->blue.offset = 0; - var->green.offset = 5; - var->red.offset = 5 + var->green.length; - var->transp.offset = (5 + var->red.offset) & 15; - break; - case 24: /* RGB 888 */ - case 32: /* RGBA 8888 */ - var->red.offset = 16; - var->red.length = 8; - var->green.offset = 8; - var->green.length = 8; - var->blue.offset = 0; - var->blue.length = 8; - var->transp.length = var->bits_per_pixel - 24; - var->transp.offset = (var->transp.length) ? 24 : 0; - break; - } - var->red.msb_right = 0; - var->green.msb_right = 0; - var->blue.msb_right = 0; - var->transp.msb_right = 0; - - return 0; -} - -static int mbxfb_set_par(struct fb_info *info) -{ - struct fb_var_screeninfo *var = &info->var; - struct pixclock_div div; - ushort hbps, ht, hfps, has; - ushort vbps, vt, vfps, vas; - u32 gsctrl = readl(GSCTRL); - u32 gsadr = readl(GSADR); - - info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; - - /* setup color mode */ - gsctrl &= ~(FMsk(GSCTRL_GPIXFMT)); - /* FIXME: add *WORKING* support for 8-bits per color */ - if (info->var.bits_per_pixel == 8) { - return -EINVAL; - } else { - fb_dealloc_cmap(&info->cmap); - gsctrl &= ~GSCTRL_LUT_EN; - - info->fix.visual = FB_VISUAL_TRUECOLOR; - switch (info->var.bits_per_pixel) { - case 16: - if (info->var.green.length == 5) - gsctrl |= GSCTRL_GPIXFMT_ARGB1555; - else - gsctrl |= GSCTRL_GPIXFMT_RGB565; - break; - case 24: - gsctrl |= GSCTRL_GPIXFMT_RGB888; - break; - case 32: - gsctrl |= GSCTRL_GPIXFMT_ARGB8888; - break; - } - } - - /* setup resolution */ - gsctrl &= ~(FMsk(GSCTRL_GSWIDTH) | FMsk(GSCTRL_GSHEIGHT)); - gsctrl |= Gsctrl_Width(info->var.xres) | - Gsctrl_Height(info->var.yres); - write_reg_dly(gsctrl, GSCTRL); - - gsadr &= ~(FMsk(GSADR_SRCSTRIDE)); - gsadr |= Gsadr_Srcstride(info->var.xres * info->var.bits_per_pixel / - (8 * 16) - 1); - write_reg_dly(gsadr, GSADR); - - /* setup timings */ - var->pixclock = mbxfb_get_pixclock(info->var.pixclock, &div); - - write_reg_dly((Disp_Pll_M(div.m) | Disp_Pll_N(div.n) | - Disp_Pll_P(div.p) | DISP_PLL_EN), DISPPLL); - - hbps = var->hsync_len; - has = hbps + var->left_margin; - hfps = has + var->xres; - ht = hfps + var->right_margin; - - vbps = var->vsync_len; - vas = vbps + var->upper_margin; - vfps = vas + var->yres; - vt = vfps + 
var->lower_margin; - - write_reg_dly((Dht01_Hbps(hbps) | Dht01_Ht(ht)), DHT01); - write_reg_dly((Dht02_Hlbs(has) | Dht02_Has(has)), DHT02); - write_reg_dly((Dht03_Hfps(hfps) | Dht03_Hrbs(hfps)), DHT03); - write_reg_dly((Dhdet_Hdes(has) | Dhdet_Hdef(hfps)), DHDET); - - write_reg_dly((Dvt01_Vbps(vbps) | Dvt01_Vt(vt)), DVT01); - write_reg_dly((Dvt02_Vtbs(vas) | Dvt02_Vas(vas)), DVT02); - write_reg_dly((Dvt03_Vfps(vfps) | Dvt03_Vbbs(vfps)), DVT03); - write_reg_dly((Dvdet_Vdes(vas) | Dvdet_Vdef(vfps)), DVDET); - write_reg_dly((Dvectrl_Vevent(vfps) | Dvectrl_Vfetch(vbps)), DVECTRL); - - write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL); - - write_reg_dly(DINTRE_VEVENT0_EN, DINTRE); - - return 0; -} - -static int mbxfb_blank(int blank, struct fb_info *info) -{ - switch (blank) { - case FB_BLANK_POWERDOWN: - case FB_BLANK_VSYNC_SUSPEND: - case FB_BLANK_HSYNC_SUSPEND: - case FB_BLANK_NORMAL: - write_reg_dly((readl(DSCTRL) & ~DSCTRL_SYNCGEN_EN), DSCTRL); - write_reg_dly((readl(PIXCLK) & ~PIXCLK_EN), PIXCLK); - write_reg_dly((readl(VOVRCLK) & ~VOVRCLK_EN), VOVRCLK); - break; - case FB_BLANK_UNBLANK: - write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL); - write_reg_dly((readl(PIXCLK) | PIXCLK_EN), PIXCLK); - break; - } - return 0; -} - -static int mbxfb_setupOverlay(struct mbxfb_overlaySetup *set) -{ - u32 vsctrl, vscadr, vsadr; - u32 sssize, spoctrl, shctrl; - u32 vubase, vvbase; - u32 vovrclk; - - if (set->scaled_width==0 || set->scaled_height==0) - return -EINVAL; - - /* read registers which have reserved bits - * so we can write them back as-is. */ - vovrclk = readl(VOVRCLK); - vsctrl = readl(VSCTRL); - vscadr = readl(VSCADR); - vubase = readl(VUBASE); - vvbase = readl(VVBASE); - shctrl = readl(SHCTRL); - - spoctrl = readl(SPOCTRL); - sssize = readl(SSSIZE); - - vsctrl &= ~( FMsk(VSCTRL_VSWIDTH) | - FMsk(VSCTRL_VSHEIGHT) | - FMsk(VSCTRL_VPIXFMT) | - VSCTRL_GAMMA_EN | VSCTRL_CSC_EN | - VSCTRL_COSITED ); - vsctrl |= Vsctrl_Width(set->width) | Vsctrl_Height(set->height) | - VSCTRL_CSC_EN; - - vscadr &= ~(VSCADR_STR_EN | FMsk(VSCADR_VBASE_ADR) ); - vubase &= ~(VUBASE_UVHALFSTR | FMsk(VUBASE_UBASE_ADR)); - vvbase &= ~(FMsk(VVBASE_VBASE_ADR)); - - switch (set->fmt) { - case MBXFB_FMT_YUV16: - vsctrl |= VSCTRL_VPIXFMT_YUV12; - - set->Y_stride = ((set->width) + 0xf ) & ~0xf; - break; - case MBXFB_FMT_YUV12: - vsctrl |= VSCTRL_VPIXFMT_YUV12; - - set->Y_stride = ((set->width) + 0xf ) & ~0xf; - vubase |= VUBASE_UVHALFSTR; - - break; - case MBXFB_FMT_UY0VY1: - vsctrl |= VSCTRL_VPIXFMT_UY0VY1; - set->Y_stride = (set->width*2 + 0xf ) & ~0xf; - break; - case MBXFB_FMT_VY0UY1: - vsctrl |= VSCTRL_VPIXFMT_VY0UY1; - set->Y_stride = (set->width*2 + 0xf ) & ~0xf; - break; - case MBXFB_FMT_Y0UY1V: - vsctrl |= VSCTRL_VPIXFMT_Y0UY1V; - set->Y_stride = (set->width*2 + 0xf ) & ~0xf; - break; - case MBXFB_FMT_Y0VY1U: - vsctrl |= VSCTRL_VPIXFMT_Y0VY1U; - set->Y_stride = (set->width*2 + 0xf ) & ~0xf; - break; - default: - return -EINVAL; - } - - /* VSCTRL has the bits which set the Video Pixel Format. - * When passing from a packed to planar format, - * if we write VSCTRL first, VVBASE and VUBASE would - * be zero if we would not set them here. (And then, - * the chip hangs and only a reset seems to fix it). - * - * Of course, the values calculated here have no meaning - * for packed formats.
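The mbxfb_get_pixclock() routine a few hunks above picks PLL coefficients by exhaustive search over the documented M/N/P ranges. The same search runs standalone below; the 13000 kHz reference matches the driver's FIXME default, and the 25175 kHz target (640x480@60) is an assumed input:

	#include <stdio.h>

	int main(void)
	{
		unsigned ref_clk = 13000, pixclock = 25175;  /* kHz */
		unsigned best_m = 1, best_n = 2, best_p = 0, best_clk = 0;
		unsigned min_err = ~0u;

		/* PLL output = (ref_clk * M) / (N * 2^P); N starts at 2 because
		 * N == 1 halved the clock on the author's silicon */
		for (unsigned m = 1; m < 64; m++)
			for (unsigned n = 2; n < 8; n++)
				for (unsigned p = 0; p < 8; p++) {
					unsigned clk = (ref_clk * m) / (n * (1u << p));
					unsigned err = clk > pixclock ? clk - pixclock
								      : pixclock - clk;
					if (err < min_err) {
						min_err = err;
						best_clk = clk;
						best_m = m;
						best_n = n;
						best_p = p;
					}
				}

		printf("M=%u N=%u P=%u -> %u kHz (target %u kHz)\n",
		       best_m, best_n, best_p, best_clk, pixclock);
		return 0;
	}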
- */ - set->UV_stride = ((set->width/2) + 0x7 ) & ~0x7; - set->U_offset = set->height * set->Y_stride; - set->V_offset = set->U_offset + - set->height * set->UV_stride; - vubase |= Vubase_Ubase_Adr( - (0x60000 + set->mem_offset + set->U_offset)>>3); - vvbase |= Vvbase_Vbase_Adr( - (0x60000 + set->mem_offset + set->V_offset)>>3); - - - vscadr |= Vscadr_Vbase_Adr((0x60000 + set->mem_offset)>>4); - - if (set->enable) - vscadr |= VSCADR_STR_EN; - - - vsadr = Vsadr_Srcstride((set->Y_stride)/16-1) | - Vsadr_Xstart(set->x) | Vsadr_Ystart(set->y); - - sssize &= ~(FMsk(SSSIZE_SC_WIDTH) | FMsk(SSSIZE_SC_HEIGHT)); - sssize = Sssize_Sc_Width(set->scaled_width-1) | - Sssize_Sc_Height(set->scaled_height-1); - - spoctrl &= ~(SPOCTRL_H_SC_BP | SPOCTRL_V_SC_BP | - SPOCTRL_HV_SC_OR | SPOCTRL_VS_UR_C | - FMsk(SPOCTRL_VPITCH)); - spoctrl |= Spoctrl_Vpitch((set->height<<11)/set->scaled_height); - - /* Bypass horiz/vert scaler when same size */ - if (set->scaled_width == set->width) - spoctrl |= SPOCTRL_H_SC_BP; - if (set->scaled_height == set->height) - spoctrl |= SPOCTRL_V_SC_BP; - - shctrl &= ~(FMsk(SHCTRL_HPITCH) | SHCTRL_HDECIM); - shctrl |= Shctrl_Hpitch((set->width<<11)/set->scaled_width); - - /* Video plane registers */ - write_reg(vsctrl, VSCTRL); - write_reg(vscadr, VSCADR); - write_reg(vubase, VUBASE); - write_reg(vvbase, VVBASE); - write_reg(vsadr, VSADR); - - /* Video scaler registers */ - write_reg(sssize, SSSIZE); - write_reg(spoctrl, SPOCTRL); - write_reg(shctrl, SHCTRL); - - /* Clock */ - if (set->enable) - vovrclk |= 1; - else - vovrclk &= ~1; - - write_reg(vovrclk, VOVRCLK); - - return 0; -} - -static int mbxfb_ioctl_planeorder(struct mbxfb_planeorder *porder) -{ - unsigned long gscadr, vscadr; - - if (porder->bottom == porder->top) - return -EINVAL; - - gscadr = readl(GSCADR); - vscadr = readl(VSCADR); - - gscadr &= ~(FMsk(GSCADR_BLEND_POS)); - vscadr &= ~(FMsk(VSCADR_BLEND_POS)); - - switch (porder->bottom) { - case MBXFB_PLANE_GRAPHICS: - gscadr |= GSCADR_BLEND_GFX; - break; - case MBXFB_PLANE_VIDEO: - vscadr |= VSCADR_BLEND_GFX; - break; - default: - return -EINVAL; - } - - switch (porder->top) { - case MBXFB_PLANE_GRAPHICS: - gscadr |= GSCADR_BLEND_VID; - break; - case MBXFB_PLANE_VIDEO: - vscadr |= GSCADR_BLEND_VID; - break; - default: - return -EINVAL; - } - - write_reg_dly(vscadr, VSCADR); - write_reg_dly(gscadr, GSCADR); - - return 0; - -} - -static int mbxfb_ioctl_alphactl(struct mbxfb_alphaCtl *alpha) -{ - unsigned long vscadr, vbbase, vcmsk; - unsigned long gscadr, gbbase, gdrctrl; - - vbbase = Vbbase_Glalpha(alpha->overlay_global_alpha) | - Vbbase_Colkey(alpha->overlay_colorkey); - - gbbase = Gbbase_Glalpha(alpha->graphics_global_alpha) | - Gbbase_Colkey(alpha->graphics_colorkey); - - vcmsk = readl(VCMSK); - vcmsk &= ~(FMsk(VCMSK_COLKEY_M)); - vcmsk |= Vcmsk_colkey_m(alpha->overlay_colorkey_mask); - - gdrctrl = readl(GDRCTRL); - gdrctrl &= ~(FMsk(GDRCTRL_COLKEYM)); - gdrctrl |= Gdrctrl_Colkeym(alpha->graphics_colorkey_mask); - - vscadr = readl(VSCADR); - vscadr &= ~(FMsk(VSCADR_BLEND_M) | VSCADR_COLKEYSRC | VSCADR_COLKEY_EN); - - gscadr = readl(GSCADR); - gscadr &= ~(FMsk(GSCADR_BLEND_M) | GSCADR_COLKEY_EN | GSCADR_COLKEYSRC); - - switch (alpha->overlay_colorkey_mode) { - case MBXFB_COLORKEY_DISABLED: - break; - case MBXFB_COLORKEY_PREVIOUS: - vscadr |= VSCADR_COLKEY_EN; - break; - case MBXFB_COLORKEY_CURRENT: - vscadr |= VSCADR_COLKEY_EN | VSCADR_COLKEYSRC; - break; - default: - return -EINVAL; - } - - switch (alpha->overlay_blend_mode) { - case MBXFB_ALPHABLEND_NONE: - vscadr 
|= VSCADR_BLEND_NONE; - break; - case MBXFB_ALPHABLEND_GLOBAL: - vscadr |= VSCADR_BLEND_GLOB; - break; - case MBXFB_ALPHABLEND_PIXEL: - vscadr |= VSCADR_BLEND_PIX; - break; - default: - return -EINVAL; - } - - switch (alpha->graphics_colorkey_mode) { - case MBXFB_COLORKEY_DISABLED: - break; - case MBXFB_COLORKEY_PREVIOUS: - gscadr |= GSCADR_COLKEY_EN; - break; - case MBXFB_COLORKEY_CURRENT: - gscadr |= GSCADR_COLKEY_EN | GSCADR_COLKEYSRC; - break; - default: - return -EINVAL; - } - - switch (alpha->graphics_blend_mode) { - case MBXFB_ALPHABLEND_NONE: - gscadr |= GSCADR_BLEND_NONE; - break; - case MBXFB_ALPHABLEND_GLOBAL: - gscadr |= GSCADR_BLEND_GLOB; - break; - case MBXFB_ALPHABLEND_PIXEL: - gscadr |= GSCADR_BLEND_PIX; - break; - default: - return -EINVAL; - } - - write_reg_dly(vbbase, VBBASE); - write_reg_dly(gbbase, GBBASE); - write_reg_dly(vcmsk, VCMSK); - write_reg_dly(gdrctrl, GDRCTRL); - write_reg_dly(gscadr, GSCADR); - write_reg_dly(vscadr, VSCADR); - - return 0; -} - -static int mbxfb_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg) -{ - struct mbxfb_overlaySetup setup; - struct mbxfb_planeorder porder; - struct mbxfb_alphaCtl alpha; - struct mbxfb_reg reg; - int res; - __u32 tmp; - - switch (cmd) - { - case MBXFB_IOCX_OVERLAY: - if (copy_from_user(&setup, (void __user*)arg, - sizeof(struct mbxfb_overlaySetup))) - return -EFAULT; - - res = mbxfb_setupOverlay(&setup); - if (res) - return res; - - if (copy_to_user((void __user*)arg, &setup, - sizeof(struct mbxfb_overlaySetup))) - return -EFAULT; - - return 0; - - case MBXFB_IOCS_PLANEORDER: - if (copy_from_user(&porder, (void __user*)arg, - sizeof(struct mbxfb_planeorder))) - return -EFAULT; - - return mbxfb_ioctl_planeorder(&porder); - - case MBXFB_IOCS_ALPHA: - if (copy_from_user(&alpha, (void __user*)arg, - sizeof(struct mbxfb_alphaCtl))) - return -EFAULT; - - return mbxfb_ioctl_alphactl(&alpha); - - case MBXFB_IOCS_REG: - if (copy_from_user(®, (void __user*)arg, - sizeof(struct mbxfb_reg))) - return -EFAULT; - - if (reg.addr >= 0x10000) /* regs are from 0x3fe0000 to 0x3feffff */ - return -EINVAL; - - tmp = readl(virt_base_2700 + reg.addr); - tmp &= ~reg.mask; - tmp |= reg.val & reg.mask; - writel(tmp, virt_base_2700 + reg.addr); - - return 0; - case MBXFB_IOCX_REG: - if (copy_from_user(®, (void __user*)arg, - sizeof(struct mbxfb_reg))) - return -EFAULT; - - if (reg.addr >= 0x10000) /* regs are from 0x3fe0000 to 0x3feffff */ - return -EINVAL; - reg.val = readl(virt_base_2700 + reg.addr); - - if (copy_to_user((void __user*)arg, ®, - sizeof(struct mbxfb_reg))) - return -EFAULT; - - return 0; - } - return -EINVAL; -} - -static const struct fb_ops mbxfb_ops = { - .owner = THIS_MODULE, - .fb_check_var = mbxfb_check_var, - .fb_set_par = mbxfb_set_par, - .fb_setcolreg = mbxfb_setcolreg, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, - .fb_blank = mbxfb_blank, - .fb_ioctl = mbxfb_ioctl, -}; - -/* - Enable external SDRAM controller. Assume that all clocks are active - by now. 
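The MBXFB_IOCS_REG ioctl above performs a masked read-modify-write on the register window: only the bits selected by the mask change, everything else is preserved. The idiom in isolation, a sketch with readl()/writel() stood in by a plain volatile pointer:

	#include <stdint.h>

	/* update only the bits of *reg selected by mask */
	static inline uint32_t reg_update(volatile uint32_t *reg,
					  uint32_t mask, uint32_t val)
	{
		uint32_t tmp = *reg;   /* readl(virt_base_2700 + reg.addr) */
		tmp &= ~mask;          /* clear the field */
		tmp |= val & mask;     /* insert the new bits */
		*reg = tmp;            /* writel(tmp, ...) */
		return tmp;
	}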
-*/ -static void setup_memc(struct fb_info *fbi) -{ - unsigned long tmp; - int i; - - /* FIXME: use platform specific parameters */ - /* setup SDRAM controller */ - write_reg_dly((LMCFG_LMC_DS | LMCFG_LMC_TS | LMCFG_LMD_TS | - LMCFG_LMA_TS), - LMCFG); - - write_reg_dly(LMPWR_MC_PWR_ACT, LMPWR); - - /* setup SDRAM timings */ - write_reg_dly((Lmtim_Tras(7) | Lmtim_Trp(3) | Lmtim_Trcd(3) | - Lmtim_Trc(9) | Lmtim_Tdpl(2)), - LMTIM); - /* setup SDRAM refresh rate */ - write_reg_dly(0xc2b, LMREFRESH); - /* setup SDRAM type parameters */ - write_reg_dly((LMTYPE_CASLAT_3 | LMTYPE_BKSZ_2 | LMTYPE_ROWSZ_11 | - LMTYPE_COLSZ_8), - LMTYPE); - /* enable memory controller */ - write_reg_dly(LMPWR_MC_PWR_ACT, LMPWR); - /* perform dummy reads */ - for ( i = 0; i < 16; i++ ) { - tmp = readl(fbi->screen_base); - } -} - -static void enable_clocks(struct fb_info *fbi) -{ - /* enable clocks */ - write_reg_dly(SYSCLKSRC_PLL_2, SYSCLKSRC); - write_reg_dly(PIXCLKSRC_PLL_1, PIXCLKSRC); - write_reg_dly(0x00000000, CLKSLEEP); - - /* PLL output = (Frefclk * M) / (N * 2^P ) - * - * M: 0x17, N: 0x3, P: 0x0 == 100 Mhz! - * M: 0xb, N: 0x1, P: 0x1 == 71 Mhz - * */ - write_reg_dly((Core_Pll_M(0xb) | Core_Pll_N(0x1) | Core_Pll_P(0x1) | - CORE_PLL_EN), - COREPLL); - - write_reg_dly((Disp_Pll_M(0x1b) | Disp_Pll_N(0x7) | Disp_Pll_P(0x1) | - DISP_PLL_EN), - DISPPLL); - - write_reg_dly(0x00000000, VOVRCLK); - write_reg_dly(PIXCLK_EN, PIXCLK); - write_reg_dly(MEMCLK_EN, MEMCLK); - write_reg_dly(0x00000001, M24CLK); - write_reg_dly(0x00000001, MBXCLK); - write_reg_dly(SDCLK_EN, SDCLK); - write_reg_dly(0x00000001, PIXCLKDIV); -} - -static void setup_graphics(struct fb_info *fbi) -{ - unsigned long gsctrl; - unsigned long vscadr; - - gsctrl = GSCTRL_GAMMA_EN | Gsctrl_Width(fbi->var.xres) | - Gsctrl_Height(fbi->var.yres); - switch (fbi->var.bits_per_pixel) { - case 16: - if (fbi->var.green.length == 5) - gsctrl |= GSCTRL_GPIXFMT_ARGB1555; - else - gsctrl |= GSCTRL_GPIXFMT_RGB565; - break; - case 24: - gsctrl |= GSCTRL_GPIXFMT_RGB888; - break; - case 32: - gsctrl |= GSCTRL_GPIXFMT_ARGB8888; - break; - } - - write_reg_dly(gsctrl, GSCTRL); - write_reg_dly(0x00000000, GBBASE); - write_reg_dly(0x00ffffff, GDRCTRL); - write_reg_dly((GSCADR_STR_EN | Gscadr_Gbase_Adr(0x6000)), GSCADR); - write_reg_dly(0x00000000, GPLUT); - - vscadr = readl(VSCADR); - vscadr &= ~(FMsk(VSCADR_BLEND_POS) | FMsk(VSCADR_BLEND_M)); - vscadr |= VSCADR_BLEND_VID | VSCADR_BLEND_NONE; - write_reg_dly(vscadr, VSCADR); -} - -static void setup_display(struct fb_info *fbi) -{ - unsigned long dsctrl = 0; - - dsctrl = DSCTRL_BLNK_POL; - if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT) - dsctrl |= DSCTRL_HS_POL; - if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT) - dsctrl |= DSCTRL_VS_POL; - write_reg_dly(dsctrl, DSCTRL); - write_reg_dly(0xd0303010, DMCTRL); - write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL); -} - -static void enable_controller(struct fb_info *fbi) -{ - u32 svctrl, shctrl; - - write_reg_dly(SYSRST_RST, SYSRST); - - /* setup a timeout, raise drive strength */ - write_reg_dly(0xffffff0c, SYSCFG); - - enable_clocks(fbi); - setup_memc(fbi); - setup_graphics(fbi); - setup_display(fbi); - - shctrl = readl(SHCTRL); - shctrl &= ~(FMsk(SHCTRL_HINITIAL)); - shctrl |= Shctrl_Hinitial(4<<11); - writel(shctrl, SHCTRL); - - svctrl = Svctrl_Initial1(1<<10) | Svctrl_Initial2(1<<10); - writel(svctrl, SVCTRL); - - writel(SPOCTRL_H_SC_BP | SPOCTRL_V_SC_BP | SPOCTRL_VORDER_4TAP - , SPOCTRL); - - /* Those coefficients are good for scaling up. 
For scaling - * down, the application has to calculate them. */ - write_reg(0xff000100, VSCOEFF0); - write_reg(0xfdfcfdfe, VSCOEFF1); - write_reg(0x170d0500, VSCOEFF2); - write_reg(0x3d372d22, VSCOEFF3); - write_reg(0x00000040, VSCOEFF4); - - write_reg(0xff010100, HSCOEFF0); - write_reg(0x00000000, HSCOEFF1); - write_reg(0x02010000, HSCOEFF2); - write_reg(0x01020302, HSCOEFF3); - write_reg(0xf9fbfe00, HSCOEFF4); - write_reg(0xfbf7f6f7, HSCOEFF5); - write_reg(0x1c110700, HSCOEFF6); - write_reg(0x3e393127, HSCOEFF7); - write_reg(0x00000040, HSCOEFF8); - -} - -#ifdef CONFIG_PM -/* - * Power management hooks. Note that we won't be called from IRQ context, - * unlike the blank functions above, so we may sleep. - */ -static int mbxfb_suspend(struct platform_device *dev, pm_message_t state) -{ - /* make frame buffer memory enter self-refresh mode */ - write_reg_dly(LMPWR_MC_PWR_SRM, LMPWR); - while (readl(LMPWRSTAT) != LMPWRSTAT_MC_PWR_SRM) - ; /* empty statement */ - - /* reset the device, since it's initial state is 'mostly sleeping' */ - write_reg_dly(SYSRST_RST, SYSRST); - return 0; -} - -static int mbxfb_resume(struct platform_device *dev) -{ - struct fb_info *fbi = platform_get_drvdata(dev); - - enable_clocks(fbi); -/* setup_graphics(fbi); */ -/* setup_display(fbi); */ - - write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL); - return 0; -} -#else -#define mbxfb_suspend NULL -#define mbxfb_resume NULL -#endif - -/* debugfs entries */ -#ifndef CONFIG_FB_MBX_DEBUG -#define mbxfb_debugfs_init(x) do {} while(0) -#define mbxfb_debugfs_remove(x) do {} while(0) -#else -#include "mbxdebugfs.c" -#endif - -#define res_size(_r) (((_r)->end - (_r)->start) + 1) - -static int mbxfb_probe(struct platform_device *dev) -{ - int ret; - struct fb_info *fbi; - struct mbxfb_info *mfbi; - struct mbxfb_platform_data *pdata; - - dev_dbg(&dev->dev, "mbxfb_probe\n"); - - pdata = dev_get_platdata(&dev->dev); - if (!pdata) { - dev_err(&dev->dev, "platform data is required\n"); - return -EINVAL; - } - - fbi = framebuffer_alloc(sizeof(struct mbxfb_info), &dev->dev); - if (!fbi) - return -ENOMEM; - - mfbi = fbi->par; - fbi->pseudo_palette = mfbi->pseudo_palette; - - - if (pdata->probe) - mfbi->platform_probe = pdata->probe; - if (pdata->remove) - mfbi->platform_remove = pdata->remove; - - mfbi->fb_res = platform_get_resource(dev, IORESOURCE_MEM, 0); - mfbi->reg_res = platform_get_resource(dev, IORESOURCE_MEM, 1); - - if (!mfbi->fb_res || !mfbi->reg_res) { - dev_err(&dev->dev, "no resources found\n"); - ret = -ENODEV; - goto err1; - } - - mfbi->fb_req = request_mem_region(mfbi->fb_res->start, - res_size(mfbi->fb_res), dev->name); - if (mfbi->fb_req == NULL) { - dev_err(&dev->dev, "failed to claim framebuffer memory\n"); - ret = -EINVAL; - goto err1; - } - mfbi->fb_phys_addr = mfbi->fb_res->start; - - mfbi->reg_req = request_mem_region(mfbi->reg_res->start, - res_size(mfbi->reg_res), dev->name); - if (mfbi->reg_req == NULL) { - dev_err(&dev->dev, "failed to claim Marathon registers\n"); - ret = -EINVAL; - goto err2; - } - mfbi->reg_phys_addr = mfbi->reg_res->start; - - mfbi->reg_virt_addr = devm_ioremap(&dev->dev, - mfbi->reg_phys_addr, - res_size(mfbi->reg_req)); - if (!mfbi->reg_virt_addr) { - dev_err(&dev->dev, "failed to ioremap Marathon registers\n"); - ret = -EINVAL; - goto err3; - } - virt_base_2700 = mfbi->reg_virt_addr; - - mfbi->fb_virt_addr = devm_ioremap(&dev->dev, mfbi->fb_phys_addr, - res_size(mfbi->fb_req)); - if (!mfbi->fb_virt_addr) { - dev_err(&dev->dev, "failed to ioremap frame buffer\n"); - ret = 
-EINVAL; - goto err3; - } - - fbi->screen_base = (char __iomem *)(mfbi->fb_virt_addr + 0x60000); - fbi->screen_size = pdata->memsize; - fbi->fbops = &mbxfb_ops; - - fbi->var = mbxfb_default; - fbi->fix = mbxfb_fix; - fbi->fix.smem_start = mfbi->fb_phys_addr + 0x60000; - fbi->fix.smem_len = pdata->memsize; - fbi->fix.line_length = mbxfb_default.xres_virtual * - mbxfb_default.bits_per_pixel / 8; - - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); - if (ret < 0) { - dev_err(&dev->dev, "fb_alloc_cmap failed\n"); - ret = -EINVAL; - goto err3; - } - - platform_set_drvdata(dev, fbi); - - fb_info(fbi, "mbx frame buffer device\n"); - - if (mfbi->platform_probe) - mfbi->platform_probe(fbi); - - enable_controller(fbi); - - mbxfb_debugfs_init(fbi); - - ret = register_framebuffer(fbi); - if (ret < 0) { - dev_err(&dev->dev, "register_framebuffer failed\n"); - ret = -EINVAL; - goto err6; - } - - return 0; - -err6: - fb_dealloc_cmap(&fbi->cmap); -err3: - release_mem_region(mfbi->reg_res->start, res_size(mfbi->reg_res)); -err2: - release_mem_region(mfbi->fb_res->start, res_size(mfbi->fb_res)); -err1: - framebuffer_release(fbi); - - return ret; -} - -static int mbxfb_remove(struct platform_device *dev) -{ - struct fb_info *fbi = platform_get_drvdata(dev); - - write_reg_dly(SYSRST_RST, SYSRST); - - mbxfb_debugfs_remove(fbi); - - if (fbi) { - struct mbxfb_info *mfbi = fbi->par; - - unregister_framebuffer(fbi); - if (mfbi) { - if (mfbi->platform_remove) - mfbi->platform_remove(fbi); - - - if (mfbi->reg_req) - release_mem_region(mfbi->reg_req->start, - res_size(mfbi->reg_req)); - if (mfbi->fb_req) - release_mem_region(mfbi->fb_req->start, - res_size(mfbi->fb_req)); - } - framebuffer_release(fbi); - } - - return 0; -} - -static struct platform_driver mbxfb_driver = { - .probe = mbxfb_probe, - .remove = mbxfb_remove, - .suspend = mbxfb_suspend, - .resume = mbxfb_resume, - .driver = { - .name = "mbx-fb", - }, -}; - -module_platform_driver(mbxfb_driver); - -MODULE_DESCRIPTION("loadable framebuffer driver for Marathon device"); -MODULE_AUTHOR("Mike Rapoport, Compulab"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/mbx/reg_bits.h b/drivers/video/fbdev/mbx/reg_bits.h deleted file mode 100644 index 6607f353639b..000000000000 --- a/drivers/video/fbdev/mbx/reg_bits.h +++ /dev/null @@ -1,614 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __REG_BITS_2700G_ -#define __REG_BITS_2700G_ - -/* use defines from asm-arm/arch-pxa/bitfields.h for bit fields access */ -#define UData(Data) ((unsigned long) (Data)) -#define Fld(Size, Shft) (((Size) << 16) + (Shft)) -#define FSize(Field) ((Field) >> 16) -#define FShft(Field) ((Field) & 0x0000FFFF) -#define FMsk(Field) (((UData (1) << FSize (Field)) - 1) << FShft (Field)) -#define FAlnMsk(Field) ((UData (1) << FSize (Field)) - 1) -#define F1stBit(Field) (UData (1) << FShft (Field)) - -#define SYSRST_RST (1 << 0) - -/* SYSCLKSRC - SYSCLK Source Control Register */ -#define SYSCLKSRC_SEL Fld(2,0) -#define SYSCLKSRC_REF ((0x0) << FShft(SYSCLKSRC_SEL)) -#define SYSCLKSRC_PLL_1 ((0x1) << FShft(SYSCLKSRC_SEL)) -#define SYSCLKSRC_PLL_2 ((0x2) << FShft(SYSCLKSRC_SEL)) - -/* PIXCLKSRC - PIXCLK Source Control Register */ -#define PIXCLKSRC_SEL Fld(2,0) -#define PIXCLKSRC_REF ((0x0) << FShft(PIXCLKSRC_SEL)) -#define PIXCLKSRC_PLL_1 ((0x1) << FShft(PIXCLKSRC_SEL)) -#define PIXCLKSRC_PLL_2 ((0x2) << FShft(PIXCLKSRC_SEL)) - -/* Clock Disable Register */ -#define CLKSLEEP_SLP (1 << 0) - -/* Core PLL Control Register */ -#define CORE_PLL_M Fld(6,7) -#define Core_Pll_M(x) ((x) << 
FShft(CORE_PLL_M)) -#define CORE_PLL_N Fld(3,4) -#define Core_Pll_N(x) ((x) << FShft(CORE_PLL_N)) -#define CORE_PLL_P Fld(3,1) -#define Core_Pll_P(x) ((x) << FShft(CORE_PLL_P)) -#define CORE_PLL_EN (1 << 0) - -/* Display PLL Control Register */ -#define DISP_PLL_M Fld(6,7) -#define Disp_Pll_M(x) ((x) << FShft(DISP_PLL_M)) -#define DISP_PLL_N Fld(3,4) -#define Disp_Pll_N(x) ((x) << FShft(DISP_PLL_N)) -#define DISP_PLL_P Fld(3,1) -#define Disp_Pll_P(x) ((x) << FShft(DISP_PLL_P)) -#define DISP_PLL_EN (1 << 0) - -/* PLL status register */ -#define PLLSTAT_CORE_PLL_LOST_L (1 << 3) -#define PLLSTAT_CORE_PLL_LSTS (1 << 2) -#define PLLSTAT_DISP_PLL_LOST_L (1 << 1) -#define PLLSTAT_DISP_PLL_LSTS (1 << 0) - -/* Video and scale clock control register */ -#define VOVRCLK_EN (1 << 0) - -/* Pixel clock control register */ -#define PIXCLK_EN (1 << 0) - -/* Memory clock control register */ -#define MEMCLK_EN (1 << 0) - -/* MBX clock control register */ -#define MBXCLK_DIV Fld(2,2) -#define MBXCLK_DIV_1 ((0x0) << FShft(MBXCLK_DIV)) -#define MBXCLK_DIV_2 ((0x1) << FShft(MBXCLK_DIV)) -#define MBXCLK_DIV_3 ((0x2) << FShft(MBXCLK_DIV)) -#define MBXCLK_DIV_4 ((0x3) << FShft(MBXCLK_DIV)) -#define MBXCLK_EN Fld(2,0) -#define MBXCLK_EN_NONE ((0x0) << FShft(MBXCLK_EN)) -#define MBXCLK_EN_2D ((0x1) << FShft(MBXCLK_EN)) -#define MBXCLK_EN_BOTH ((0x2) << FShft(MBXCLK_EN)) - -/* M24 clock control register */ -#define M24CLK_DIV Fld(2,1) -#define M24CLK_DIV_1 ((0x0) << FShft(M24CLK_DIV)) -#define M24CLK_DIV_2 ((0x1) << FShft(M24CLK_DIV)) -#define M24CLK_DIV_3 ((0x2) << FShft(M24CLK_DIV)) -#define M24CLK_DIV_4 ((0x3) << FShft(M24CLK_DIV)) -#define M24CLK_EN (1 << 0) - -/* SDRAM clock control register */ -#define SDCLK_EN (1 << 0) - -/* PixClk Divisor Register */ -#define PIXCLKDIV_PD Fld(9,0) -#define Pixclkdiv_Pd(x) ((x) << FShft(PIXCLKDIV_PD)) - -/* LCD Config control register */ -#define LCDCFG_IN_FMT Fld(3,28) -#define Lcdcfg_In_Fmt(x) ((x) << FShft(LCDCFG_IN_FMT)) -#define LCDCFG_LCD1DEN_POL (1 << 27) -#define LCDCFG_LCD1FCLK_POL (1 << 26) -#define LCDCFG_LCD1LCLK_POL (1 << 25) -#define LCDCFG_LCD1D_POL (1 << 24) -#define LCDCFG_LCD2DEN_POL (1 << 23) -#define LCDCFG_LCD2FCLK_POL (1 << 22) -#define LCDCFG_LCD2LCLK_POL (1 << 21) -#define LCDCFG_LCD2D_POL (1 << 20) -#define LCDCFG_LCD1_TS (1 << 19) -#define LCDCFG_LCD1D_DS (1 << 18) -#define LCDCFG_LCD1C_DS (1 << 17) -#define LCDCFG_LCD1_IS_IN (1 << 16) -#define LCDCFG_LCD2_TS (1 << 3) -#define LCDCFG_LCD2D_DS (1 << 2) -#define LCDCFG_LCD2C_DS (1 << 1) -#define LCDCFG_LCD2_IS_IN (1 << 0) - -/* On-Die Frame Buffer Power Control Register */ -#define ODFBPWR_SLOW (1 << 2) -#define ODFBPWR_MODE Fld(2,0) -#define ODFBPWR_MODE_ACT ((0x0) << FShft(ODFBPWR_MODE)) -#define ODFBPWR_MODE_ACT_LP ((0x1) << FShft(ODFBPWR_MODE)) -#define ODFBPWR_MODE_SLEEP ((0x2) << FShft(ODFBPWR_MODE)) -#define ODFBPWR_MODE_SHUTD ((0x3) << FShft(ODFBPWR_MODE)) - -/* On-Die Frame Buffer Power State Status Register */ -#define ODFBSTAT_ACT (1 << 2) -#define ODFBSTAT_SLP (1 << 1) -#define ODFBSTAT_SDN (1 << 0) - -/* LMRST - Local Memory (SDRAM) Reset */ -#define LMRST_MC_RST (1 << 0) - -/* LMCFG - Local Memory (SDRAM) Configuration Register */ -#define LMCFG_LMC_DS (1 << 5) -#define LMCFG_LMD_DS (1 << 4) -#define LMCFG_LMA_DS (1 << 3) -#define LMCFG_LMC_TS (1 << 2) -#define LMCFG_LMD_TS (1 << 1) -#define LMCFG_LMA_TS (1 << 0) - -/* LMPWR - Local Memory (SDRAM) Power Control Register */ -#define LMPWR_MC_PWR_CNT Fld(2,0) -#define LMPWR_MC_PWR_ACT ((0x0) << FShft(LMPWR_MC_PWR_CNT)) /* Active */ -#define 
LMPWR_MC_PWR_SRM ((0x1) << FShft(LMPWR_MC_PWR_CNT)) /* Self-refresh */ -#define LMPWR_MC_PWR_DPD ((0x3) << FShft(LMPWR_MC_PWR_CNT)) /* deep power down */ - -/* LMPWRSTAT - Local Memory (SDRAM) Power Status Register */ -#define LMPWRSTAT_MC_PWR_CNT Fld(2,0) -#define LMPWRSTAT_MC_PWR_ACT ((0x0) << FShft(LMPWRSTAT_MC_PWR_CNT)) /* Active */ -#define LMPWRSTAT_MC_PWR_SRM ((0x1) << FShft(LMPWRSTAT_MC_PWR_CNT)) /* Self-refresh */ -#define LMPWRSTAT_MC_PWR_DPD ((0x3) << FShft(LMPWRSTAT_MC_PWR_CNT)) /* deep power down */ - -/* LMTYPE - Local Memory (SDRAM) Type Register */ -#define LMTYPE_CASLAT Fld(3,10) -#define LMTYPE_CASLAT_1 ((0x1) << FShft(LMTYPE_CASLAT)) -#define LMTYPE_CASLAT_2 ((0x2) << FShft(LMTYPE_CASLAT)) -#define LMTYPE_CASLAT_3 ((0x3) << FShft(LMTYPE_CASLAT)) -#define LMTYPE_BKSZ Fld(2,8) -#define LMTYPE_BKSZ_1 ((0x1) << FShft(LMTYPE_BKSZ)) -#define LMTYPE_BKSZ_2 ((0x2) << FShft(LMTYPE_BKSZ)) -#define LMTYPE_ROWSZ Fld(4,4) -#define LMTYPE_ROWSZ_11 ((0xb) << FShft(LMTYPE_ROWSZ)) -#define LMTYPE_ROWSZ_12 ((0xc) << FShft(LMTYPE_ROWSZ)) -#define LMTYPE_ROWSZ_13 ((0xd) << FShft(LMTYPE_ROWSZ)) -#define LMTYPE_COLSZ Fld(4,0) -#define LMTYPE_COLSZ_7 ((0x7) << FShft(LMTYPE_COLSZ)) -#define LMTYPE_COLSZ_8 ((0x8) << FShft(LMTYPE_COLSZ)) -#define LMTYPE_COLSZ_9 ((0x9) << FShft(LMTYPE_COLSZ)) -#define LMTYPE_COLSZ_10 ((0xa) << FShft(LMTYPE_COLSZ)) -#define LMTYPE_COLSZ_11 ((0xb) << FShft(LMTYPE_COLSZ)) -#define LMTYPE_COLSZ_12 ((0xc) << FShft(LMTYPE_COLSZ)) - -/* LMTIM - Local Memory (SDRAM) Timing Register */ -#define LMTIM_TRAS Fld(4,16) -#define Lmtim_Tras(x) ((x) << FShft(LMTIM_TRAS)) -#define LMTIM_TRP Fld(4,12) -#define Lmtim_Trp(x) ((x) << FShft(LMTIM_TRP)) -#define LMTIM_TRCD Fld(4,8) -#define Lmtim_Trcd(x) ((x) << FShft(LMTIM_TRCD)) -#define LMTIM_TRC Fld(4,4) -#define Lmtim_Trc(x) ((x) << FShft(LMTIM_TRC)) -#define LMTIM_TDPL Fld(4,0) -#define Lmtim_Tdpl(x) ((x) << FShft(LMTIM_TDPL)) - -/* LMREFRESH - Local Memory (SDRAM) tREF Control Register */ -#define LMREFRESH_TREF Fld(2,0) -#define Lmrefresh_Tref(x) ((x) << FShft(LMREFRESH_TREF)) - -/* GSCTRL - Graphics surface control register */ -#define GSCTRL_LUT_EN (1 << 31) -#define GSCTRL_GPIXFMT Fld(4,27) -#define GSCTRL_GPIXFMT_INDEXED ((0x0) << FShft(GSCTRL_GPIXFMT)) -#define GSCTRL_GPIXFMT_ARGB4444 ((0x4) << FShft(GSCTRL_GPIXFMT)) -#define GSCTRL_GPIXFMT_ARGB1555 ((0x5) << FShft(GSCTRL_GPIXFMT)) -#define GSCTRL_GPIXFMT_RGB888 ((0x6) << FShft(GSCTRL_GPIXFMT)) -#define GSCTRL_GPIXFMT_RGB565 ((0x7) << FShft(GSCTRL_GPIXFMT)) -#define GSCTRL_GPIXFMT_ARGB8888 ((0x8) << FShft(GSCTRL_GPIXFMT)) -#define GSCTRL_GAMMA_EN (1 << 26) - -#define GSCTRL_GSWIDTH Fld(11,11) -#define Gsctrl_Width(Pixel) /* Display Width [1..2048 pix.] */ \ - (((Pixel) - 1) << FShft(GSCTRL_GSWIDTH)) - -#define GSCTRL_GSHEIGHT Fld(11,0) -#define Gsctrl_Height(Pixel) /* Display Height [1..2048 pix.] 
*/ \ - (((Pixel) - 1) << FShft(GSCTRL_GSHEIGHT)) - -/* GBBASE fileds */ -#define GBBASE_GLALPHA Fld(8,24) -#define Gbbase_Glalpha(x) ((x) << FShft(GBBASE_GLALPHA)) - -#define GBBASE_COLKEY Fld(24,0) -#define Gbbase_Colkey(x) ((x) << FShft(GBBASE_COLKEY)) - -/* GDRCTRL fields */ -#define GDRCTRL_PIXDBL (1 << 31) -#define GDRCTRL_PIXHLV (1 << 30) -#define GDRCTRL_LNDBL (1 << 29) -#define GDRCTRL_LNHLV (1 << 28) -#define GDRCTRL_COLKEYM Fld(24,0) -#define Gdrctrl_Colkeym(x) ((x) << FShft(GDRCTRL_COLKEYM)) - -/* GSCADR graphics stream control address register fields */ -#define GSCADR_STR_EN (1 << 31) -#define GSCADR_COLKEY_EN (1 << 30) -#define GSCADR_COLKEYSRC (1 << 29) -#define GSCADR_BLEND_M Fld(2,27) -#define GSCADR_BLEND_NONE ((0x0) << FShft(GSCADR_BLEND_M)) -#define GSCADR_BLEND_INV ((0x1) << FShft(GSCADR_BLEND_M)) -#define GSCADR_BLEND_GLOB ((0x2) << FShft(GSCADR_BLEND_M)) -#define GSCADR_BLEND_PIX ((0x3) << FShft(GSCADR_BLEND_M)) -#define GSCADR_BLEND_POS Fld(2,24) -#define GSCADR_BLEND_GFX ((0x0) << FShft(GSCADR_BLEND_POS)) -#define GSCADR_BLEND_VID ((0x1) << FShft(GSCADR_BLEND_POS)) -#define GSCADR_BLEND_CUR ((0x2) << FShft(GSCADR_BLEND_POS)) -#define GSCADR_GBASE_ADR Fld(23,0) -#define Gscadr_Gbase_Adr(x) ((x) << FShft(GSCADR_GBASE_ADR)) - -/* GSADR graphics stride address register fields */ -#define GSADR_SRCSTRIDE Fld(10,22) -#define Gsadr_Srcstride(x) ((x) << FShft(GSADR_SRCSTRIDE)) -#define GSADR_XSTART Fld(11,11) -#define Gsadr_Xstart(x) ((x) << FShft(GSADR_XSTART)) -#define GSADR_YSTART Fld(11,0) -#define Gsadr_Ystart(y) ((y) << FShft(GSADR_YSTART)) - -/* GPLUT graphics palette register fields */ -#define GPLUT_LUTADR Fld(8,24) -#define Gplut_Lutadr(x) ((x) << FShft(GPLUT_LUTADR)) -#define GPLUT_LUTDATA Fld(24,0) -#define Gplut_Lutdata(x) ((x) << FShft(GPLUT_LUTDATA)) - -/* VSCTRL - Video Surface Control Register */ -#define VSCTRL_VPIXFMT Fld(4,27) -#define VSCTRL_VPIXFMT_YUV12 ((0x9) << FShft(VSCTRL_VPIXFMT)) -#define VSCTRL_VPIXFMT_UY0VY1 ((0xc) << FShft(VSCTRL_VPIXFMT)) -#define VSCTRL_VPIXFMT_VY0UY1 ((0xd) << FShft(VSCTRL_VPIXFMT)) -#define VSCTRL_VPIXFMT_Y0UY1V ((0xe) << FShft(VSCTRL_VPIXFMT)) -#define VSCTRL_VPIXFMT_Y0VY1U ((0xf) << FShft(VSCTRL_VPIXFMT)) -#define VSCTRL_GAMMA_EN (1 << 26) -#define VSCTRL_CSC_EN (1 << 25) -#define VSCTRL_COSITED (1 << 22) -#define VSCTRL_VSWIDTH Fld(11,11) -#define Vsctrl_Width(Pixels) /* Video Width [1-2048] */ \ - (((Pixels) - 1) << FShft(VSCTRL_VSWIDTH)) -#define VSCTRL_VSHEIGHT Fld(11,0) -#define Vsctrl_Height(Pixels) /* Video Height [1-2048] */ \ - (((Pixels) - 1) << FShft(VSCTRL_VSHEIGHT)) - -/* VBBASE - Video Blending Base Register */ -#define VBBASE_GLALPHA Fld(8,24) -#define Vbbase_Glalpha(x) ((x) << FShft(VBBASE_GLALPHA)) - -#define VBBASE_COLKEY Fld(24,0) -#define Vbbase_Colkey(x) ((x) << FShft(VBBASE_COLKEY)) - -/* VCMSK - Video Color Key Mask Register */ -#define VCMSK_COLKEY_M Fld(24,0) -#define Vcmsk_colkey_m(x) ((x) << FShft(VCMSK_COLKEY_M)) - -/* VSCADR - Video Stream Control Rddress Register */ -#define VSCADR_STR_EN (1 << 31) -#define VSCADR_COLKEY_EN (1 << 30) -#define VSCADR_COLKEYSRC (1 << 29) -#define VSCADR_BLEND_M Fld(2,27) -#define VSCADR_BLEND_NONE ((0x0) << FShft(VSCADR_BLEND_M)) -#define VSCADR_BLEND_INV ((0x1) << FShft(VSCADR_BLEND_M)) -#define VSCADR_BLEND_GLOB ((0x2) << FShft(VSCADR_BLEND_M)) -#define VSCADR_BLEND_PIX ((0x3) << FShft(VSCADR_BLEND_M)) -#define VSCADR_BLEND_POS Fld(2,24) -#define VSCADR_BLEND_GFX ((0x0) << FShft(VSCADR_BLEND_POS)) -#define VSCADR_BLEND_VID ((0x1) << 
FShft(VSCADR_BLEND_POS)) -#define VSCADR_BLEND_CUR ((0x2) << FShft(VSCADR_BLEND_POS)) -#define VSCADR_VBASE_ADR Fld(23,0) -#define Vscadr_Vbase_Adr(x) ((x) << FShft(VSCADR_VBASE_ADR)) - -/* VUBASE - Video U Base Register */ -#define VUBASE_UVHALFSTR (1 << 31) -#define VUBASE_UBASE_ADR Fld(24,0) -#define Vubase_Ubase_Adr(x) ((x) << FShft(VUBASE_UBASE_ADR)) - -/* VVBASE - Video V Base Register */ -#define VVBASE_VBASE_ADR Fld(24,0) -#define Vvbase_Vbase_Adr(x) ((x) << FShft(VVBASE_VBASE_ADR)) - -/* VSADR - Video Stride Address Register */ -#define VSADR_SRCSTRIDE Fld(10,22) -#define Vsadr_Srcstride(x) ((x) << FShft(VSADR_SRCSTRIDE)) -#define VSADR_XSTART Fld(11,11) -#define Vsadr_Xstart(x) ((x) << FShft(VSADR_XSTART)) -#define VSADR_YSTART Fld(11,0) -#define Vsadr_Ystart(x) ((x) << FShft(VSADR_YSTART)) - -/* VSCTRL - Video Surface Control Register */ -#define VSCTRL_VPIXFMT Fld(4,27) -#define VSCTRL_VPIXFMT_YUV12 ((0x9) << FShft(VSCTRL_VPIXFMT)) -#define VSCTRL_VPIXFMT_UY0VY1 ((0xc) << FShft(VSCTRL_VPIXFMT)) -#define VSCTRL_VPIXFMT_VY0UY1 ((0xd) << FShft(VSCTRL_VPIXFMT)) -#define VSCTRL_VPIXFMT_Y0UY1V ((0xe) << FShft(VSCTRL_VPIXFMT)) -#define VSCTRL_VPIXFMT_Y0VY1U ((0xf) << FShft(VSCTRL_VPIXFMT)) -#define VSCTRL_GAMMA_EN (1 << 26) -#define VSCTRL_CSC_EN (1 << 25) -#define VSCTRL_COSITED (1 << 22) -#define VSCTRL_VSWIDTH Fld(11,11) -#define Vsctrl_Width(Pixels) /* Video Width [1-2048] */ \ - (((Pixels) - 1) << FShft(VSCTRL_VSWIDTH)) -#define VSCTRL_VSHEIGHT Fld(11,0) -#define Vsctrl_Height(Pixels) /* Video Height [1-2048] */ \ - (((Pixels) - 1) << FShft(VSCTRL_VSHEIGHT)) - -/* VBBASE - Video Blending Base Register */ -#define VBBASE_GLALPHA Fld(8,24) -#define Vbbase_Glalpha(x) ((x) << FShft(VBBASE_GLALPHA)) - -#define VBBASE_COLKEY Fld(24,0) -#define Vbbase_Colkey(x) ((x) << FShft(VBBASE_COLKEY)) - -/* VCMSK - Video Color Key Mask Register */ -#define VCMSK_COLKEY_M Fld(24,0) -#define Vcmsk_colkey_m(x) ((x) << FShft(VCMSK_COLKEY_M)) - -/* VSCADR - Video Stream Control Rddress Register */ -#define VSCADR_STR_EN (1 << 31) -#define VSCADR_COLKEY_EN (1 << 30) -#define VSCADR_COLKEYSRC (1 << 29) -#define VSCADR_BLEND_M Fld(2,27) -#define VSCADR_BLEND_NONE ((0x0) << FShft(VSCADR_BLEND_M)) -#define VSCADR_BLEND_INV ((0x1) << FShft(VSCADR_BLEND_M)) -#define VSCADR_BLEND_GLOB ((0x2) << FShft(VSCADR_BLEND_M)) -#define VSCADR_BLEND_PIX ((0x3) << FShft(VSCADR_BLEND_M)) -#define VSCADR_BLEND_POS Fld(2,24) -#define VSCADR_BLEND_GFX ((0x0) << FShft(VSCADR_BLEND_POS)) -#define VSCADR_BLEND_VID ((0x1) << FShft(VSCADR_BLEND_POS)) -#define VSCADR_BLEND_CUR ((0x2) << FShft(VSCADR_BLEND_POS)) -#define VSCADR_VBASE_ADR Fld(23,0) -#define Vscadr_Vbase_Adr(x) ((x) << FShft(VSCADR_VBASE_ADR)) - -/* VUBASE - Video U Base Register */ -#define VUBASE_UVHALFSTR (1 << 31) -#define VUBASE_UBASE_ADR Fld(24,0) -#define Vubase_Ubase_Adr(x) ((x) << FShft(VUBASE_UBASE_ADR)) - -/* VVBASE - Video V Base Register */ -#define VVBASE_VBASE_ADR Fld(24,0) -#define Vvbase_Vbase_Adr(x) ((x) << FShft(VVBASE_VBASE_ADR)) - -/* VSADR - Video Stride Address Register */ -#define VSADR_SRCSTRIDE Fld(10,22) -#define Vsadr_Srcstride(x) ((x) << FShft(VSADR_SRCSTRIDE)) -#define VSADR_XSTART Fld(11,11) -#define Vsadr_Xstart(x) ((x) << FShft(VSADR_XSTART)) -#define VSADR_YSTART Fld(11,0) -#define Vsadr_Ystart(x) ((x) << FShft(VSADR_YSTART)) - -/* HCCTRL - Hardware Cursor Register fields */ -#define HCCTRL_CUR_EN (1 << 31) -#define HCCTRL_COLKEY_EN (1 << 29) -#define HCCTRL_COLKEYSRC (1 << 28) -#define HCCTRL_BLEND_M Fld(2,26) -#define 
HCCTRL_BLEND_NONE ((0x0) << FShft(HCCTRL_BLEND_M)) -#define HCCTRL_BLEND_INV ((0x1) << FShft(HCCTRL_BLEND_M)) -#define HCCTRL_BLEND_GLOB ((0x2) << FShft(HCCTRL_BLEND_M)) -#define HCCTRL_BLEND_PIX ((0x3) << FShft(HCCTRL_BLEND_M)) -#define HCCTRL_CPIXFMT Fld(3,23) -#define HCCTRL_CPIXFMT_RGB332 ((0x3) << FShft(HCCTRL_CPIXFMT)) -#define HCCTRL_CPIXFMT_ARGB4444 ((0x4) << FShft(HCCTRL_CPIXFMT)) -#define HCCTRL_CPIXFMT_ARGB1555 ((0x5) << FShft(HCCTRL_CPIXFMT)) -#define HCCTRL_CBASE_ADR Fld(23,0) -#define Hcctrl_Cbase_Adr(x) ((x) << FShft(HCCTRL_CBASE_ADR)) - -/* HCSIZE Hardware Cursor Size Register fields */ -#define HCSIZE_BLEND_POS Fld(2,29) -#define HCSIZE_BLEND_GFX ((0x0) << FShft(HCSIZE_BLEND_POS)) -#define HCSIZE_BLEND_VID ((0x1) << FShft(HCSIZE_BLEND_POS)) -#define HCSIZE_BLEND_CUR ((0x2) << FShft(HCSIZE_BLEND_POS)) -#define HCSIZE_CWIDTH Fld(3,16) -#define Hcsize_Cwidth(x) ((x) << FShft(HCSIZE_CWIDTH)) -#define HCSIZE_CHEIGHT Fld(3,0) -#define Hcsize_Cheight(x) ((x) << FShft(HCSIZE_CHEIGHT)) - -/* HCPOS Hardware Cursor Position Register fields */ -#define HCPOS_SWITCHSRC (1 << 30) -#define HCPOS_CURBLINK Fld(6,24) -#define Hcpos_Curblink(x) ((x) << FShft(HCPOS_CURBLINK)) -#define HCPOS_XSTART Fld(12,12) -#define Hcpos_Xstart(x) ((x) << FShft(HCPOS_XSTART)) -#define HCPOS_YSTART Fld(12,0) -#define Hcpos_Ystart(y) ((y) << FShft(HCPOS_YSTART)) - -/* HCBADR Hardware Cursor Blend Address Register */ -#define HCBADR_GLALPHA Fld(8,24) -#define Hcbadr_Glalpha(x) ((x) << FShft(HCBADR_GLALPHA)) -#define HCBADR_COLKEY Fld(24,0) -#define Hcbadr_Colkey(x) ((x) << FShft(HCBADR_COLKEY)) - -/* HCCKMSK - Hardware Cursor Color Key Mask Register */ -#define HCCKMSK_COLKEY_M Fld(24,0) -#define Hcckmsk_Colkey_M(x) ((x) << FShft(HCCKMSK_COLKEY_M)) - -/* DSCTRL - Display sync control register */ -#define DSCTRL_SYNCGEN_EN (1 << 31) -#define DSCTRL_DPL_RST (1 << 29) -#define DSCTRL_PWRDN_M (1 << 28) -#define DSCTRL_UPDSYNCCNT (1 << 26) -#define DSCTRL_UPDINTCNT (1 << 25) -#define DSCTRL_UPDCNT (1 << 24) -#define DSCTRL_UPDWAIT Fld(4,16) -#define Dsctrl_Updwait(x) ((x) << FShft(DSCTRL_UPDWAIT)) -#define DSCTRL_CLKPOL (1 << 11) -#define DSCTRL_CSYNC_EN (1 << 10) -#define DSCTRL_VS_SLAVE (1 << 7) -#define DSCTRL_HS_SLAVE (1 << 6) -#define DSCTRL_BLNK_POL (1 << 5) -#define DSCTRL_BLNK_DIS (1 << 4) -#define DSCTRL_VS_POL (1 << 3) -#define DSCTRL_VS_DIS (1 << 2) -#define DSCTRL_HS_POL (1 << 1) -#define DSCTRL_HS_DIS (1 << 0) - -/* DHT01 - Display horizontal timing register 01 */ -#define DHT01_HBPS Fld(12,16) -#define Dht01_Hbps(x) ((x) << FShft(DHT01_HBPS)) -#define DHT01_HT Fld(12,0) -#define Dht01_Ht(x) ((x) << FShft(DHT01_HT)) - -/* DHT02 - Display horizontal timing register 02 */ -#define DHT02_HAS Fld(12,16) -#define Dht02_Has(x) ((x) << FShft(DHT02_HAS)) -#define DHT02_HLBS Fld(12,0) -#define Dht02_Hlbs(x) ((x) << FShft(DHT02_HLBS)) - -/* DHT03 - Display horizontal timing register 03 */ -#define DHT03_HFPS Fld(12,16) -#define Dht03_Hfps(x) ((x) << FShft(DHT03_HFPS)) -#define DHT03_HRBS Fld(12,0) -#define Dht03_Hrbs(x) ((x) << FShft(DHT03_HRBS)) - -/* DVT01 - Display vertical timing register 01 */ -#define DVT01_VBPS Fld(12,16) -#define Dvt01_Vbps(x) ((x) << FShft(DVT01_VBPS)) -#define DVT01_VT Fld(12,0) -#define Dvt01_Vt(x) ((x) << FShft(DVT01_VT)) - -/* DVT02 - Display vertical timing register 02 */ -#define DVT02_VAS Fld(12,16) -#define Dvt02_Vas(x) ((x) << FShft(DVT02_VAS)) -#define DVT02_VTBS Fld(12,0) -#define Dvt02_Vtbs(x) ((x) << FShft(DVT02_VTBS)) - -/* DVT03 - Display vertical timing register 03 */ 
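As a worked example of the Fld()/FShft()/FMsk() helpers that every
multi-field register in this header relies on, here is a hand expansion
for the DVT03_VFPS field defined just below (derived from the macro
definitions at the top of reg_bits.h, not text taken from the driver):

    /* DVT03_VFPS is declared Fld(12,16): a 12-bit field at bit 16, so
     *   FShft(DVT03_VFPS) == 16
     *   FMsk(DVT03_VFPS)  == ((1UL << 12) - 1) << 16 == 0x0fff0000
     *   Dvt03_Vfps(x)     == (x) << 16
     * and the usual read-modify-write of such a field (val and vfps
     * are placeholder names) is:
     *   val = (readl(DVT03) & ~FMsk(DVT03_VFPS)) | Dvt03_Vfps(vfps);
     */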
-#define DVT03_VFPS Fld(12,16) -#define Dvt03_Vfps(x) ((x) << FShft(DVT03_VFPS)) -#define DVT03_VBBS Fld(12,0) -#define Dvt03_Vbbs(x) ((x) << FShft(DVT03_VBBS)) - -/* DVECTRL - display vertical event control register */ -#define DVECTRL_VEVENT Fld(12,16) -#define Dvectrl_Vevent(x) ((x) << FShft(DVECTRL_VEVENT)) -#define DVECTRL_VFETCH Fld(12,0) -#define Dvectrl_Vfetch(x) ((x) << FShft(DVECTRL_VFETCH)) - -/* DHDET - display horizontal DE timing register */ -#define DHDET_HDES Fld(12,16) -#define Dhdet_Hdes(x) ((x) << FShft(DHDET_HDES)) -#define DHDET_HDEF Fld(12,0) -#define Dhdet_Hdef(x) ((x) << FShft(DHDET_HDEF)) - -/* DVDET - display vertical DE timing register */ -#define DVDET_VDES Fld(12,16) -#define Dvdet_Vdes(x) ((x) << FShft(DVDET_VDES)) -#define DVDET_VDEF Fld(12,0) -#define Dvdet_Vdef(x) ((x) << FShft(DVDET_VDEF)) - -/* DODMSK - display output data mask register */ -#define DODMSK_MASK_LVL (1 << 31) -#define DODMSK_BLNK_LVL (1 << 30) -#define DODMSK_MASK_B Fld(8,16) -#define Dodmsk_Mask_B(x) ((x) << FShft(DODMSK_MASK_B)) -#define DODMSK_MASK_G Fld(8,8) -#define Dodmsk_Mask_G(x) ((x) << FShft(DODMSK_MASK_G)) -#define DODMSK_MASK_R Fld(8,0) -#define Dodmsk_Mask_R(x) ((x) << FShft(DODMSK_MASK_R)) - -/* DBCOL - display border color control register */ -#define DBCOL_BORDCOL Fld(24,0) -#define Dbcol_Bordcol(x) ((x) << FShft(DBCOL_BORDCOL)) - -/* DVLNUM - display vertical line number register */ -#define DVLNUM_VLINE Fld(12,0) -#define Dvlnum_Vline(x) ((x) << FShft(DVLNUM_VLINE)) - -/* DMCTRL - Display Memory Control Register */ -#define DMCTRL_MEM_REF Fld(2,30) -#define DMCTRL_MEM_REF_ACT ((0x0) << FShft(DMCTRL_MEM_REF)) -#define DMCTRL_MEM_REF_HB ((0x1) << FShft(DMCTRL_MEM_REF)) -#define DMCTRL_MEM_REF_VB ((0x2) << FShft(DMCTRL_MEM_REF)) -#define DMCTRL_MEM_REF_BOTH ((0x3) << FShft(DMCTRL_MEM_REF)) -#define DMCTRL_UV_THRHLD Fld(6,24) -#define Dmctrl_Uv_Thrhld(x) ((x) << FShft(DMCTRL_UV_THRHLD)) -#define DMCTRL_V_THRHLD Fld(7,16) -#define Dmctrl_V_Thrhld(x) ((x) << FShft(DMCTRL_V_THRHLD)) -#define DMCTRL_D_THRHLD Fld(7,8) -#define Dmctrl_D_Thrhld(x) ((x) << FShft(DMCTRL_D_THRHLD)) -#define DMCTRL_BURSTLEN Fld(6,0) -#define Dmctrl_Burstlen(x) ((x) << FShft(DMCTRL_BURSTLEN)) - -/* DINTRS - Display Interrupt Status Register */ -#define DINTRS_CUR_OR_S (1 << 18) -#define DINTRS_STR2_OR_S (1 << 17) -#define DINTRS_STR1_OR_S (1 << 16) -#define DINTRS_CUR_UR_S (1 << 6) -#define DINTRS_STR2_UR_S (1 << 5) -#define DINTRS_STR1_UR_S (1 << 4) -#define DINTRS_VEVENT1_S (1 << 3) -#define DINTRS_VEVENT0_S (1 << 2) -#define DINTRS_HBLNK1_S (1 << 1) -#define DINTRS_HBLNK0_S (1 << 0) - -/* DINTRE - Display Interrupt Enable Register */ -#define DINTRE_CUR_OR_EN (1 << 18) -#define DINTRE_STR2_OR_EN (1 << 17) -#define DINTRE_STR1_OR_EN (1 << 16) -#define DINTRE_CUR_UR_EN (1 << 6) -#define DINTRE_STR2_UR_EN (1 << 5) -#define DINTRE_STR1_UR_EN (1 << 4) -#define DINTRE_VEVENT1_EN (1 << 3) -#define DINTRE_VEVENT0_EN (1 << 2) -#define DINTRE_HBLNK1_EN (1 << 1) -#define DINTRE_HBLNK0_EN (1 << 0) - -/* DINTRS - Display Interrupt Status Register */ -#define DINTRS_CUR_OR_S (1 << 18) -#define DINTRS_STR2_OR_S (1 << 17) -#define DINTRS_STR1_OR_S (1 << 16) -#define DINTRS_CUR_UR_S (1 << 6) -#define DINTRS_STR2_UR_S (1 << 5) -#define DINTRS_STR1_UR_S (1 << 4) -#define DINTRS_VEVENT1_S (1 << 3) -#define DINTRS_VEVENT0_S (1 << 2) -#define DINTRS_HBLNK1_S (1 << 1) -#define DINTRS_HBLNK0_S (1 << 0) - -/* DINTRE - Display Interrupt Enable Register */ -#define DINTRE_CUR_OR_EN (1 << 18) -#define DINTRE_STR2_OR_EN (1 << 17) 
-#define DINTRE_STR1_OR_EN (1 << 16) -#define DINTRE_CUR_UR_EN (1 << 6) -#define DINTRE_STR2_UR_EN (1 << 5) -#define DINTRE_STR1_UR_EN (1 << 4) -#define DINTRE_VEVENT1_EN (1 << 3) -#define DINTRE_VEVENT0_EN (1 << 2) -#define DINTRE_HBLNK1_EN (1 << 1) -#define DINTRE_HBLNK0_EN (1 << 0) - - -/* DLSTS - display load status register */ -#define DLSTS_RLD_ADONE (1 << 23) -/* #define DLSTS_RLD_ADOUT Fld(23,0) */ - -/* DLLCTRL - display list load control register */ -#define DLLCTRL_RLD_ADRLN Fld(8,24) -#define Dllctrl_Rld_Adrln(x) ((x) << FShft(DLLCTRL_RLD_ADRLN)) - -/* CLIPCTRL - Clipping Control Register */ -#define CLIPCTRL_HSKIP Fld(11,16) -#define Clipctrl_Hskip ((x) << FShft(CLIPCTRL_HSKIP)) -#define CLIPCTRL_VSKIP Fld(11,0) -#define Clipctrl_Vskip ((x) << FShft(CLIPCTRL_VSKIP)) - -/* SPOCTRL - Scale Pitch/Order Control Register */ -#define SPOCTRL_H_SC_BP (1 << 31) -#define SPOCTRL_V_SC_BP (1 << 30) -#define SPOCTRL_HV_SC_OR (1 << 29) -#define SPOCTRL_VS_UR_C (1 << 27) -#define SPOCTRL_VORDER Fld(2,16) -#define SPOCTRL_VORDER_1TAP ((0x0) << FShft(SPOCTRL_VORDER)) -#define SPOCTRL_VORDER_2TAP ((0x1) << FShft(SPOCTRL_VORDER)) -#define SPOCTRL_VORDER_4TAP ((0x3) << FShft(SPOCTRL_VORDER)) -#define SPOCTRL_VPITCH Fld(16,0) -#define Spoctrl_Vpitch(x) ((x) << FShft(SPOCTRL_VPITCH)) - -/* SVCTRL - Scale Vertical Control Register */ -#define SVCTRL_INITIAL1 Fld(16,16) -#define Svctrl_Initial1(x) ((x) << FShft(SVCTRL_INITIAL1)) -#define SVCTRL_INITIAL2 Fld(16,0) -#define Svctrl_Initial2(x) ((x) << FShft(SVCTRL_INITIAL2)) - -/* SHCTRL - Scale Horizontal Control Register */ -#define SHCTRL_HINITIAL Fld(16,16) -#define Shctrl_Hinitial(x) ((x) << FShft(SHCTRL_HINITIAL)) -#define SHCTRL_HDECIM (1 << 15) -#define SHCTRL_HPITCH Fld(15,0) -#define Shctrl_Hpitch(x) ((x) << FShft(SHCTRL_HPITCH)) - -/* SSSIZE - Scale Surface Size Register */ -#define SSSIZE_SC_WIDTH Fld(11,16) -#define Sssize_Sc_Width(x) ((x) << FShft(SSSIZE_SC_WIDTH)) -#define SSSIZE_SC_HEIGHT Fld(11,0) -#define Sssize_Sc_Height(x) ((x) << FShft(SSSIZE_SC_HEIGHT)) - -#endif /* __REG_BITS_2700G_ */ diff --git a/drivers/video/fbdev/mbx/regs.h b/drivers/video/fbdev/mbx/regs.h deleted file mode 100644 index 591fc9d26084..000000000000 --- a/drivers/video/fbdev/mbx/regs.h +++ /dev/null @@ -1,196 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __REGS_2700G_ -#define __REGS_2700G_ - -/* extern unsigned long virt_base_2700; */ -/* #define __REG_2700G(x) (*(volatile unsigned long*)((x)+virt_base_2700)) */ -#define __REG_2700G(x) ((x)+virt_base_2700) - -/* System Configuration Registers (0x0000_0000 0x0000_0010) */ -#define SYSCFG __REG_2700G(0x00000000) -#define PFBASE __REG_2700G(0x00000004) -#define PFCEIL __REG_2700G(0x00000008) -#define POLLFLAG __REG_2700G(0x0000000c) -#define SYSRST __REG_2700G(0x00000010) - -/* Interrupt Control Registers (0x0000_0014 0x0000_002F) */ -#define NINTPW __REG_2700G(0x00000014) -#define MINTENABLE __REG_2700G(0x00000018) -#define MINTSTAT __REG_2700G(0x0000001c) -#define SINTENABLE __REG_2700G(0x00000020) -#define SINTSTAT __REG_2700G(0x00000024) -#define SINTCLR __REG_2700G(0x00000028) - -/* Clock Control Registers (0x0000_002C 0x0000_005F) */ -#define SYSCLKSRC __REG_2700G(0x0000002c) -#define PIXCLKSRC __REG_2700G(0x00000030) -#define CLKSLEEP __REG_2700G(0x00000034) -#define COREPLL __REG_2700G(0x00000038) -#define DISPPLL __REG_2700G(0x0000003c) -#define PLLSTAT __REG_2700G(0x00000040) -#define VOVRCLK __REG_2700G(0x00000044) -#define PIXCLK __REG_2700G(0x00000048) -#define MEMCLK 
__REG_2700G(0x0000004c) -#define M24CLK __REG_2700G(0x00000050) -#define MBXCLK __REG_2700G(0x00000054) -#define SDCLK __REG_2700G(0x00000058) -#define PIXCLKDIV __REG_2700G(0x0000005c) - -/* LCD Port Control Register (0x0000_0060 0x0000_006F) */ -#define LCD_CONFIG __REG_2700G(0x00000060) - -/* On-Die Frame Buffer Registers (0x0000_0064 0x0000_006B) */ -#define ODFBPWR __REG_2700G(0x00000064) -#define ODFBSTAT __REG_2700G(0x00000068) - -/* GPIO Registers (0x0000_006C 0x0000_007F) */ -#define GPIOCGF __REG_2700G(0x0000006c) -#define GPIOHI __REG_2700G(0x00000070) -#define GPIOLO __REG_2700G(0x00000074) -#define GPIOSTAT __REG_2700G(0x00000078) - -/* Pulse Width Modulator (PWM) Registers (0x0000_0200 0x0000_02FF) */ -#define PWMRST __REG_2700G(0x00000200) -#define PWMCFG __REG_2700G(0x00000204) -#define PWM0DIV __REG_2700G(0x00000210) -#define PWM0DUTY __REG_2700G(0x00000214) -#define PWM0PER __REG_2700G(0x00000218) -#define PWM1DIV __REG_2700G(0x00000220) -#define PWM1DUTY __REG_2700G(0x00000224) -#define PWM1PER __REG_2700G(0x00000228) - -/* Identification (ID) Registers (0x0000_0300 0x0000_0FFF) */ -#define ID __REG_2700G(0x00000FF0) - -/* Local Memory (SDRAM) Interface Registers (0x0000_1000 0x0000_1FFF) */ -#define LMRST __REG_2700G(0x00001000) -#define LMCFG __REG_2700G(0x00001004) -#define LMPWR __REG_2700G(0x00001008) -#define LMPWRSTAT __REG_2700G(0x0000100c) -#define LMCEMR __REG_2700G(0x00001010) -#define LMTYPE __REG_2700G(0x00001014) -#define LMTIM __REG_2700G(0x00001018) -#define LMREFRESH __REG_2700G(0x0000101c) -#define LMPROTMIN __REG_2700G(0x00001020) -#define LMPROTMAX __REG_2700G(0x00001024) -#define LMPROTCFG __REG_2700G(0x00001028) -#define LMPROTERR __REG_2700G(0x0000102c) - -/* Plane Controller Registers (0x0000_2000 0x0000_2FFF) */ -#define GSCTRL __REG_2700G(0x00002000) -#define VSCTRL __REG_2700G(0x00002004) -#define GBBASE __REG_2700G(0x00002020) -#define VBBASE __REG_2700G(0x00002024) -#define GDRCTRL __REG_2700G(0x00002040) -#define VCMSK __REG_2700G(0x00002044) -#define GSCADR __REG_2700G(0x00002060) -#define VSCADR __REG_2700G(0x00002064) -#define VUBASE __REG_2700G(0x00002084) -#define VVBASE __REG_2700G(0x000020a4) -#define GSADR __REG_2700G(0x000020c0) -#define VSADR __REG_2700G(0x000020c4) -#define HCCTRL __REG_2700G(0x00002100) -#define HCSIZE __REG_2700G(0x00002110) -#define HCPOS __REG_2700G(0x00002120) -#define HCBADR __REG_2700G(0x00002130) -#define HCCKMSK __REG_2700G(0x00002140) -#define GPLUT __REG_2700G(0x00002150) -#define DSCTRL __REG_2700G(0x00002154) -#define DHT01 __REG_2700G(0x00002158) -#define DHT02 __REG_2700G(0x0000215c) -#define DHT03 __REG_2700G(0x00002160) -#define DVT01 __REG_2700G(0x00002164) -#define DVT02 __REG_2700G(0x00002168) -#define DVT03 __REG_2700G(0x0000216c) -#define DBCOL __REG_2700G(0x00002170) -#define BGCOLOR __REG_2700G(0x00002174) -#define DINTRS __REG_2700G(0x00002178) -#define DINTRE __REG_2700G(0x0000217c) -#define DINTRCNT __REG_2700G(0x00002180) -#define DSIG __REG_2700G(0x00002184) -#define DMCTRL __REG_2700G(0x00002188) -#define CLIPCTRL __REG_2700G(0x0000218c) -#define SPOCTRL __REG_2700G(0x00002190) -#define SVCTRL __REG_2700G(0x00002194) - -/* 0x0000_2198 */ -/* 0x0000_21A8 VSCOEFF[0:4] Video Scalar Vertical Coefficient [0:4] 4.14.5 */ -#define VSCOEFF0 __REG_2700G(0x00002198) -#define VSCOEFF1 __REG_2700G(0x0000219c) -#define VSCOEFF2 __REG_2700G(0x000021a0) -#define VSCOEFF3 __REG_2700G(0x000021a4) -#define VSCOEFF4 __REG_2700G(0x000021a8) - -#define SHCTRL __REG_2700G(0x000021b0) - -/* 0x0000_21B4 */ 
-/* 0x0000_21D4 HSCOEFF[0:8] Video Scalar Horizontal Coefficient [0:8] 4.14.7 */ -#define HSCOEFF0 __REG_2700G(0x000021b4) -#define HSCOEFF1 __REG_2700G(0x000021b8) -#define HSCOEFF2 __REG_2700G(0x000021bc) -#define HSCOEFF3 __REG_2700G(0x000021c0) -#define HSCOEFF4 __REG_2700G(0x000021c4) -#define HSCOEFF5 __REG_2700G(0x000021c8) -#define HSCOEFF6 __REG_2700G(0x000021cc) -#define HSCOEFF7 __REG_2700G(0x000021d0) -#define HSCOEFF8 __REG_2700G(0x000021d4) - -#define SSSIZE __REG_2700G(0x000021D8) - -/* 0x0000_2200 */ -/* 0x0000_2240 VIDGAM[0:16] Video Gamma LUT Index [0:16] 4.15.2 */ -#define VIDGAM0 __REG_2700G(0x00002200) -#define VIDGAM1 __REG_2700G(0x00002204) -#define VIDGAM2 __REG_2700G(0x00002208) -#define VIDGAM3 __REG_2700G(0x0000220c) -#define VIDGAM4 __REG_2700G(0x00002210) -#define VIDGAM5 __REG_2700G(0x00002214) -#define VIDGAM6 __REG_2700G(0x00002218) -#define VIDGAM7 __REG_2700G(0x0000221c) -#define VIDGAM8 __REG_2700G(0x00002220) -#define VIDGAM9 __REG_2700G(0x00002224) -#define VIDGAM10 __REG_2700G(0x00002228) -#define VIDGAM11 __REG_2700G(0x0000222c) -#define VIDGAM12 __REG_2700G(0x00002230) -#define VIDGAM13 __REG_2700G(0x00002234) -#define VIDGAM14 __REG_2700G(0x00002238) -#define VIDGAM15 __REG_2700G(0x0000223c) -#define VIDGAM16 __REG_2700G(0x00002240) - -/* 0x0000_2250 */ -/* 0x0000_2290 GFXGAM[0:16] Graphics Gamma LUT Index [0:16] 4.15.3 */ -#define GFXGAM0 __REG_2700G(0x00002250) -#define GFXGAM1 __REG_2700G(0x00002254) -#define GFXGAM2 __REG_2700G(0x00002258) -#define GFXGAM3 __REG_2700G(0x0000225c) -#define GFXGAM4 __REG_2700G(0x00002260) -#define GFXGAM5 __REG_2700G(0x00002264) -#define GFXGAM6 __REG_2700G(0x00002268) -#define GFXGAM7 __REG_2700G(0x0000226c) -#define GFXGAM8 __REG_2700G(0x00002270) -#define GFXGAM9 __REG_2700G(0x00002274) -#define GFXGAM10 __REG_2700G(0x00002278) -#define GFXGAM11 __REG_2700G(0x0000227c) -#define GFXGAM12 __REG_2700G(0x00002280) -#define GFXGAM13 __REG_2700G(0x00002284) -#define GFXGAM14 __REG_2700G(0x00002288) -#define GFXGAM15 __REG_2700G(0x0000228c) -#define GFXGAM16 __REG_2700G(0x00002290) - -#define DLSTS __REG_2700G(0x00002300) -#define DLLCTRL __REG_2700G(0x00002304) -#define DVLNUM __REG_2700G(0x00002308) -#define DUCTRL __REG_2700G(0x0000230c) -#define DVECTRL __REG_2700G(0x00002310) -#define DHDET __REG_2700G(0x00002314) -#define DVDET __REG_2700G(0x00002318) -#define DODMSK __REG_2700G(0x0000231c) -#define CSC01 __REG_2700G(0x00002330) -#define CSC02 __REG_2700G(0x00002334) -#define CSC03 __REG_2700G(0x00002338) -#define CSC04 __REG_2700G(0x0000233c) -#define CSC05 __REG_2700G(0x00002340) - -#define FB_MEMORY_START __REG_2700G(0x00060000) - -#endif /* __REGS_2700G_ */ diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c index c6820e21875d..a372a183c1f0 100644 --- a/drivers/video/fbdev/nvidia/nvidia.c +++ b/drivers/video/fbdev/nvidia/nvidia.c @@ -1037,10 +1037,9 @@ static struct fb_ops nvidia_fb_ops = { .fb_sync = nvidiafb_sync, }; -#ifdef CONFIG_PM -static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg) +static int nvidiafb_suspend_late(struct device *dev, pm_message_t mesg) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct nvidia_par *par = info->par; if (mesg.event == PM_EVENT_PRETHAW) @@ -1052,46 +1051,54 @@ static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg) fb_set_suspend(info, 1); nvidiafb_blank(FB_BLANK_POWERDOWN, info); nvidia_write_regs(par, &par->SavedReg); - pci_save_state(dev); - 
pci_disable_device(dev); - pci_set_power_state(dev, pci_choose_state(dev, mesg)); } - dev->dev.power.power_state = mesg; + dev->power.power_state = mesg; console_unlock(); return 0; } -static int nvidiafb_resume(struct pci_dev *dev) +static int __maybe_unused nvidiafb_suspend(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); - struct nvidia_par *par = info->par; + return nvidiafb_suspend_late(dev, PMSG_SUSPEND); +} - console_lock(); - pci_set_power_state(dev, PCI_D0); +static int __maybe_unused nvidiafb_hibernate(struct device *dev) +{ + return nvidiafb_suspend_late(dev, PMSG_HIBERNATE); +} - if (par->pm_state != PM_EVENT_FREEZE) { - pci_restore_state(dev); +static int __maybe_unused nvidiafb_freeze(struct device *dev) +{ + return nvidiafb_suspend_late(dev, PMSG_FREEZE); +} - if (pci_enable_device(dev)) - goto fail; +static int __maybe_unused nvidiafb_resume(struct device *dev) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct nvidia_par *par = info->par; - pci_set_master(dev); - } + console_lock(); par->pm_state = PM_EVENT_ON; nvidiafb_set_par(info); fb_set_suspend (info, 0); nvidiafb_blank(FB_BLANK_UNBLANK, info); -fail: console_unlock(); return 0; } -#else -#define nvidiafb_suspend NULL -#define nvidiafb_resume NULL -#endif + +static const struct dev_pm_ops nvidiafb_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = nvidiafb_suspend, + .resume = nvidiafb_resume, + .freeze = nvidiafb_freeze, + .thaw = nvidiafb_resume, + .poweroff = nvidiafb_hibernate, + .restore = nvidiafb_resume, +#endif /* CONFIG_PM_SLEEP */ +}; static int nvidia_set_fbinfo(struct fb_info *info) { @@ -1492,12 +1499,11 @@ static int nvidiafb_setup(char *options) #endif /* !MODULE */ static struct pci_driver nvidiafb_driver = { - .name = "nvidiafb", - .id_table = nvidiafb_pci_tbl, - .probe = nvidiafb_probe, - .suspend = nvidiafb_suspend, - .resume = nvidiafb_resume, - .remove = nvidiafb_remove, + .name = "nvidiafb", + .id_table = nvidiafb_pci_tbl, + .probe = nvidiafb_probe, + .driver.pm = &nvidiafb_pm_ops, + .remove = nvidiafb_remove, }; /* ------------------------------------------------------------------------- * diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c index 0b0ad20afd63..f560fa4d7786 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c @@ -787,7 +787,7 @@ static int venc_probe_of(struct platform_device *pdev) venc.type = OMAP_DSS_VENC_TYPE_SVIDEO; break; default: - dev_err(&pdev->dev, "bad channel propert '%d'\n", channels); + dev_err(&pdev->dev, "bad channel property '%d'\n", channels); r = -EINVAL; goto err; } diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c index 60c424fae988..5c74253e7b2c 100644 --- a/drivers/video/fbdev/s3fb.c +++ b/drivers/video/fbdev/s3fb.c @@ -1410,9 +1410,9 @@ static void s3_pci_remove(struct pci_dev *dev) /* PCI suspend */ -static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state) +static int __maybe_unused s3_pci_suspend(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct s3fb_info *par = info->par; dev_info(info->device, "suspend\n"); @@ -1420,7 +1420,7 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state) console_lock(); mutex_lock(&(par->open_lock)); - if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) { + if (par->ref_count == 0) { mutex_unlock(&(par->open_lock)); console_unlock(); return 0; @@ -1428,10 +1428,6 @@ static 
int s3_pci_suspend(struct pci_dev* dev, pm_message_t state) fb_set_suspend(info, 1); - pci_save_state(dev); - pci_disable_device(dev); - pci_set_power_state(dev, pci_choose_state(dev, state)); - mutex_unlock(&(par->open_lock)); console_unlock(); @@ -1441,11 +1437,10 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state) /* PCI resume */ -static int s3_pci_resume(struct pci_dev* dev) +static int __maybe_unused s3_pci_resume(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct s3fb_info *par = info->par; - int err; dev_info(info->device, "resume\n"); @@ -1458,17 +1453,6 @@ static int s3_pci_resume(struct pci_dev* dev) return 0; } - pci_set_power_state(dev, PCI_D0); - pci_restore_state(dev); - err = pci_enable_device(dev); - if (err) { - mutex_unlock(&(par->open_lock)); - console_unlock(); - dev_err(info->device, "error %d enabling device for resume\n", err); - return err; - } - pci_set_master(dev); - s3fb_set_par(info); fb_set_suspend(info, 0); @@ -1478,6 +1462,16 @@ static int s3_pci_resume(struct pci_dev* dev) return 0; } +static const struct dev_pm_ops s3_pci_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = s3_pci_suspend, + .resume = s3_pci_resume, + .freeze = NULL, + .thaw = s3_pci_resume, + .poweroff = s3_pci_suspend, + .restore = s3_pci_resume, +#endif +}; /* List of boards that we are trying to support */ @@ -1510,8 +1504,7 @@ static struct pci_driver s3fb_pci_driver = { .id_table = s3_devices, .probe = s3_pci_probe, .remove = s3_pci_remove, - .suspend = s3_pci_suspend, - .resume = s3_pci_resume, + .driver.pm = &s3_pci_pm_ops, }; /* Parse user specified options */ diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c index 661398e40ff4..0ac750cc5ea1 100644 --- a/drivers/video/fbdev/savage/savagefb_driver.c +++ b/drivers/video/fbdev/savage/savagefb_driver.c @@ -2347,9 +2347,9 @@ static void savagefb_remove(struct pci_dev *dev) } } -static int savagefb_suspend(struct pci_dev *dev, pm_message_t mesg) +static int savagefb_suspend_late(struct device *dev, pm_message_t mesg) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct savagefb_par *par = info->par; DBG("savagefb_suspend"); @@ -2357,7 +2357,7 @@ static int savagefb_suspend(struct pci_dev *dev, pm_message_t mesg) if (mesg.event == PM_EVENT_PRETHAW) mesg.event = PM_EVENT_FREEZE; par->pm_state = mesg.event; - dev->dev.power.power_state = mesg; + dev->power.power_state = mesg; /* * For PM_EVENT_FREEZE, do not power down so the console @@ -2375,17 +2375,29 @@ static int savagefb_suspend(struct pci_dev *dev, pm_message_t mesg) savagefb_blank(FB_BLANK_POWERDOWN, info); savage_set_default_par(par, &par->save); savage_disable_mmio(par); - pci_save_state(dev); - pci_disable_device(dev); - pci_set_power_state(dev, pci_choose_state(dev, mesg)); console_unlock(); return 0; } -static int savagefb_resume(struct pci_dev* dev) +static int __maybe_unused savagefb_suspend(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + return savagefb_suspend_late(dev, PMSG_SUSPEND); +} + +static int __maybe_unused savagefb_hibernate(struct device *dev) +{ + return savagefb_suspend_late(dev, PMSG_HIBERNATE); +} + +static int __maybe_unused savagefb_freeze(struct device *dev) +{ + return savagefb_suspend_late(dev, PMSG_FREEZE); +} + +static int __maybe_unused savagefb_resume(struct device *dev) +{ + struct fb_info *info = dev_get_drvdata(dev); struct savagefb_par 
*par = info->par; int cur_state = par->pm_state; @@ -2397,20 +2409,11 @@ static int savagefb_resume(struct pci_dev* dev) * The adapter was not powered down coming back from a * PM_EVENT_FREEZE. */ - if (cur_state == PM_EVENT_FREEZE) { - pci_set_power_state(dev, PCI_D0); + if (cur_state == PM_EVENT_FREEZE) return 0; - } console_lock(); - pci_set_power_state(dev, PCI_D0); - pci_restore_state(dev); - - if (pci_enable_device(dev)) - DBG("err"); - - pci_set_master(dev); savage_enable_mmio(par); savage_init_hw(par); savagefb_set_par(info); @@ -2421,6 +2424,16 @@ static int savagefb_resume(struct pci_dev* dev) return 0; } +static const struct dev_pm_ops savagefb_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = savagefb_suspend, + .resume = savagefb_resume, + .freeze = savagefb_freeze, + .thaw = savagefb_resume, + .poweroff = savagefb_hibernate, + .restore = savagefb_resume, +#endif +}; static const struct pci_device_id savagefb_devices[] = { {PCI_VENDOR_ID_S3, PCI_CHIP_SUPSAV_MX128, @@ -2501,8 +2514,7 @@ static struct pci_driver savagefb_driver = { .name = "savagefb", .id_table = savagefb_devices, .probe = savagefb_probe, - .suspend = savagefb_suspend, - .resume = savagefb_resume, + .driver.pm = &savagefb_pm_ops, .remove = savagefb_remove, }; diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c index dfe3eb769638..fde27feae5d0 100644 --- a/drivers/video/fbdev/sis/init.c +++ b/drivers/video/fbdev/sis/init.c @@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, i = 0; + if (SiS_Pr->ChipType == SIS_730) + queuedata = &FQBQData730[0]; + else + queuedata = &FQBQData[0]; + if(ModeNo > 0x13) { /* Get VCLK */ @@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, /* Get half colordepth */ colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)]; - if(SiS_Pr->ChipType == SIS_730) { - queuedata = &FQBQData730[0]; - } else { - queuedata = &FQBQData[0]; - } - do { templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth; diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c index bdbe9c68e274..0dbc6bf8268a 100644 --- a/drivers/video/fbdev/sm712fb.c +++ b/drivers/video/fbdev/sm712fb.c @@ -1604,6 +1604,14 @@ static int smtcfb_pci_probe(struct pci_dev *pdev, sfb->fb->fix.mmio_start = mmio_base; sfb->fb->fix.mmio_len = 0x00200000; sfb->dp_regs = ioremap(mmio_base, 0x00200000 + smem_size); + if (!sfb->dp_regs) { + dev_err(&pdev->dev, + "%s: unable to map memory mapped IO!\n", + sfb->fb->fix.id); + err = -ENOMEM; + goto failed_fb; + } + sfb->lfb = sfb->dp_regs + 0x00200000; sfb->mmio = (smtc_regbaseaddress = sfb->dp_regs + 0x000c0000); diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index 09425ec317ba..eda448b7a0c9 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -74,6 +74,7 @@ struct ssd1307fb_par { struct fb_info *info; u8 lookup_table[4]; u32 page_offset; + u32 col_offset; u32 prechargep1; u32 prechargep2; struct pwm_device *pwm; @@ -458,11 +459,11 @@ static int ssd1307fb_init(struct ssd1307fb_par *par) if (ret < 0) return ret; - ret = ssd1307fb_write_cmd(par->client, 0x0); + ret = ssd1307fb_write_cmd(par->client, par->col_offset); if (ret < 0) return ret; - ret = ssd1307fb_write_cmd(par->client, par->width - 1); + ret = ssd1307fb_write_cmd(par->client, par->col_offset + par->width - 1); if (ret < 0) return ret; @@ -626,6 +627,9 @@ static int ssd1307fb_probe(struct i2c_client *client) if (device_property_read_u32(dev, 
"solomon,page-offset", &par->page_offset)) par->page_offset = 1; + if (device_property_read_u32(dev, "solomon,col-offset", &par->col_offset)) + par->col_offset = 0; + if (device_property_read_u32(dev, "solomon,com-offset", &par->com_offset)) par->com_offset = 0; diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c index afe6d1b7c3a0..c05cdabeb11c 100644 --- a/drivers/video/fbdev/sstfb.c +++ b/drivers/video/fbdev/sstfb.c @@ -733,7 +733,7 @@ static ssize_t show_vgapass(struct device *device, struct device_attribute *attr { struct fb_info *info = dev_get_drvdata(device); struct sstfb_par *par = info->par; - return snprintf(buf, PAGE_SIZE, "%d\n", par->vgapass); + return sprintf(buf, "%d\n", par->vgapass); } static struct device_attribute device_attrs[] = { diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c index e9869135d833..666fbe2f671c 100644 --- a/drivers/video/fbdev/tgafb.c +++ b/drivers/video/fbdev/tgafb.c @@ -989,8 +989,10 @@ tgafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) /* We can fill 2k pixels per operation. Notice blocks that fit the width of the screen so that we can take advantage of this and fill more than one line per write. */ - if (width == line_length) - width *= height, height = 1; + if (width == line_length) { + width *= height; + height = 1; + } /* The write into the frame buffer must be aligned to 4 bytes, but we are allowed to encode the offset within the word in @@ -1171,8 +1173,10 @@ copyarea_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy, More than anything else, these control how we do copies. */ depos = dy * line_length + dx; sepos = sy * line_length + sx; - if (backward) - depos += width, sepos += width; + if (backward) { + depos += width; + sepos += width; + } /* Next copy full words at a time. 
*/ n32 = width / 32; diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 5b014b479f83..f9b3c1cb9530 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -1457,7 +1457,7 @@ static ssize_t edid_show( struct file *filp, struct kobject *kobj, struct bin_attribute *a, char *buf, loff_t off, size_t count) { - struct device *fbdev = container_of(kobj, struct device, kobj); + struct device *fbdev = kobj_to_dev(kobj); struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; @@ -1479,7 +1479,7 @@ static ssize_t edid_store( struct file *filp, struct kobject *kobj, struct bin_attribute *a, char *src, loff_t src_off, size_t src_size) { - struct device *fbdev = container_of(kobj, struct device, kobj); + struct device *fbdev = kobj_to_dev(kobj); struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; int ret; diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c index 578d3541e3d6..1e8a38a7967d 100644 --- a/drivers/video/fbdev/vga16fb.c +++ b/drivers/video/fbdev/vga16fb.c @@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info) } static void vga16fb_clock_chip(struct vga16fb_par *par, - unsigned int pixclock, + unsigned int *pixclock, const struct fb_info *info, int mul, int div) { @@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par, { 0 /* bad */, 0x00, 0x00}}; int err; - pixclock = (pixclock * mul) / div; + *pixclock = (*pixclock * mul) / div; best = vgaclocks; - err = pixclock - best->pixclock; + err = *pixclock - best->pixclock; if (err < 0) err = -err; for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) { int tmp; - tmp = pixclock - ptr->pixclock; + tmp = *pixclock - ptr->pixclock; if (tmp < 0) tmp = -tmp; if (tmp < err) { err = tmp; @@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par, } par->misc |= best->misc; par->clkdiv = best->seq_clock_mode; - pixclock = (best->pixclock * div) / mul; + *pixclock = (best->pixclock * div) / mul; } #define FAIL(X) return -EINVAL @@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var, if (mode & MODE_8BPP) /* pixel clock == vga clock / 2 */ - vga16fb_clock_chip(par, var->pixclock, info, 1, 2); + vga16fb_clock_chip(par, &var->pixclock, info, 1, 2); else /* pixel clock == vga clock */ - vga16fb_clock_chip(par, var->pixclock, info, 1, 1); + vga16fb_clock_chip(par, &var->pixclock, info, 1, 1); var->red.offset = var->green.offset = var->blue.offset = var->transp.offset = 0; diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c index 703ddee9a244..89d75079b730 100644 --- a/drivers/video/fbdev/via/via-core.c +++ b/drivers/video/fbdev/via/via-core.c @@ -558,9 +558,8 @@ static void via_teardown_subdevs(void) /* * Power management functions */ -#ifdef CONFIG_PM -static LIST_HEAD(viafb_pm_hooks); -static DEFINE_MUTEX(viafb_pm_hooks_lock); +static __maybe_unused LIST_HEAD(viafb_pm_hooks); +static __maybe_unused DEFINE_MUTEX(viafb_pm_hooks_lock); void viafb_pm_register(struct viafb_pm_hooks *hooks) { @@ -580,12 +579,10 @@ void viafb_pm_unregister(struct viafb_pm_hooks *hooks) } EXPORT_SYMBOL_GPL(viafb_pm_unregister); -static int via_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused via_suspend(struct device *dev) { struct viafb_pm_hooks *hooks; - if (state.event != PM_EVENT_SUSPEND) - return 0; /* * "I've occasionally hit a few drivers that caused suspend * failures, and each and every time it was a 
driver bug, and @@ -600,24 +597,13 @@ static int via_suspend(struct pci_dev *pdev, pm_message_t state) hooks->suspend(hooks->private); mutex_unlock(&viafb_pm_hooks_lock); - pci_save_state(pdev); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } -static int via_resume(struct pci_dev *pdev) +static int __maybe_unused via_resume(struct device *dev) { struct viafb_pm_hooks *hooks; - /* Get the bus side powered up */ - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - if (pci_enable_device(pdev)) - return 0; - - pci_set_master(pdev); - /* Now bring back any subdevs */ mutex_lock(&viafb_pm_hooks_lock); list_for_each_entry(hooks, &viafb_pm_hooks, list) @@ -626,7 +612,6 @@ static int via_resume(struct pci_dev *pdev) return 0; } -#endif /* CONFIG_PM */ static int via_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -712,15 +697,23 @@ static const struct pci_device_id via_pci_table[] = { }; MODULE_DEVICE_TABLE(pci, via_pci_table); +static const struct dev_pm_ops via_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = via_suspend, + .resume = via_resume, + .freeze = NULL, + .thaw = via_resume, + .poweroff = NULL, + .restore = via_resume, +#endif +}; + static struct pci_driver via_driver = { .name = "viafb", .id_table = via_pci_table, .probe = via_pci_probe, .remove = via_pci_remove, -#ifdef CONFIG_PM - .suspend = via_suspend, - .resume = via_resume, -#endif + .driver.pm = &via_pm_ops, }; static int __init via_core_init(void) diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c index 98ff8235c9e9..7a959e5ba90b 100644 --- a/drivers/video/fbdev/vt8623fb.c +++ b/drivers/video/fbdev/vt8623fb.c @@ -815,12 +815,11 @@ static void vt8623_pci_remove(struct pci_dev *dev) } -#ifdef CONFIG_PM /* PCI suspend */ -static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state) +static int __maybe_unused vt8623_pci_suspend(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct vt8623fb_info *par = info->par; dev_info(info->device, "suspend\n"); @@ -828,7 +827,7 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state) console_lock(); mutex_lock(&(par->open_lock)); - if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) { + if (par->ref_count == 0) { mutex_unlock(&(par->open_lock)); console_unlock(); return 0; @@ -836,10 +835,6 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state) fb_set_suspend(info, 1); - pci_save_state(dev); - pci_disable_device(dev); - pci_set_power_state(dev, pci_choose_state(dev, state)); - mutex_unlock(&(par->open_lock)); console_unlock(); @@ -849,9 +844,9 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state) /* PCI resume */ -static int vt8623_pci_resume(struct pci_dev* dev) +static int __maybe_unused vt8623_pci_resume(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct vt8623fb_info *par = info->par; dev_info(info->device, "resume\n"); @@ -862,14 +857,6 @@ static int vt8623_pci_resume(struct pci_dev* dev) if (par->ref_count == 0) goto fail; - pci_set_power_state(dev, PCI_D0); - pci_restore_state(dev); - - if (pci_enable_device(dev)) - goto fail; - - pci_set_master(dev); - vt8623fb_set_par(info); fb_set_suspend(info, 0); @@ -879,10 +866,17 @@ fail: return 0; } -#else -#define vt8623_pci_suspend NULL -#define vt8623_pci_resume NULL -#endif /* CONFIG_PM */ + +static const struct 
dev_pm_ops vt8623_pci_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = vt8623_pci_suspend, + .resume = vt8623_pci_resume, + .freeze = NULL, + .thaw = vt8623_pci_resume, + .poweroff = vt8623_pci_suspend, + .restore = vt8623_pci_resume, +#endif /* CONFIG_PM_SLEEP */ +}; /* List of boards that we are trying to support */ @@ -898,8 +892,7 @@ static struct pci_driver vt8623fb_pci_driver = { .id_table = vt8623_devices, .probe = vt8623_pci_probe, .remove = vt8623_pci_remove, - .suspend = vt8623_pci_suspend, - .resume = vt8623_pci_resume, + .driver.pm = &vt8623_pci_pm_ops, }; /* Cleanup */ diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 627ac0487494..238383ff1064 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -498,6 +498,36 @@ static const char *vm_bus_name(struct virtio_device *vdev) return vm_dev->pdev->name; } +static bool vm_get_shm_region(struct virtio_device *vdev, + struct virtio_shm_region *region, u8 id) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + u64 len, addr; + + /* Select the region we're interested in */ + writel(id, vm_dev->base + VIRTIO_MMIO_SHM_SEL); + + /* Read the region size */ + len = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_LOW); + len |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_HIGH) << 32; + + region->len = len; + + /* Check if region length is -1. If that's the case, the shared memory + * region does not exist and there is no need to proceed further. + */ + if (len == ~(u64)0) + return false; + + /* Read the region base address */ + addr = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_LOW); + addr |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_HIGH) << 32; + + region->addr = addr; + + return true; +} + static const struct virtio_config_ops virtio_mmio_config_ops = { .get = vm_get, .set = vm_set, @@ -510,6 +540,7 @@ static const struct virtio_config_ops virtio_mmio_config_ops = { .get_features = vm_get_features, .finalize_features = vm_finalize_features, .bus_name = vm_bus_name, + .get_shm_region = vm_get_shm_region, }; diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 3e14e700b231..3d6ae5a5e252 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -444,6 +444,99 @@ static void del_vq(struct virtio_pci_vq_info *info) vring_del_virtqueue(vq); } +static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id, + u8 *bar, u64 *offset, u64 *len) +{ + int pos; + + for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0; + pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) { + u8 type, cap_len, id; + u32 tmp32; + u64 res_offset, res_length; + + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, + cfg_type), &type); + if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG) + continue; + + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, + cap_len), &cap_len); + if (cap_len != sizeof(struct virtio_pci_cap64)) { + dev_err(&dev->dev, "%s: shm cap with bad size offset:" + " %d size: %d\n", __func__, pos, cap_len); + continue; + } + + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, + id), &id); + if (id != required_id) + continue; + + /* Type, and ID match, looks good */ + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, + bar), bar); + + /* Read the lower 32bit of length and offset */ + pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap, + offset), &tmp32); + res_offset = tmp32; + pci_read_config_dword(dev, pos + 
offsetof(struct virtio_pci_cap, + length), &tmp32); + res_length = tmp32; + + /* and now the top half */ + pci_read_config_dword(dev, + pos + offsetof(struct virtio_pci_cap64, + offset_hi), &tmp32); + res_offset |= ((u64)tmp32) << 32; + pci_read_config_dword(dev, + pos + offsetof(struct virtio_pci_cap64, + length_hi), &tmp32); + res_length |= ((u64)tmp32) << 32; + + *offset = res_offset; + *len = res_length; + + return pos; + } + return 0; +} + +static bool vp_get_shm_region(struct virtio_device *vdev, + struct virtio_shm_region *region, u8 id) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct pci_dev *pci_dev = vp_dev->pci_dev; + u8 bar; + u64 offset, len; + phys_addr_t phys_addr; + size_t bar_len; + + if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len)) + return false; + + phys_addr = pci_resource_start(pci_dev, bar); + bar_len = pci_resource_len(pci_dev, bar); + + if ((offset + len) < offset) { + dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected\n", + __func__); + return false; + } + + if (offset + len > bar_len) { + dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len\n", + __func__); + return false; + } + + region->len = len; + region->addr = (u64) phys_addr + offset; + + return true; +} + static const struct virtio_config_ops virtio_pci_config_nodev_ops = { .get = NULL, .set = NULL, @@ -458,6 +551,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = { .bus_name = vp_bus_name, .set_vq_affinity = vp_set_vq_affinity, .get_vq_affinity = vp_get_vq_affinity, + .get_shm_region = vp_get_shm_region, }; static const struct virtio_config_ops virtio_pci_config_ops = { @@ -474,6 +568,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = { .bus_name = vp_bus_name, .set_vq_affinity = vp_set_vq_affinity, .get_vq_affinity = vp_get_vq_affinity, + .get_shm_region = vp_get_shm_region, }; /** diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c index b1b6eebafd5d..4c13cbc99896 100644 --- a/drivers/xen/gntdev-dmabuf.c +++ b/drivers/xen/gntdev-dmabuf.c @@ -247,10 +247,9 @@ static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf, if (sgt) { if (gntdev_dmabuf_attach->dir != DMA_NONE) - dma_unmap_sg_attrs(attach->dev, sgt->sgl, - sgt->nents, - gntdev_dmabuf_attach->dir, - DMA_ATTR_SKIP_CPU_SYNC); + dma_unmap_sgtable(attach->dev, sgt, + gntdev_dmabuf_attach->dir, + DMA_ATTR_SKIP_CPU_SYNC); sg_free_table(sgt); } @@ -288,8 +287,8 @@ dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach, sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages, gntdev_dmabuf->nr_pages); if (!IS_ERR(sgt)) { - if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC)) { + if (dma_map_sgtable(attach->dev, sgt, dir, + DMA_ATTR_SKIP_CPU_SYNC)) { sg_free_table(sgt); kfree(sgt); sgt = ERR_PTR(-ENOMEM); @@ -633,7 +632,7 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev, /* Now convert sgt to array of pages and check for page validity. 
*/ i = 0; - for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) { + for_each_sgtable_page(sgt, &sg_iter, 0) { struct page *page = sg_page_iter_page(&sg_iter); /* * Check if page is valid: this can happen if we are given diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h index b0e390b3288e..bda8aa7c2280 100644 --- a/include/drm/bridge/dw_mipi_dsi.h +++ b/include/drm/bridge/dw_mipi_dsi.h @@ -36,6 +36,7 @@ struct dw_mipi_dsi_phy_ops { unsigned int *lane_mbps); int (*get_timing)(void *priv_data, unsigned int lane_mbps, struct dw_mipi_dsi_dphy_timing *timing); + int (*get_esc_clk_rate)(void *priv_data, unsigned int *esc_clk_rate); }; struct dw_mipi_dsi_host_ops { diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index b268180c97eb..85df04c8e62f 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -74,6 +74,9 @@ void drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, struct drm_atomic_state *old_state); +void +drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state); + void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, struct drm_atomic_state *state); void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 0988351d743c..f4f68e7a9149 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -92,7 +92,7 @@ struct drm_device { * NULL. * * Instead of using this pointer it is recommended that drivers use - * drm_dev_init() and embed struct &drm_device in their larger + * devm_drm_dev_alloc() and embed struct &drm_device in their larger * per-device structure. */ void *dev_private; diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 85513eeb2196..da53aebb7230 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -28,6 +28,8 @@ #include <linux/types.h> #include <drm/drm_connector.h> +struct drm_device; + /* * Unless otherwise noted, all values are from the DP 1.1a spec. Note that * DP and DPCD versions are independent. 
Differences from 1.0 are not noted, @@ -385,13 +387,30 @@ # define DP_DS_PORT_TYPE_DP_DUALMODE 5 # define DP_DS_PORT_TYPE_WIRELESS 6 # define DP_DS_PORT_HPD (1 << 3) +# define DP_DS_NON_EDID_MASK (0xf << 4) +# define DP_DS_NON_EDID_720x480i_60 (1 << 4) +# define DP_DS_NON_EDID_720x480i_50 (2 << 4) +# define DP_DS_NON_EDID_1920x1080i_60 (3 << 4) +# define DP_DS_NON_EDID_1920x1080i_50 (4 << 4) +# define DP_DS_NON_EDID_1280x720_60 (5 << 4) +# define DP_DS_NON_EDID_1280x720_50 (7 << 4) /* offset 1 for VGA is maximum megapixels per second / 8 */ -/* offset 2 */ +/* offset 1 for DVI/HDMI is maximum TMDS clock in Mbps / 2.5 */ +/* offset 2 for VGA/DVI/HDMI */ # define DP_DS_MAX_BPC_MASK (3 << 0) # define DP_DS_8BPC 0 # define DP_DS_10BPC 1 # define DP_DS_12BPC 2 # define DP_DS_16BPC 3 +/* offset 3 for DVI */ +# define DP_DS_DVI_DUAL_LINK (1 << 1) +# define DP_DS_DVI_HIGH_COLOR_DEPTH (1 << 2) +/* offset 3 for HDMI */ +# define DP_DS_HDMI_FRAME_SEQ_TO_FRAME_PACK (1 << 0) +# define DP_DS_HDMI_YCBCR422_PASS_THROUGH (1 << 1) +# define DP_DS_HDMI_YCBCR420_PASS_THROUGH (1 << 2) +# define DP_DS_HDMI_YCBCR444_TO_422_CONV (1 << 3) +# define DP_DS_HDMI_YCBCR444_TO_420_CONV (1 << 4) #define DP_MAX_DOWNSTREAM_PORTS 0x10 @@ -984,6 +1003,16 @@ #define DP_CEC_TX_MESSAGE_BUFFER 0x3020 #define DP_CEC_MESSAGE_BUFFER_LENGTH 0x10 +#define DP_PROTOCOL_CONVERTER_CONTROL_0 0x3050 /* DP 1.3 */ +# define DP_HDMI_DVI_OUTPUT_CONFIG (1 << 0) /* DP 1.3 */ +#define DP_PROTOCOL_CONVERTER_CONTROL_1 0x3051 /* DP 1.3 */ +# define DP_CONVERSION_TO_YCBCR420_ENABLE (1 << 0) /* DP 1.3 */ +# define DP_HDMI_EDID_PROCESSING_DISABLE (1 << 1) /* DP 1.4 */ +# define DP_HDMI_AUTONOMOUS_SCRAMBLING_DISABLE (1 << 2) /* DP 1.4 */ +# define DP_HDMI_FORCE_SCRAMBLING (1 << 3) /* DP 1.4 */ +#define DP_PROTOCOL_CONVERTER_CONTROL_2 0x3052 /* DP 1.3 */ +# define DP_CONVERSION_TO_YCBCR422_ENABLE (1 << 0) /* DP 1.3 */ + #define DP_AUX_HDCP_BKSV 0x68000 #define DP_AUX_HDCP_RI_PRIME 0x68005 #define DP_AUX_HDCP_AKSV 0x68007 @@ -1109,6 +1138,9 @@ #define DP_POWER_DOWN_PHY 0x25 #define DP_SINK_EVENT_NOTIFY 0x30 #define DP_QUERY_STREAM_ENC_STATUS 0x38 +#define DP_QUERY_STREAM_ENC_STATUS_STATE_NO_EXIST 0 +#define DP_QUERY_STREAM_ENC_STATUS_STATE_INACTIVE 1 +#define DP_QUERY_STREAM_ENC_STATUS_STATE_ACTIVE 2 /* DP 1.2 MST sideband reply types */ #define DP_SIDEBAND_REPLY_ACK 0x00 @@ -1135,6 +1167,7 @@ #define DP_MST_PHYSICAL_PORT_0 0 #define DP_MST_LOGICAL_PORT_0 8 +#define DP_LINK_CONSTANT_N_VALUE 0x8000 #define DP_LINK_STATUS_SIZE 6 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count); @@ -1619,13 +1652,35 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, int drm_dp_read_downstream_info(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]); -int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]); +bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], u8 type); +bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], 
- const u8 port_cap[4]); + const u8 port_cap[4], + const struct edid *edid); +bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); -void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], struct drm_dp_aux *aux); +void drm_dp_downstream_debug(struct seq_file *m, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid, + struct drm_dp_aux *aux); enum drm_mode_subconnector drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]); diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 6ae5860d8644..f5e92fe9151c 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -313,6 +313,34 @@ struct drm_dp_remote_i2c_write_ack_reply { u8 port_number; }; +struct drm_dp_query_stream_enc_status_ack_reply { + /* Bit[23:16]- Stream Id */ + u8 stream_id; + + /* Bit[15]- Signed */ + bool reply_signed; + + /* Bit[10:8]- Stream Output Sink Type */ + bool unauthorizable_device_present; + bool legacy_device_present; + bool query_capable_device_present; + + /* Bit[12:11]- Stream Output CP Type */ + bool hdcp_1x_device_present; + bool hdcp_2x_device_present; + + /* Bit[4]- Stream Authentication */ + bool auth_completed; + + /* Bit[3]- Stream Encryption */ + bool encryption_enabled; + + /* Bit[2]- Stream Repeater Function Present */ + bool repeater_present; + + /* Bit[1:0]- Stream State */ + u8 state; +}; #define DRM_DP_MAX_SDP_STREAMS 16 struct drm_dp_allocate_payload { @@ -374,6 +402,15 @@ struct drm_dp_remote_i2c_write { u8 *bytes; }; +struct drm_dp_query_stream_enc_status { + u8 stream_id; + u8 client_id[7]; /* 56-bit nonce */ + u8 stream_event; + bool valid_stream_event; + u8 stream_behavior; + u8 valid_stream_behavior; +}; + /* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */ struct drm_dp_port_number_req { u8 port_number; @@ -422,6 +459,8 @@ struct drm_dp_sideband_msg_req_body { struct drm_dp_remote_i2c_read i2c_read; struct drm_dp_remote_i2c_write i2c_write; + + struct drm_dp_query_stream_enc_status enc_status; } u; }; @@ -444,6 +483,8 @@ struct drm_dp_sideband_msg_reply_body { struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack; struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack; struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack; + + struct drm_dp_query_stream_enc_status_ack_reply enc_status; } u; }; @@ -807,6 +848,9 @@ drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, struct drm_dp_mst_port *port); int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, bool power_up); +int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_query_stream_enc_status_ack_reply *status); int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 7116abc1a04e..e57d0440f00f 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -163,13 +163,12 @@ struct drm_driver { /** * @load: * - * 
Backward-compatible driver callback to complete - * initialization steps after the driver is registered. For - * this reason, may suffer from race conditions and its use is - * deprecated for new drivers. It is therefore only supported - * for existing drivers not yet converted to the new scheme. - * See drm_dev_init() and drm_dev_register() for proper and - * race-free way to set up a &struct drm_device. + * Backward-compatible driver callback to complete initialization steps + * after the driver is registered. For this reason, may suffer from + * race conditions and its use is deprecated for new drivers. It is + * therefore only supported for existing drivers not yet converted to + * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for + * proper and race-free way to set up a &struct drm_device. * * This is deprecated, do not use! * @@ -589,13 +588,6 @@ struct drm_driver { int dev_priv_size; }; -int drm_dev_init(struct drm_device *dev, - struct drm_driver *driver, - struct device *parent); -int devm_drm_dev_init(struct device *parent, - struct drm_device *dev, - struct drm_driver *driver); - void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver, size_t size, size_t offset); diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index cfa4f5af49af..b27a0e2169c8 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -517,4 +517,8 @@ void drm_edid_get_monitor_name(struct edid *edid, char *name, struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, int hsize, int vsize, int fresh, bool rb); +struct drm_display_mode * +drm_display_mode_from_cea_vic(struct drm_device *dev, + u8 video_code); + #endif /* __DRM_EDID_H__ */ diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index 035332f3723f..62cc6e6c3a4f 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -9,7 +9,6 @@ #include <drm/drm_modes.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_placement.h> #include <linux/kernel.h> /* for container_of() */ @@ -20,9 +19,9 @@ struct drm_simple_display_pipe; struct filp; struct vm_area_struct; -#define DRM_GEM_VRAM_PL_FLAG_VRAM TTM_PL_FLAG_VRAM -#define DRM_GEM_VRAM_PL_FLAG_SYSTEM TTM_PL_FLAG_SYSTEM -#define DRM_GEM_VRAM_PL_FLAG_TOPDOWN TTM_PL_FLAG_TOPDOWN +#define DRM_GEM_VRAM_PL_FLAG_SYSTEM (1 << 0) +#define DRM_GEM_VRAM_PL_FLAG_VRAM (1 << 1) +#define DRM_GEM_VRAM_PL_FLAG_TOPDOWN (1 << 2) /* * Buffer-object helpers @@ -101,9 +100,6 @@ u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo); s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo); int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag); int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo); -void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map, - bool *is_iomem); -void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo); void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo); void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr); diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index a01bc6fac83c..9b4292f229c6 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -338,7 +338,7 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node) /** * drm_mm_nodes - list of nodes under the drm_mm range manager - * @mm: the struct drm_mm range manger + * @mm: the struct drm_mm range manager * * As the drm_mm range manager hides its node_list deep with its * structure, 
extracting it looks painful and repetitive. This is diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index eee3c9de6c4f..cdf2a299ccd4 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -350,14 +350,15 @@ struct drm_display_mode { u8 type; /** - * @private_flags: + * @expose_to_userspace: * - * Driver private flags. private_flags can only be used for mode - * objects passed to drivers in modeset operations. It shouldn't be used - * by atomic drivers since they can store any additional data by - * subclassing state structures. + * Indicates whether the mode is to be exposed to the userspace. + * This is to maintain a set of exposed modes while preparing + * user-mode's list in drm_mode_getconnector ioctl. The purpose of + * this only lies in the ioctl function, and is not to be used + * outside the function. */ - int private_flags; + bool expose_to_userspace; /** * @head: @@ -367,19 +368,6 @@ struct drm_display_mode { struct list_head head; /** - * @export_head: - * - * struct list_head for modes to be exposed to the userspace. - * This is to maintain a list of exposed modes while preparing - * user-mode's list in drm_mode_getconnector ioctl. The purpose of this - * list_head only lies in the ioctl function, and is not expected to be - * used outside the function. - * Once used, the stale pointers are not reset, but left as it is, to - * avoid overhead of protecting it by mode_config.mutex. - */ - struct list_head export_head; - - /** * @name: * * Human-readable name of the mode, filled out with drm_mode_set_name(). diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index 9af7422b44cf..0f69f9fbf12c 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -88,10 +88,13 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr); int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma); -struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages); +struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, + struct page **pages, unsigned int nr_pages); struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, int flags); +unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt); + /* helper functions for importing */ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, struct dma_buf *dma_buf, diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 8e7ae30ebcbb..7eeecb07c9a1 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -594,19 +594,25 @@ INTEL_VGA_DEVICE(0x4E51, info) /* TGL */ -#define INTEL_TGL_12_IDS(info) \ +#define INTEL_TGL_12_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x9A60, info), \ + INTEL_VGA_DEVICE(0x9A68, info), \ + INTEL_VGA_DEVICE(0x9A70, info) + +#define INTEL_TGL_12_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x9A40, info), \ INTEL_VGA_DEVICE(0x9A49, info), \ INTEL_VGA_DEVICE(0x9A59, info), \ - INTEL_VGA_DEVICE(0x9A60, info), \ - INTEL_VGA_DEVICE(0x9A68, info), \ - INTEL_VGA_DEVICE(0x9A70, info), \ INTEL_VGA_DEVICE(0x9A78, info), \ INTEL_VGA_DEVICE(0x9AC0, info), \ INTEL_VGA_DEVICE(0x9AC9, info), \ INTEL_VGA_DEVICE(0x9AD9, info), \ INTEL_VGA_DEVICE(0x9AF8, info) +#define INTEL_TGL_12_IDS(info) \ + INTEL_TGL_12_GT1_IDS(info), \ + INTEL_TGL_12_GT2_IDS(info) + /* RKL */ #define INTEL_RKL_IDS(info) \ INTEL_VGA_DEVICE(0x4C80, info), \ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 
6c580987ba16..0f7cd21d6d74 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -141,7 +141,6 @@ struct ttm_buffer_object { struct ttm_resource mem; struct file *persistent_swap_storage; struct ttm_tt *ttm; - bool evicted; bool deleted; /** @@ -151,7 +150,6 @@ struct ttm_buffer_object { struct list_head lru; struct list_head ddestroy; struct list_head swap; - struct list_head io_reserve_lru; /** * Members protected by a bo reservation. @@ -353,18 +351,6 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched); bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place); -/** - * ttm_bo_acc_size - * - * @bdev: Pointer to a ttm_bo_device struct. - * @bo_size: size of the buffer object in byte. - * @struct_size: size of the structure holding buffer object datas - * - * Returns size to account for a buffer object - */ -size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, - unsigned long bo_size, - unsigned struct_size); size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, unsigned long bo_size, unsigned struct_size); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index bc8d0ebb7568..864afa8f6f18 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -77,8 +77,9 @@ struct ttm_bo_driver { * Returns: * -ENOMEM: Out of memory. */ - int (*ttm_tt_populate)(struct ttm_tt *ttm, - struct ttm_operation_ctx *ctx); + int (*ttm_tt_populate)(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx); /** * ttm_tt_unpopulate @@ -87,7 +88,43 @@ struct ttm_bo_driver { * * Free all backing page */ - void (*ttm_tt_unpopulate)(struct ttm_tt *ttm); + void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm); + + /** + * ttm_tt_bind + * + * @bdev: Pointer to a ttm device + * @ttm: Pointer to a struct ttm_tt. + * @bo_mem: Pointer to a struct ttm_resource describing the + * memory type and location for binding. + * + * Bind the backend pages into the aperture in the location + * indicated by @bo_mem. This function should be able to handle + * differences between aperture and system page sizes. + */ + int (*ttm_tt_bind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem); + + /** + * ttm_tt_unbind + * + * @bdev: Pointer to a ttm device + * @ttm: Pointer to a struct ttm_tt. + * + * Unbind previously bound backend pages. This function should be + * able to handle differences between aperture and system page sizes. + */ + void (*ttm_tt_unbind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm); + + /** + * ttm_tt_destroy + * + * @bdev: Pointer to a ttm device + * @ttm: Pointer to a struct ttm_tt. + * + * Destroy the backend. This will be called back from ttm_tt_destroy, so + * don't call ttm_tt_destroy from the callback or it will loop forever. + */ + void (*ttm_tt_destroy)(struct ttm_bo_device *bdev, struct ttm_tt *ttm); /** * struct ttm_bo_driver member eviction_valuable @@ -356,23 +393,6 @@ struct ttm_lru_bulk_move { struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY]; }; -/** - * ttm_flag_masked - * - * @old: Pointer to the result and original value. - * @new: New value of bits. - * @mask: Mask of bits to change. - * - * Convenience function to change a number of bits identified by a mask.
- */ - -static inline uint32_t -ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask) -{ - *old ^= (*old ^ new) & mask; - return *old; -} - /* * ttm_bo.c */ @@ -441,11 +461,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); */ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); -int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); -void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); -int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible); -void ttm_mem_io_unlock(struct ttm_resource_manager *man); - /** * ttm_bo_reserve: * @@ -524,6 +539,29 @@ static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo spin_unlock(&ttm_bo_glob.lru_lock); } +static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, + struct ttm_resource *new_mem) +{ + bo->mem = *new_mem; + new_mem->mm_node = NULL; +} + +/** + * ttm_bo_move_null - assign memory for a buffer object. + * @bo: The bo to assign the memory to + * @new_mem: The memory to be assigned. + * + * Assign the memory from new_mem to the memory of the buffer object bo. + */ +static inline void ttm_bo_move_null(struct ttm_buffer_object *bo, + struct ttm_resource *new_mem) +{ + struct ttm_resource *old_mem = &bo->mem; + + WARN_ON(old_mem->mm_node != NULL); + ttm_bo_assign_mem(bo, new_mem); +} + /** * ttm_bo_unreserve * @@ -604,6 +642,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo); * @bo: A pointer to a struct ttm_buffer_object. * @fence: A fence object that signals when moving is complete. * @evict: This is an evict move. Don't return until the buffer is idle. + * @pipeline: evictions are to be pipelined. * @new_mem: struct ttm_resource indicating where to move. * * Accelerated move function to be called when an accelerated move @@ -615,24 +654,10 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo); */ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct dma_fence *fence, bool evict, + bool pipeline, struct ttm_resource *new_mem); /** - * ttm_bo_pipeline_move. - * - * @bo: A pointer to a struct ttm_buffer_object. - * @fence: A fence object that signals when moving is complete. - * @evict: This is an evict move. Don't return until the buffer is idle. - * @new_mem: struct ttm_resource indicating where to move. - * - * Function for pipelining accelerated moves. Either free the memory - * immediately or hang it on a temporary buffer object. - */ -int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, - struct dma_fence *fence, bool evict, - struct ttm_resource *new_mem); - -/** * ttm_bo_pipeline_gutting. * * @bo: A pointer to a struct ttm_buffer_object. @@ -653,12 +678,29 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo); pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); /** + * ttm_bo_tt_bind + * + * Bind the object tt to a memory resource. + */ +int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem); + +/** + * ttm_bo_tt_unbind + * + * Unbind the object tt from a memory resource. + */ +void ttm_bo_tt_unbind(struct ttm_buffer_object *bo); + +/** + * ttm_bo_tt_destroy. + */ +void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); + +/** * ttm_range_man_init * * @bdev: ttm device * @type: memory manager type - * @available_caching: TTM_PL_FLAG_* for allowed caching modes - * @default_caching: default caching mode * @use_tt: if the memory manager uses tt * @p_size: size of area to be managed in pages.
* @@ -666,10 +708,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); * The range manager is installed for this device in the type slot. */ int ttm_range_man_init(struct ttm_bo_device *bdev, - unsigned type, - uint32_t available_caching, - uint32_t default_caching, - bool use_tt, + unsigned type, bool use_tt, unsigned long p_size); /** diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index 5a19843bb80d..a99d7fdf2964 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -58,9 +58,8 @@ struct ttm_validate_buffer { * Undoes all buffer validation reservations for bos pointed to by * the list entries. */ - -extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, - struct list_head *list); +void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, + struct list_head *list); /** * function ttm_eu_reserve_buffers @@ -96,10 +95,9 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, * ttm_eu_fence_buffer_objects() when command submission is complete or * has failed. */ - -extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, - struct list_head *list, bool intr, - struct list_head *dups); +int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, + struct list_head *list, bool intr, + struct list_head *dups); /** * function ttm_eu_fence_buffer_objects. @@ -113,9 +111,8 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, * It also unreserves all buffers, putting them on lru lists. * */ - -extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, - struct list_head *list, - struct dma_fence *fence); +void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, + struct list_head *list, + struct dma_fence *fence); #endif diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index c78ea99c42cf..c1f167881e33 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -79,19 +79,17 @@ extern struct ttm_mem_global { #endif } ttm_mem_glob; -extern int ttm_mem_global_init(struct ttm_mem_global *glob); -extern void ttm_mem_global_release(struct ttm_mem_global *glob); -extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, +int ttm_mem_global_init(struct ttm_mem_global *glob); +void ttm_mem_global_release(struct ttm_mem_global *glob); +int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, + struct ttm_operation_ctx *ctx); +void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); +int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, + struct page *page, uint64_t size, + struct ttm_operation_ctx *ctx); +void ttm_mem_global_free_page(struct ttm_mem_global *glob, + struct page *page, uint64_t size); +size_t ttm_round_pot(size_t size); +bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages, struct ttm_operation_ctx *ctx); -extern void ttm_mem_global_free(struct ttm_mem_global *glob, - uint64_t amount); -extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size, - struct ttm_operation_ctx *ctx); -extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size); -extern size_t ttm_round_pot(size_t size); -extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob); -extern bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, - uint64_t num_pages, struct ttm_operation_ctx *ctx); #endif diff --git 
a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h index e88a8e39767b..d4022655eae4 100644 --- a/include/drm/ttm/ttm_placement.h +++ b/include/drm/ttm/ttm_placement.h @@ -42,12 +42,6 @@ #define TTM_PL_VRAM 2 #define TTM_PL_PRIV 3 -#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM) -#define TTM_PL_FLAG_TT (1 << TTM_PL_TT) -#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM) -#define TTM_PL_FLAG_PRIV (1 << TTM_PL_PRIV) -#define TTM_PL_MASK_MEM 0x0000FFFF - /* * Other flags that affects data placement. * TTM_PL_FLAG_CACHED indicates cache-coherent mappings @@ -71,8 +65,6 @@ TTM_PL_FLAG_UNCACHED | \ TTM_PL_FLAG_WC) -#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING) - /** * struct ttm_place * @@ -85,6 +77,7 @@ struct ttm_place { unsigned fpfn; unsigned lpfn; + uint32_t mem_type; uint32_t flags; }; diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 6d4226190480..0e172d94a0c1 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -108,15 +108,7 @@ struct ttm_resource_manager_func { * @gpu_offset: If used, the GPU offset of the first managed page of * fixed memory or the first managed location in an aperture. * @size: Size of the managed region. - * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, - * as defined in ttm_placement_common.h - * @default_caching: The default caching policy used for a buffer object - * placed in this memory type if the user doesn't provide one. * @func: structure pointer implementing the range manager. See above - * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures - * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions - * reserved by the TTM vm system. - * @io_reserve_lru: Optional lru list for unreserving io mem regions. * @move_lock: lock for move fence * static information. bdev::driver::io_mem_free is never used. * @lru: The lru list for this memory type. @@ -131,20 +123,10 @@ struct ttm_resource_manager { bool use_type; bool use_tt; uint64_t size; - uint32_t available_caching; - uint32_t default_caching; const struct ttm_resource_manager_func *func; - struct mutex io_reserve_mutex; - bool use_io_reserve_lru; spinlock_t move_lock; /* - * Protected by @io_reserve_mutex: - */ - - struct list_head io_reserve_lru; - - /* * Protected by the global->lru_lock. */ @@ -160,21 +142,15 @@ struct ttm_resource_manager { * struct ttm_bus_placement * * @addr: mapped virtual address - * @base: bus base address + * @offset: physical addr * @is_iomem: is this io memory ? - * @offset: offset from the base address - * @io_reserved_vm: The VM system has a refcount in @io_reserved_count - * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve * * Structure indicating the bus placement of an object. */ struct ttm_bus_placement { void *addr; - phys_addr_t base; - unsigned long offset; + phys_addr_t offset; bool is_iomem; - bool io_reserved_vm; - uint64_t io_reserved_count; }; /** diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index 241cc40839ed..75208c0a0cac 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -42,54 +42,17 @@ struct ttm_operation_ctx; #define TTM_PAGE_FLAG_SG (1 << 8) #define TTM_PAGE_FLAG_NO_RETRY (1 << 9) +#define TTM_PAGE_FLAG_PRIV_POPULATED (1 << 31) + enum ttm_caching_state { tt_uncached, tt_wc, tt_cached }; -struct ttm_backend_func { - /** - * struct ttm_backend_func member bind - * - * @ttm: Pointer to a struct ttm_tt. 
- * @bo_mem: Pointer to a struct ttm_resource describing the - * memory type and location for binding. - * - * Bind the backend pages into the aperture in the location - * indicated by @bo_mem. This function should be able to handle - * differences between aperture and system page sizes. - */ - int (*bind) (struct ttm_tt *ttm, struct ttm_resource *bo_mem); - - /** - * struct ttm_backend_func member unbind - * - * @ttm: Pointer to a struct ttm_tt. - * - * Unbind previously bound backend pages. This function should be - * able to handle differences between aperture and system page sizes. - */ - void (*unbind) (struct ttm_tt *ttm); - - /** - * struct ttm_backend_func member destroy - * - * @ttm: Pointer to a struct ttm_tt. - * - * Destroy the backend. This will be call back from ttm_tt_destroy so - * don't call ttm_tt_destroy from the callback or infinite loop. - */ - void (*destroy) (struct ttm_tt *ttm); -}; - /** * struct ttm_tt * - * @bdev: Pointer to a struct ttm_bo_device. - * @func: Pointer to a struct ttm_backend_func that describes - * the backend methods. - * pointer. * @pages: Array of pages backing the data. * @num_pages: Number of pages in the page array. * @bdev: Pointer to the current struct ttm_bo_device. @@ -103,21 +66,29 @@ struct ttm_backend_func { * memory. */ struct ttm_tt { - struct ttm_bo_device *bdev; - struct ttm_backend_func *func; struct page **pages; uint32_t page_flags; unsigned long num_pages; struct sg_table *sg; /* for SG objects via dma-buf */ struct file *swap_storage; enum ttm_caching_state caching_state; - enum { - tt_bound, - tt_unbound, - tt_unpopulated, - } state; }; +static inline bool ttm_tt_is_populated(struct ttm_tt *tt) +{ + return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED; +} + +static inline void ttm_tt_set_unpopulated(struct ttm_tt *tt) +{ + tt->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED; +} + +static inline void ttm_tt_set_populated(struct ttm_tt *tt) +{ + tt->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED; +} + /** * struct ttm_dma_tt * @@ -176,33 +147,20 @@ void ttm_tt_fini(struct ttm_tt *ttm); void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); /** - * ttm_ttm_bind: - * - * @ttm: The struct ttm_tt containing backing pages. - * @bo_mem: The struct ttm_resource identifying the binding location. - * - * Bind the pages of @ttm to an aperture location identified by @bo_mem - */ -int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem, - struct ttm_operation_ctx *ctx); - -/** * ttm_ttm_destroy: * * @ttm: The struct ttm_tt. * * Unbind, unpopulate and destroy common struct ttm_tt. */ -void ttm_tt_destroy(struct ttm_tt *ttm); +void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm); /** - * ttm_ttm_unbind: - * - * @ttm: The struct ttm_tt. + * ttm_tt_destroy_common: * - * Unbind a struct ttm_tt. + * Called from driver to destroy common path. */ -void ttm_tt_unbind(struct ttm_tt *ttm); +void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm); /** * ttm_tt_swapin: @@ -227,7 +185,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm); * and cache flushes and potential page splitting / combining. 
*/ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); -int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage); +int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct file *persistent_swap_storage); /** * ttm_tt_populate - allocate pages for a ttm @@ -236,7 +194,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage); * * Calls the driver method to allocate pages for a ttm */ -int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); +int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); /** * ttm_tt_unpopulate - free pages from a ttm @@ -245,7 +203,7 @@ int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); * * Calls the driver method to free all pages from a ttm */ -void ttm_tt_unpopulate(struct ttm_tt *ttm); +void ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm); #if IS_ENABLED(CONFIG_AGP) #include <linux/agp_backend.h> @@ -265,8 +223,10 @@ void ttm_tt_unpopulate(struct ttm_tt *ttm); struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, struct agp_bridge_data *bridge, uint32_t page_flags); -int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); -void ttm_agp_tt_unpopulate(struct ttm_tt *ttm); +int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem); +void ttm_agp_unbind(struct ttm_tt *ttm); +void ttm_agp_destroy(struct ttm_tt *ttm); +bool ttm_agp_is_bound(struct ttm_tt *ttm); #endif #endif diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h new file mode 100644 index 000000000000..a889f28afb42 --- /dev/null +++ b/include/linux/adreno-smmu-priv.h @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google, Inc + */ + +#ifndef __ADRENO_SMMU_PRIV_H +#define __ADRENO_SMMU_PRIV_H + +#include <linux/io-pgtable.h> + +/** + * struct adreno_smmu_priv - private interface between adreno-smmu and GPU + * + * @cookie: An opaque token provided by adreno-smmu and passed + * back into the callbacks + * @get_ttbr1_cfg: Get the TTBR1 config for the GPUs context-bank + * @set_ttbr0_cfg: Set the TTBR0 config for the GPUs context bank. A + * NULL config disables TTBR0 translation, otherwise + * TTBR0 translation is enabled with the specified cfg + * + * The GPU driver (drm/msm) and adreno-smmu work together for controlling + * the GPU's SMMU instance. This is by necessity, as the GPU is directly + * updating the SMMU for context switches, while on the other hand we do + * not want to duplicate all of the initial setup logic from arm-smmu. + * + * This private interface is used for the two drivers to coordinate. The + * cookie and callback functions are populated when the GPU driver attaches + * its domain. + */ +struct adreno_smmu_priv { + const void *cookie; + const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie); + int (*set_ttbr0_cfg)(const void *cookie, const struct io_pgtable_cfg *cfg); +}; + +#endif /* __ADRENO_SMMU_PRIV_H */
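The kernel-doc above defines the handshake, but no consumer appears in this hunk. As a rough sketch only, under the assumption that the adreno_smmu_priv sits in the SMMU device's drvdata, a GPU driver attaching its domain might exercise the two callbacks as below; my_gpu and my_gpu_attach() are made-up illustration names, not part of this patch.

/* Hypothetical consumer sketch; my_gpu and my_gpu_attach() do not
 * exist in this patch. */
#include <linux/adreno-smmu-priv.h>
#include <linux/device.h>

struct my_gpu {
	struct device *smmu_dev;	/* drvdata assumed to be the adreno_smmu_priv */
	struct io_pgtable_cfg ttbr0_cfg;
};

static int my_gpu_attach(struct my_gpu *gpu)
{
	struct adreno_smmu_priv *priv = dev_get_drvdata(gpu->smmu_dev);
	const struct io_pgtable_cfg *ttbr1_cfg;

	/* Fetch the TTBR1 config that arm-smmu already programmed for
	 * the context bank... */
	ttbr1_cfg = priv->get_ttbr1_cfg(priv->cookie);

	/* ...derive a per-process TTBR0 config from it... */
	gpu->ttbr0_cfg = *ttbr1_cfg;

	/* ...and enable TTBR0 translation; passing NULL here instead
	 * would disable it again, as documented above. */
	return priv->set_ttbr0_cfg(priv->cookie, &gpu->ttbr0_cfg);
}

The real drm/msm code also allocates the TTBR0 pagetables itself before installing the config; the sketch only shows the coordination points.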
\ No newline at end of file diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index a2ca294eaebe..957b398d30e5 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -283,6 +283,7 @@ struct dma_buf_ops { * @exp_name: name of the exporter; useful for debugging. * @name: userspace-provided name; useful for accounting and debugging, * protected by @resv. + * @name_lock: spinlock to protect name access * @owner: pointer to exporter module; used for refcounting when exporter is a * kernel module. * @list_node: node for dma_buf accounting and debugging. @@ -311,7 +312,7 @@ struct dma_buf { void *vmap_ptr; const char *exp_name; const char *name; - spinlock_t name_lock; /* spinlock to protect name access */ + spinlock_t name_lock; struct module *owner; struct list_head list_node; void *priv; diff --git a/include/linux/font.h b/include/linux/font.h index 51b91c8b69d5..4a3f8741bb7e 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -33,6 +33,7 @@ struct font_desc { #define MINI4x6_IDX 9 #define FONT6x10_IDX 10 #define TER16x32_IDX 11 +#define FONT6x8_IDX 12 extern const struct font_desc font_vga_8x8, font_vga_8x16, @@ -45,7 +46,8 @@ extern const struct font_desc font_vga_8x8, font_acorn_8x8, font_mini_4x6, font_6x10, - font_ter_16x32; + font_ter_16x32, + font_6x8; /* Find a font with a specific name */ diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index bcee8eba62b3..e435bdb0bab3 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -115,10 +115,12 @@ struct phy_ops { /** * struct phy_attrs - represents phy attributes * @bus_width: Data path width implemented by PHY + * @max_link_rate: Maximum link rate supported by PHY (in Mbps) * @mode: PHY mode */ struct phy_attrs { u32 bus_width; + u32 max_link_rate; enum phy_mode mode; }; diff --git a/include/linux/via-core.h b/include/linux/via-core.h index 9e802deedb2d..8737599b9148 100644 --- a/include/linux/via-core.h +++ b/include/linux/via-core.h @@ -47,7 +47,6 @@ struct via_port_cfg { /* * Allow subdevs to register suspend/resume hooks. */ -#ifdef CONFIG_PM struct viafb_pm_hooks { struct list_head list; int (*suspend)(void *private); @@ -57,7 +56,6 @@ struct viafb_pm_hooks { void viafb_pm_register(struct viafb_pm_hooks *hooks); void viafb_pm_unregister(struct viafb_pm_hooks *hooks); -#endif /* CONFIG_PM */ /* * This is the global viafb "device" containing stuff needed by diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 8fe857e27ef3..4b8e38c5c4d8 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -11,6 +11,11 @@ struct irq_affinity; +struct virtio_shm_region { + u64 addr; + u64 len; +}; + /** * virtio_config_ops - operations for configuring a virtio device * Note: Do not assume that a transport implements all of the operations @@ -66,6 +71,7 @@ struct irq_affinity; * the caller can then copy. * @set_vq_affinity: set the affinity for a virtqueue (optional). * @get_vq_affinity: get the affinity for a virtqueue (optional). + * @get_shm_region: get a shared memory region based on the index. */ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { @@ -89,6 +95,8 @@ struct virtio_config_ops { const struct cpumask *cpu_mask); const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev, int index); + bool (*get_shm_region)(struct virtio_device *vdev, + struct virtio_shm_region *region, u8 id); }; /* If driver didn't advertise the feature, it will never appear. 
*/ @@ -251,6 +259,15 @@ int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask) return 0; } +static inline +bool virtio_get_shm_region(struct virtio_device *vdev, + struct virtio_shm_region *region, u8 id) +{ + if (!vdev->config->get_shm_region) + return false; + return vdev->config->get_shm_region(vdev, region, id); +} + static inline bool virtio_is_little_endian(struct virtio_device *vdev) { return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) || diff --git a/include/uapi/linux/virtio_mmio.h b/include/uapi/linux/virtio_mmio.h index c4b09689ab64..0650f91bea6c 100644 --- a/include/uapi/linux/virtio_mmio.h +++ b/include/uapi/linux/virtio_mmio.h @@ -122,6 +122,17 @@ #define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0 #define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4 +/* Shared memory region id */ +#define VIRTIO_MMIO_SHM_SEL 0x0ac + +/* Shared memory region length, 64 bits in two halves */ +#define VIRTIO_MMIO_SHM_LEN_LOW 0x0b0 +#define VIRTIO_MMIO_SHM_LEN_HIGH 0x0b4 + +/* Shared memory region base address, 64 bits in two halves */ +#define VIRTIO_MMIO_SHM_BASE_LOW 0x0b8 +#define VIRTIO_MMIO_SHM_BASE_HIGH 0x0bc + /* Configuration atomicity value */ #define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 90007a1abcab..3a86f36d7e3d 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -113,6 +113,8 @@ #define VIRTIO_PCI_CAP_DEVICE_CFG 4 /* PCI configuration access */ #define VIRTIO_PCI_CAP_PCI_CFG 5 +/* Additional shared memory capability */ +#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8 /* This is the PCI capability header: */ struct virtio_pci_cap { @@ -121,11 +123,18 @@ struct virtio_pci_cap { __u8 cap_len; /* Generic PCI field: capability length */ __u8 cfg_type; /* Identifies the structure. */ __u8 bar; /* Where to find it. */ - __u8 padding[3]; /* Pad to full dword. */ + __u8 id; /* Multiple capabilities of the same type */ + __u8 padding[2]; /* Pad to full dword. */ __le32 offset; /* Offset within bar. */ __le32 length; /* Length of the structure, in bytes. */ }; +struct virtio_pci_cap64 { + struct virtio_pci_cap cap; + __le32 offset_hi; /* Most sig 32 bits of offset */ + __le32 length_hi; /* Most sig 32 bits of length */ +}; + struct virtio_pci_notify_cap { struct virtio_pci_cap cap; __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */ diff --git a/include/video/mbxfb.h b/include/video/mbxfb.h deleted file mode 100644 index 35921cb6d1e5..000000000000 --- a/include/video/mbxfb.h +++ /dev/null @@ -1,99 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __MBX_FB_H -#define __MBX_FB_H - -#include <asm/ioctl.h> -#include <asm/types.h> - -struct mbxfb_val { - unsigned int defval; - unsigned int min; - unsigned int max; -}; - -struct fb_info; - -struct mbxfb_platform_data { - /* Screen info */ - struct mbxfb_val xres; - struct mbxfb_val yres; - struct mbxfb_val bpp; - - /* Memory info */ - unsigned long memsize; /* if 0 use ODFB? 
*/ - unsigned long timings1; - unsigned long timings2; - unsigned long timings3; - - int (*probe)(struct fb_info *fb); - int (*remove)(struct fb_info *fb); -}; - -/* planar */ -#define MBXFB_FMT_YUV16 0 -#define MBXFB_FMT_YUV12 1 - -/* packed */ -#define MBXFB_FMT_UY0VY1 2 -#define MBXFB_FMT_VY0UY1 3 -#define MBXFB_FMT_Y0UY1V 4 -#define MBXFB_FMT_Y0VY1U 5 -struct mbxfb_overlaySetup { - __u32 enable; - __u32 x, y; - __u32 width, height; - __u32 fmt; - __u32 mem_offset; - __u32 scaled_width; - __u32 scaled_height; - - /* Filled by the driver */ - __u32 U_offset; - __u32 V_offset; - - __u16 Y_stride; - __u16 UV_stride; -}; - -#define MBXFB_ALPHABLEND_NONE 0 -#define MBXFB_ALPHABLEND_GLOBAL 1 -#define MBXFB_ALPHABLEND_PIXEL 2 - -#define MBXFB_COLORKEY_DISABLED 0 -#define MBXFB_COLORKEY_PREVIOUS 1 -#define MBXFB_COLORKEY_CURRENT 2 -struct mbxfb_alphaCtl { - __u8 overlay_blend_mode; - __u8 overlay_colorkey_mode; - __u8 overlay_global_alpha; - __u32 overlay_colorkey; - __u32 overlay_colorkey_mask; - - __u8 graphics_blend_mode; - __u8 graphics_colorkey_mode; - __u8 graphics_global_alpha; - __u32 graphics_colorkey; - __u32 graphics_colorkey_mask; -}; - -#define MBXFB_PLANE_GRAPHICS 0 -#define MBXFB_PLANE_VIDEO 1 -struct mbxfb_planeorder { - __u8 bottom; - __u8 top; -}; - -struct mbxfb_reg { - __u32 addr; /* offset from 0x03fe 0000 */ - __u32 val; /* value */ - __u32 mask; /* which bits to touch (for write) */ -}; - -#define MBXFB_IOCX_OVERLAY _IOWR(0xF4, 0x00,struct mbxfb_overlaySetup) -#define MBXFB_IOCG_ALPHA _IOR(0xF4, 0x01,struct mbxfb_alphaCtl) -#define MBXFB_IOCS_ALPHA _IOW(0xF4, 0x02,struct mbxfb_alphaCtl) -#define MBXFB_IOCS_PLANEORDER _IOR(0xF4, 0x03,struct mbxfb_planeorder) -#define MBXFB_IOCS_REG _IOW(0xF4, 0x04,struct mbxfb_reg) -#define MBXFB_IOCX_REG _IOWR(0xF4, 0x05,struct mbxfb_reg) - -#endif /* __MBX_FB_H */ diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig index 37baa79cdd71..c035fde66aeb 100644 --- a/lib/fonts/Kconfig +++ b/lib/fonts/Kconfig @@ -119,6 +119,12 @@ config FONT_TER16x32 This is the high resolution, large version for use with HiDPI screens. If the standard font is unreadable for you, say Y, otherwise say N. +config FONT_6x8 + bool "OLED 6x8 font" if FONTS + depends on FRAMEBUFFER_CONSOLE + help + This font is useful for small displays (OLED). 
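As a usage note, not part of this series: drivers normally reach a font like this through the lib/fonts lookup helpers rather than by naming the font_desc directly. A minimal sketch, assuming the new font registers under the name "6x8" the way the other lib/fonts entries register theirs:

/* Sketch only: prefer the small OLED font when it is built in,
 * otherwise let the font core pick a default for the resolution.
 * Assumes the new font_desc is registered as "6x8". */
#include <linux/font.h>
#include <linux/printk.h>

static const struct font_desc *pick_oled_font(int xres, int yres)
{
	const struct font_desc *font = find_font("6x8");

	if (!font)	/* e.g. CONFIG_FONT_6x8=n */
		font = get_default_font(xres, yres, -1, -1);

	pr_info("selected font %s (%dx%d)\n",
		font->name, font->width, font->height);
	return font;
}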
+ config FONT_AUTOSELECT def_bool y depends on !FONT_8x8 @@ -132,6 +138,7 @@ config FONT_AUTOSELECT depends on !FONT_SUN12x22 depends on !FONT_10x18 depends on !FONT_TER16x32 + depends on !FONT_6x8 select FONT_8x16 endif # FONT_SUPPORT diff --git a/lib/fonts/Makefile b/lib/fonts/Makefile index ed95070860de..e16f68492174 100644 --- a/lib/fonts/Makefile +++ b/lib/fonts/Makefile @@ -15,6 +15,7 @@ font-objs-$(CONFIG_FONT_ACORN_8x8) += font_acorn_8x8.o font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o font-objs-$(CONFIG_FONT_6x10) += font_6x10.o font-objs-$(CONFIG_FONT_TER16x32) += font_ter16x32.o +font-objs-$(CONFIG_FONT_6x8) += font_6x8.o font-objs += $(font-objs-y) diff --git a/lib/fonts/font_6x8.c b/lib/fonts/font_6x8.c new file mode 100644 index 000000000000..e06447788418 --- /dev/null +++ b/lib/fonts/font_6x8.c @@ -0,0 +1,2576 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/font.h> + +#define FONTDATAMAX 2048 + +static const unsigned char fontdata_6x8[FONTDATAMAX] = { + + /* 0 0x00 '^@' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 1 0x01 '^A' */ + 0x78, /* 011110 */ + 0x84, /* 100001 */ + 0xCC, /* 110011 */ + 0x84, /* 100001 */ + 0xCC, /* 110011 */ + 0xB4, /* 101101 */ + 0x78, /* 011110 */ + 0x00, /* 000000 */ + + /* 2 0x02 '^B' */ + 0x78, /* 011110 */ + 0xFC, /* 111111 */ + 0xB4, /* 101101 */ + 0xFC, /* 111111 */ + 0xB4, /* 101101 */ + 0xCC, /* 110011 */ + 0x78, /* 011110 */ + 0x00, /* 000000 */ + + /* 3 0x03 '^C' */ + 0x00, /* 000000 */ + 0x28, /* 001010 */ + 0x7C, /* 011111 */ + 0x7C, /* 011111 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 4 0x04 '^D' */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x7C, /* 011111 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 5 0x05 '^E' */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x38, /* 001110 */ + 0x6C, /* 011011 */ + 0x6C, /* 011011 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 6 0x06 '^F' */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x7C, /* 011111 */ + 0x7C, /* 011111 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 7 0x07 '^G' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x30, /* 001100 */ + 0x78, /* 011110 */ + 0x30, /* 001100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 8 0x08 '^H' */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0xCC, /* 110011 */ + 0x84, /* 100001 */ + 0xCC, /* 110011 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + + /* 9 0x09 '^I' */ + 0x00, /* 000000 */ + 0x30, /* 001100 */ + 0x48, /* 010010 */ + 0x84, /* 100001 */ + 0x48, /* 010010 */ + 0x30, /* 001100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 10 0x0A '^J' */ + 0xFC, /* 111111 */ + 0xCC, /* 110011 */ + 0xB4, /* 101101 */ + 0x78, /* 011110 */ + 0xB4, /* 101101 */ + 0xCC, /* 110011 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + + /* 11 0x0B '^K' */ + 0x3C, /* 001111 */ + 0x14, /* 000101 */ + 0x20, /* 001000 */ + 0x78, /* 011110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 12 0x0C '^L' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 13 0x0D '^M' */ + 0x18, /* 000110 */ + 0x14, /* 000101 */ + 0x14, /* 000101 */ 
+ 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x70, /* 011100 */ + 0x60, /* 011000 */ + 0x00, /* 000000 */ + + /* 14 0x0E '^N' */ + 0x3C, /* 001111 */ + 0x24, /* 001001 */ + 0x3C, /* 001111 */ + 0x24, /* 001001 */ + 0x24, /* 001001 */ + 0x6C, /* 011011 */ + 0x6C, /* 011011 */ + 0x00, /* 000000 */ + + /* 15 0x0F '^O' */ + 0x10, /* 000100 */ + 0x54, /* 010101 */ + 0x38, /* 001110 */ + 0x6C, /* 011011 */ + 0x38, /* 001110 */ + 0x54, /* 010101 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 16 0x10 '^P' */ + 0x40, /* 010000 */ + 0x60, /* 011000 */ + 0x70, /* 011100 */ + 0x78, /* 011110 */ + 0x70, /* 011100 */ + 0x60, /* 011000 */ + 0x40, /* 010000 */ + 0x00, /* 000000 */ + + /* 17 0x11 '^Q' */ + 0x04, /* 000001 */ + 0x0C, /* 000011 */ + 0x1C, /* 000111 */ + 0x3C, /* 001111 */ + 0x1C, /* 000111 */ + 0x0C, /* 000011 */ + 0x04, /* 000001 */ + 0x00, /* 000000 */ + + /* 18 0x12 '^R' */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x54, /* 010101 */ + 0x10, /* 000100 */ + 0x54, /* 010101 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 19 0x13 '^S' */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x00, /* 000000 */ + 0x48, /* 010010 */ + 0x00, /* 000000 */ + + /* 20 0x14 '^T' */ + 0x3C, /* 001111 */ + 0x54, /* 010101 */ + 0x54, /* 010101 */ + 0x3C, /* 001111 */ + 0x14, /* 000101 */ + 0x14, /* 000101 */ + 0x14, /* 000101 */ + 0x00, /* 000000 */ + + /* 21 0x15 '^U' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x30, /* 001100 */ + 0x28, /* 001010 */ + 0x14, /* 000101 */ + 0x0C, /* 000011 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + + /* 22 0x16 '^V' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0xF8, /* 111110 */ + 0xF8, /* 111110 */ + 0xF8, /* 111110 */ + 0x00, /* 000000 */ + + /* 23 0x17 '^W' */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x54, /* 010101 */ + 0x10, /* 000100 */ + 0x54, /* 010101 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x7C, /* 011111 */ + + /* 24 0x18 '^X' */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x54, /* 010101 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 25 0x19 '^Y' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x54, /* 010101 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 26 0x1A '^Z' */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x08, /* 000010 */ + 0x7C, /* 011111 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 27 0x1B '^[' */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x7C, /* 011111 */ + 0x20, /* 001000 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 28 0x1C '^\' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x78, /* 011110 */ + 0x00, /* 000000 */ + + /* 29 0x1D '^]' */ + 0x00, /* 000000 */ + 0x48, /* 010010 */ + 0x84, /* 100001 */ + 0xFC, /* 111111 */ + 0x84, /* 100001 */ + 0x48, /* 010010 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 30 0x1E '^^' */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x38, /* 001110 */ + 0x7C, /* 011111 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 31 0x1F '^_' */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x7C, /* 011111 */ + 0x38, /* 001110 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 32 
0x20 ' ' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 33 0x21 '!' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 34 0x22 '"' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 35 0x23 '#' */ + 0x00, /* 000000 */ + 0x28, /* 001010 */ + 0x7C, /* 011111 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x7C, /* 011111 */ + 0x28, /* 001010 */ + 0x00, /* 000000 */ + + /* 36 0x24 '$' */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x40, /* 010000 */ + 0x30, /* 001100 */ + 0x08, /* 000010 */ + 0x70, /* 011100 */ + 0x20, /* 001000 */ + 0x00, /* 000000 */ + + /* 37 0x25 '%' */ + 0x64, /* 011001 */ + 0x64, /* 011001 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x4C, /* 010011 */ + 0x4C, /* 010011 */ + 0x00, /* 000000 */ + + /* 38 0x26 '&' */ + 0x30, /* 001100 */ + 0x48, /* 010010 */ + 0x50, /* 010100 */ + 0x20, /* 001000 */ + 0x54, /* 010101 */ + 0x48, /* 010010 */ + 0x34, /* 001101 */ + 0x00, /* 000000 */ + + /* 39 0x27 ''' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 40 0x28 '(' */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x20, /* 001000 */ + 0x20, /* 001000 */ + 0x10, /* 000100 */ + 0x08, /* 000010 */ + 0x00, /* 000000 */ + + /* 41 0x29 ')' */ + 0x20, /* 001000 */ + 0x10, /* 000100 */ + 0x08, /* 000010 */ + 0x08, /* 000010 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x00, /* 000000 */ + + /* 42 0x2A '*' */ + 0x10, /* 000100 */ + 0x54, /* 010101 */ + 0x38, /* 001110 */ + 0x54, /* 010101 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 43 0x2B '+' */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x7C, /* 011111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 44 0x2C ',' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x30, /* 001100 */ + 0x30, /* 001100 */ + 0x20, /* 001000 */ + + /* 45 0x2D '-' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 46 0x2E '.'
*/ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x18, /* 000110 */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + + /* 47 0x2F '/' */ + 0x04, /* 000001 */ + 0x08, /* 000010 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x20, /* 001000 */ + 0x40, /* 010000 */ + + /* 48 0x30 '0' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x4C, /* 010011 */ + 0x54, /* 010101 */ + 0x64, /* 011001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 49 0x31 '1' */ + 0x10, /* 000100 */ + 0x30, /* 001100 */ + 0x50, /* 010100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 50 0x32 '2' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x04, /* 000001 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 51 0x33 '3' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x04, /* 000001 */ + 0x18, /* 000110 */ + 0x04, /* 000001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 52 0x34 '4' */ + 0x08, /* 000010 */ + 0x18, /* 000110 */ + 0x28, /* 001010 */ + 0x48, /* 010010 */ + 0x7C, /* 011111 */ + 0x08, /* 000010 */ + 0x08, /* 000010 */ + 0x00, /* 000000 */ + + /* 53 0x35 '5' */ + 0x7C, /* 011111 */ + 0x40, /* 010000 */ + 0x78, /* 011110 */ + 0x04, /* 000001 */ + 0x04, /* 000001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 54 0x36 '6' */ + 0x18, /* 000110 */ + 0x20, /* 001000 */ + 0x40, /* 010000 */ + 0x78, /* 011110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 55 0x37 '7' */ + 0x7C, /* 011111 */ + 0x04, /* 000001 */ + 0x04, /* 000001 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 56 0x38 '8' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 57 0x39 '9' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x3C, /* 001111 */ + 0x04, /* 000001 */ + 0x08, /* 000010 */ + 0x30, /* 001100 */ + 0x00, /* 000000 */ + + /* 58 0x3A ':' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x18, /* 000110 */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + 0x18, /* 000110 */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + + /* 59 0x3B ';' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x30, /* 001100 */ + 0x30, /* 001100 */ + 0x00, /* 000000 */ + 0x30, /* 001100 */ + 0x30, /* 001100 */ + 0x20, /* 001000 */ + + /* 60 0x3C '<' */ + 0x04, /* 000001 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x10, /* 000100 */ + 0x08, /* 000010 */ + 0x04, /* 000001 */ + 0x00, /* 000000 */ + + /* 61 0x3D '=' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 62 0x3E '>' */ + 0x20, /* 001000 */ + 0x10, /* 000100 */ + 0x08, /* 000010 */ + 0x04, /* 000001 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x00, /* 000000 */ + + /* 63 0x3F '?' 
*/ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x04, /* 000001 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 64 0x40 '@' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x5C, /* 010111 */ + 0x54, /* 010101 */ + 0x5C, /* 010111 */ + 0x40, /* 010000 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 65 0x41 'A' */ + 0x10, /* 000100 */ + 0x28, /* 001010 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x7C, /* 011111 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 66 0x42 'B' */ + 0x78, /* 011110 */ + 0x24, /* 001001 */ + 0x24, /* 001001 */ + 0x38, /* 001110 */ + 0x24, /* 001001 */ + 0x24, /* 001001 */ + 0x78, /* 011110 */ + 0x00, /* 000000 */ + + /* 67 0x43 'C' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 68 0x44 'D' */ + 0x78, /* 011110 */ + 0x24, /* 001001 */ + 0x24, /* 001001 */ + 0x24, /* 001001 */ + 0x24, /* 001001 */ + 0x24, /* 001001 */ + 0x78, /* 011110 */ + 0x00, /* 000000 */ + + /* 69 0x45 'E' */ + 0x7C, /* 011111 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x78, /* 011110 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 70 0x46 'F' */ + 0x7C, /* 011111 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x78, /* 011110 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x00, /* 000000 */ + + /* 71 0x47 'G' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x40, /* 010000 */ + 0x5C, /* 010111 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 72 0x48 'H' */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x7C, /* 011111 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 73 0x49 'I' */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 74 0x4A 'J' */ + 0x1C, /* 000111 */ + 0x08, /* 000010 */ + 0x08, /* 000010 */ + 0x08, /* 000010 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x30, /* 001100 */ + 0x00, /* 000000 */ + + /* 75 0x4B 'K' */ + 0x44, /* 010001 */ + 0x48, /* 010010 */ + 0x50, /* 010100 */ + 0x60, /* 011000 */ + 0x50, /* 010100 */ + 0x48, /* 010010 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 76 0x4C 'L' */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 77 0x4D 'M' */ + 0x44, /* 010001 */ + 0x6C, /* 011011 */ + 0x54, /* 010101 */ + 0x54, /* 010101 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 78 0x4E 'N' */ + 0x44, /* 010001 */ + 0x64, /* 011001 */ + 0x54, /* 010101 */ + 0x4C, /* 010011 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 79 0x4F 'O' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 80 0x50 'P' */ + 0x78, /* 011110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x78, /* 011110 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x00, /* 000000 */ + + /* 81 0x51 'Q' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x54, /* 010101 */ + 0x48, /* 010010 */ + 
0x34, /* 001101 */ + 0x00, /* 000000 */ + + /* 82 0x52 'R' */ + 0x78, /* 011110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x78, /* 011110 */ + 0x50, /* 010100 */ + 0x48, /* 010010 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 83 0x53 'S' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x40, /* 010000 */ + 0x38, /* 001110 */ + 0x04, /* 000001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 84 0x54 'T' */ + 0x7C, /* 011111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 85 0x55 'U' */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 86 0x56 'V' */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x28, /* 001010 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 87 0x57 'W' */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x54, /* 010101 */ + 0x54, /* 010101 */ + 0x6C, /* 011011 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 88 0x58 'X' */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x28, /* 001010 */ + 0x10, /* 000100 */ + 0x28, /* 001010 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 89 0x59 'Y' */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x28, /* 001010 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 90 0x5A 'Z' */ + 0x7C, /* 011111 */ + 0x04, /* 000001 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x40, /* 010000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 91 0x5B '[' */ + 0x18, /* 000110 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + + /* 92 0x5C '\' */ + 0x40, /* 010000 */ + 0x20, /* 001000 */ + 0x20, /* 001000 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x08, /* 000010 */ + 0x08, /* 000010 */ + 0x04, /* 000001 */ + + /* 93 0x5D ']' */ + 0x30, /* 001100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x30, /* 001100 */ + 0x00, /* 000000 */ + + /* 94 0x5E '^' */ + 0x10, /* 000100 */ + 0x28, /* 001010 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 95 0x5F '_' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + + /* 96 0x60 '`' */ + 0x20, /* 001000 */ + 0x10, /* 000100 */ + 0x08, /* 000010 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 97 0x61 'a' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x04, /* 000001 */ + 0x3C, /* 001111 */ + 0x44, /* 010001 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 98 0x62 'b' */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x58, /* 010110 */ + 0x64, /* 011001 */ + 0x44, /* 010001 */ + 0x64, /* 011001 */ + 0x58, /* 010110 */ + 0x00, /* 000000 */ + + /* 99 0x63 'c' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x40, /* 010000 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 100 0x64 'd' */ + 0x04, /* 000001 */ + 0x04, /* 000001 */ + 0x34, /* 001101 */ + 0x4C, 
/* 010011 */ + 0x44, /* 010001 */ + 0x4C, /* 010011 */ + 0x34, /* 001101 */ + 0x00, /* 000000 */ + + /* 101 0x65 'e' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x7C, /* 011111 */ + 0x40, /* 010000 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 102 0x66 'f' */ + 0x0C, /* 000011 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 103 0x67 'g' */ + 0x00, /* 000000 */ + 0x34, /* 001101 */ + 0x4C, /* 010011 */ + 0x44, /* 010001 */ + 0x4C, /* 010011 */ + 0x34, /* 001101 */ + 0x04, /* 000001 */ + 0x38, /* 001110 */ + + /* 104 0x68 'h' */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x78, /* 011110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 105 0x69 'i' */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x30, /* 001100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 106 0x6A 'j' */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x30, /* 001100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x60, /* 011000 */ + + /* 107 0x6B 'k' */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x48, /* 010010 */ + 0x50, /* 010100 */ + 0x70, /* 011100 */ + 0x48, /* 010010 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 108 0x6C 'l' */ + 0x30, /* 001100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 109 0x6D 'm' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x68, /* 011010 */ + 0x54, /* 010101 */ + 0x54, /* 010101 */ + 0x54, /* 010101 */ + 0x54, /* 010101 */ + 0x00, /* 000000 */ + + /* 110 0x6E 'n' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x58, /* 010110 */ + 0x64, /* 011001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 111 0x6F 'o' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 112 0x70 'p' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x78, /* 011110 */ + 0x44, /* 010001 */ + 0x64, /* 011001 */ + 0x58, /* 010110 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + + /* 113 0x71 'q' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x3C, /* 001111 */ + 0x44, /* 010001 */ + 0x4C, /* 010011 */ + 0x34, /* 001101 */ + 0x04, /* 000001 */ + 0x04, /* 000001 */ + + /* 114 0x72 'r' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x58, /* 010110 */ + 0x64, /* 011001 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x00, /* 000000 */ + + /* 115 0x73 's' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x3C, /* 001111 */ + 0x40, /* 010000 */ + 0x38, /* 001110 */ + 0x04, /* 000001 */ + 0x78, /* 011110 */ + 0x00, /* 000000 */ + + /* 116 0x74 't' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x0C, /* 000011 */ + 0x00, /* 000000 */ + + /* 117 0x75 'u' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x4C, /* 010011 */ + 0x34, /* 001101 */ + 0x00, /* 000000 */ + + /* 118 0x76 'v' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x28, /* 001010 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 119 0x77 'w' 
*/ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x54, /* 010101 */ + 0x54, /* 010101 */ + 0x54, /* 010101 */ + 0x54, /* 010101 */ + 0x28, /* 001010 */ + 0x00, /* 000000 */ + + /* 120 0x78 'x' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x44, /* 010001 */ + 0x28, /* 001010 */ + 0x10, /* 000100 */ + 0x28, /* 001010 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 121 0x79 'y' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x3C, /* 001111 */ + 0x04, /* 000001 */ + 0x38, /* 001110 */ + + /* 122 0x7A 'z' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 123 0x7B '{' */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x08, /* 000010 */ + 0x00, /* 000000 */ + + /* 124 0x7C '|' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 125 0x7D '}' */ + 0x20, /* 001000 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x00, /* 000000 */ + + /* 126 0x7E '~' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x20, /* 001000 */ + 0x54, /* 010101 */ + 0x08, /* 000010 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 127 0x7F '' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x28, /* 001010 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 128 0x80 '\200' */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x40, /* 010000 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + + /* 129 0x81 '\201' */ + 0x00, /* 000000 */ + 0x28, /* 001010 */ + 0x00, /* 000000 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x4C, /* 010011 */ + 0x34, /* 001101 */ + 0x00, /* 000000 */ + + /* 130 0x82 '\202' */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x7C, /* 011111 */ + 0x40, /* 010000 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 131 0x83 '\203' */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x04, /* 000001 */ + 0x3C, /* 001111 */ + 0x44, /* 010001 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 132 0x84 '\204' */ + 0x28, /* 001010 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x04, /* 000001 */ + 0x3C, /* 001111 */ + 0x44, /* 010001 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 133 0x85 '\205' */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x04, /* 000001 */ + 0x3C, /* 001111 */ + 0x44, /* 010001 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 134 0x86 '\206' */ + 0x3C, /* 001111 */ + 0x18, /* 000110 */ + 0x38, /* 001110 */ + 0x04, /* 000001 */ + 0x3C, /* 001111 */ + 0x44, /* 010001 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 135 0x87 '\207' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x40, /* 010000 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + + /* 136 0x88 '\210' */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x7C, /* 011111 */ + 0x40, /* 010000 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 137 0x89 '\211' */ + 0x28, /* 001010 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ 
+ 0x7C, /* 011111 */ + 0x40, /* 010000 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 138 0x8A '\212' */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x7C, /* 011111 */ + 0x40, /* 010000 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 139 0x8B '\213' */ + 0x28, /* 001010 */ + 0x00, /* 000000 */ + 0x30, /* 001100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 140 0x8C '\214' */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + 0x30, /* 001100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 141 0x8D '\215' */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + 0x30, /* 001100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 142 0x8E '\216' */ + 0x44, /* 010001 */ + 0x10, /* 000100 */ + 0x28, /* 001010 */ + 0x44, /* 010001 */ + 0x7C, /* 011111 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 143 0x8F '\217' */ + 0x30, /* 001100 */ + 0x48, /* 010010 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x7C, /* 011111 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 144 0x90 '\220' */ + 0x10, /* 000100 */ + 0x7C, /* 011111 */ + 0x40, /* 010000 */ + 0x78, /* 011110 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 145 0x91 '\221' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x78, /* 011110 */ + 0x14, /* 000101 */ + 0x7C, /* 011111 */ + 0x50, /* 010100 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 146 0x92 '\222' */ + 0x3C, /* 001111 */ + 0x50, /* 010100 */ + 0x50, /* 010100 */ + 0x78, /* 011110 */ + 0x50, /* 010100 */ + 0x50, /* 010100 */ + 0x5C, /* 010111 */ + 0x00, /* 000000 */ + + /* 147 0x93 '\223' */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 148 0x94 '\224' */ + 0x28, /* 001010 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 149 0x95 '\225' */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 150 0x96 '\226' */ + 0x10, /* 000100 */ + 0x28, /* 001010 */ + 0x00, /* 000000 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x4C, /* 010011 */ + 0x34, /* 001101 */ + 0x00, /* 000000 */ + + /* 151 0x97 '\227' */ + 0x20, /* 001000 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x4C, /* 010011 */ + 0x34, /* 001101 */ + 0x00, /* 000000 */ + + /* 152 0x98 '\230' */ + 0x28, /* 001010 */ + 0x00, /* 000000 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x3C, /* 001111 */ + 0x04, /* 000001 */ + 0x38, /* 001110 */ + + /* 153 0x99 '\231' */ + 0x84, /* 100001 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 154 0x9A '\232' */ + 0x88, /* 100010 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 155 0x9B '\233' */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x54, /* 010101 */ + 0x50, /* 010100 */ + 0x54, /* 010101 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ 
+ 0x00, /* 000000 */ + + /* 156 0x9C '\234' */ + 0x30, /* 001100 */ + 0x48, /* 010010 */ + 0x40, /* 010000 */ + 0x70, /* 011100 */ + 0x40, /* 010000 */ + 0x44, /* 010001 */ + 0x78, /* 011110 */ + 0x00, /* 000000 */ + + /* 157 0x9D '\235' */ + 0x44, /* 010001 */ + 0x28, /* 001010 */ + 0x7C, /* 011111 */ + 0x10, /* 000100 */ + 0x7C, /* 011111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 158 0x9E '\236' */ + 0x70, /* 011100 */ + 0x48, /* 010010 */ + 0x70, /* 011100 */ + 0x48, /* 010010 */ + 0x5C, /* 010111 */ + 0x48, /* 010010 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 159 0x9F '\237' */ + 0x0C, /* 000011 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x60, /* 011000 */ + 0x00, /* 000000 */ + + /* 160 0xA0 '\240' */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x04, /* 000001 */ + 0x3C, /* 001111 */ + 0x44, /* 010001 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 161 0xA1 '\241' */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x30, /* 001100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 162 0xA2 '\242' */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 163 0xA3 '\243' */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x4C, /* 010011 */ + 0x34, /* 001101 */ + 0x00, /* 000000 */ + + /* 164 0xA4 '\244' */ + 0x34, /* 001101 */ + 0x58, /* 010110 */ + 0x00, /* 000000 */ + 0x58, /* 010110 */ + 0x64, /* 011001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 165 0xA5 '\245' */ + 0x58, /* 010110 */ + 0x44, /* 010001 */ + 0x64, /* 011001 */ + 0x54, /* 010101 */ + 0x4C, /* 010011 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 166 0xA6 '\246' */ + 0x38, /* 001110 */ + 0x04, /* 000001 */ + 0x3C, /* 001111 */ + 0x44, /* 010001 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 167 0xA7 '\247' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 168 0xA8 '\250' */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x40, /* 010000 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 169 0xA9 '\251' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 170 0xAA '\252' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x04, /* 000001 */ + 0x04, /* 000001 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 171 0xAB '\253' */ + 0x20, /* 001000 */ + 0x24, /* 001001 */ + 0x28, /* 001010 */ + 0x10, /* 000100 */ + 0x28, /* 001010 */ + 0x44, /* 010001 */ + 0x08, /* 000010 */ + 0x1C, /* 000111 */ + + /* 172 0xAC '\254' */ + 0x20, /* 001000 */ + 0x24, /* 001001 */ + 0x28, /* 001010 */ + 0x10, /* 000100 */ + 0x28, /* 001010 */ + 0x58, /* 010110 */ + 0x3C, /* 001111 */ + 0x08, /* 000010 */ + + /* 173 0xAD '\255' */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + + /* 174 0xAE '\256' */ + 0x00, /* 
000000 */ + 0x00, /* 000000 */ + 0x24, /* 001001 */ + 0x48, /* 010010 */ + 0x90, /* 100100 */ + 0x48, /* 010010 */ + 0x24, /* 001001 */ + 0x00, /* 000000 */ + + /* 175 0xAF '\257' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x90, /* 100100 */ + 0x48, /* 010010 */ + 0x24, /* 001001 */ + 0x48, /* 010010 */ + 0x90, /* 100100 */ + 0x00, /* 000000 */ + + /* 176 0xB0 '\260' */ + 0x10, /* 000100 */ + 0x44, /* 010001 */ + 0x10, /* 000100 */ + 0x44, /* 010001 */ + 0x10, /* 000100 */ + 0x44, /* 010001 */ + 0x10, /* 000100 */ + 0x44, /* 010001 */ + + /* 177 0xB1 '\261' */ + 0xA8, /* 101010 */ + 0x54, /* 010101 */ + 0xA8, /* 101010 */ + 0x54, /* 010101 */ + 0xA8, /* 101010 */ + 0x54, /* 010101 */ + 0xA8, /* 101010 */ + 0x54, /* 010101 */ + + /* 178 0xB2 '\262' */ + 0xDC, /* 110111 */ + 0x74, /* 011101 */ + 0xDC, /* 110111 */ + 0x74, /* 011101 */ + 0xDC, /* 110111 */ + 0x74, /* 011101 */ + 0xDC, /* 110111 */ + 0x74, /* 011101 */ + + /* 179 0xB3 '\263' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 180 0xB4 '\264' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0xF0, /* 111100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 181 0xB5 '\265' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0xF0, /* 111100 */ + 0x10, /* 000100 */ + 0xF0, /* 111100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 182 0xB6 '\266' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0xE8, /* 111010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + + /* 183 0xB7 '\267' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0xF8, /* 111110 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + + /* 184 0xB8 '\270' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0xF0, /* 111100 */ + 0x10, /* 000100 */ + 0xF0, /* 111100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 185 0xB9 '\271' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0xE8, /* 111010 */ + 0x08, /* 000010 */ + 0xE8, /* 111010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + + /* 186 0xBA '\272' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + + /* 187 0xBB '\273' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0xF8, /* 111110 */ + 0x08, /* 000010 */ + 0xE8, /* 111010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + + /* 188 0xBC '\274' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0xE8, /* 111010 */ + 0x08, /* 000010 */ + 0xF8, /* 111110 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 189 0xBD '\275' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0xF8, /* 111110 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 190 0xBE '\276' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0xF0, /* 111100 */ + 0x10, /* 000100 */ + 0xF0, /* 111100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 191 0xBF '\277' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0xF0, /* 111100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 192 0xC0 '\300' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x1C, /* 
000111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 193 0xC1 '\301' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 194 0xC2 '\302' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 195 0xC3 '\303' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x1C, /* 000111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 196 0xC4 '\304' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 197 0xC5 '\305' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0xFC, /* 111111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 198 0xC6 '\306' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x1C, /* 000111 */ + 0x10, /* 000100 */ + 0x1C, /* 000111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 199 0xC7 '\307' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x2C, /* 001011 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + + /* 200 0xC8 '\310' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x2C, /* 001011 */ + 0x20, /* 001000 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 201 0xC9 '\311' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x3C, /* 001111 */ + 0x20, /* 001000 */ + 0x2C, /* 001011 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + + /* 202 0xCA '\312' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0xEC, /* 111011 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 203 0xCB '\313' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0xEC, /* 111011 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + + /* 204 0xCC '\314' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x2C, /* 001011 */ + 0x20, /* 001000 */ + 0x2C, /* 001011 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + + /* 205 0xCD '\315' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 206 0xCE '\316' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0xEC, /* 111011 */ + 0x00, /* 000000 */ + 0xEC, /* 111011 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + + /* 207 0xCF '\317' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 208 0xD0 '\320' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 209 0xD1 '\321' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 210 0xD2 '\322' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 
001010 */ + 0x28, /* 001010 */ + + /* 211 0xD3 '\323' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 212 0xD4 '\324' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x1C, /* 000111 */ + 0x10, /* 000100 */ + 0x1C, /* 000111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 213 0xD5 '\325' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x1C, /* 000111 */ + 0x10, /* 000100 */ + 0x1C, /* 000111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 214 0xD6 '\326' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x3C, /* 001111 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + + /* 215 0xD7 '\327' */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0xFC, /* 111111 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + + /* 216 0xD8 '\330' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0xFC, /* 111111 */ + 0x10, /* 000100 */ + 0xFC, /* 111111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 217 0xD9 '\331' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0xF0, /* 111100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 218 0xDA '\332' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x1C, /* 000111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 219 0xDB '\333' */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + + /* 220 0xDC '\334' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + + /* 221 0xDD '\335' */ + 0xE0, /* 111000 */ + 0xE0, /* 111000 */ + 0xE0, /* 111000 */ + 0xE0, /* 111000 */ + 0xE0, /* 111000 */ + 0xE0, /* 111000 */ + 0xE0, /* 111000 */ + 0xE0, /* 111000 */ + + /* 222 0xDE '\336' */ + 0x1C, /* 000111 */ + 0x1C, /* 000111 */ + 0x1C, /* 000111 */ + 0x1C, /* 000111 */ + 0x1C, /* 000111 */ + 0x1C, /* 000111 */ + 0x1C, /* 000111 */ + 0x1C, /* 000111 */ + + /* 223 0xDF '\337' */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 224 0xE0 '\340' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x34, /* 001101 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x34, /* 001101 */ + 0x00, /* 000000 */ + + /* 225 0xE1 '\341' */ + 0x24, /* 001001 */ + 0x44, /* 010001 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x58, /* 010110 */ + 0x40, /* 010000 */ + + /* 226 0xE2 '\342' */ + 0x7C, /* 011111 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x00, /* 000000 */ + + /* 227 0xE3 '\343' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x28, /* 001010 */ + 0x00, /* 000000 */ + + /* 228 0xE4 '\344' */ + 0x7C, /* 011111 */ + 0x24, /* 001001 */ + 0x10, /* 000100 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x24, /* 001001 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 229 0xE5 '\345' */ + 
0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x3C, /* 001111 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x30, /* 001100 */ + 0x00, /* 000000 */ + + /* 230 0xE6 '\346' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x74, /* 011101 */ + 0x40, /* 010000 */ + + /* 231 0xE7 '\347' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x0C, /* 000011 */ + 0x00, /* 000000 */ + + /* 232 0xE8 '\350' */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 233 0xE9 '\351' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x7C, /* 011111 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 234 0xEA '\352' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x28, /* 001010 */ + 0x6C, /* 011011 */ + 0x00, /* 000000 */ + + /* 235 0xEB '\353' */ + 0x18, /* 000110 */ + 0x20, /* 001000 */ + 0x18, /* 000110 */ + 0x24, /* 001001 */ + 0x24, /* 001001 */ + 0x24, /* 001001 */ + 0x18, /* 000110 */ + 0x00, /* 000000 */ + + /* 236 0xEC '\354' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x54, /* 010101 */ + 0x54, /* 010101 */ + 0x54, /* 010101 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 237 0xED '\355' */ + 0x00, /* 000000 */ + 0x04, /* 000001 */ + 0x38, /* 001110 */ + 0x54, /* 010101 */ + 0x54, /* 010101 */ + 0x38, /* 001110 */ + 0x40, /* 010000 */ + 0x00, /* 000000 */ + + /* 238 0xEE '\356' */ + 0x3C, /* 001111 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x38, /* 001110 */ + 0x40, /* 010000 */ + 0x40, /* 010000 */ + 0x3C, /* 001111 */ + 0x00, /* 000000 */ + + /* 239 0xEF '\357' */ + 0x38, /* 001110 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x44, /* 010001 */ + 0x00, /* 000000 */ + + /* 240 0xF0 '\360' */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0xFC, /* 111111 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 241 0xF1 '\361' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x7C, /* 011111 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + + /* 242 0xF2 '\362' */ + 0x20, /* 001000 */ + 0x10, /* 000100 */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 243 0xF3 '\363' */ + 0x08, /* 000010 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x10, /* 000100 */ + 0x08, /* 000010 */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 244 0xF4 '\364' */ + 0x0C, /* 000011 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + + /* 245 0xF5 '\365' */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x10, /* 000100 */ + 0x60, /* 011000 */ + + /* 246 0xF6 '\366' */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x7C, /* 011111 */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 247 0xF7 '\367' */ + 0x00, /* 000000 */ + 0x20, /* 001000 */ + 0x54, /* 010101 */ + 
0x08, /* 000010 */ + 0x20, /* 001000 */ + 0x54, /* 010101 */ + 0x08, /* 000010 */ + 0x00, /* 000000 */ + + /* 248 0xF8 '\370' */ + 0x30, /* 001100 */ + 0x48, /* 010010 */ + 0x48, /* 010010 */ + 0x30, /* 001100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 249 0xF9 '\371' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x38, /* 001110 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 250 0xFA '\372' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x10, /* 000100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 251 0xFB '\373' */ + 0x04, /* 000001 */ + 0x08, /* 000010 */ + 0x08, /* 000010 */ + 0x50, /* 010100 */ + 0x50, /* 010100 */ + 0x20, /* 001000 */ + 0x20, /* 001000 */ + 0x00, /* 000000 */ + + /* 252 0xFC '\374' */ + 0x60, /* 011000 */ + 0x50, /* 010100 */ + 0x50, /* 010100 */ + 0x50, /* 010100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 253 0xFD '\375' */ + 0x60, /* 011000 */ + 0x10, /* 000100 */ + 0x20, /* 001000 */ + 0x70, /* 011100 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + + /* 254 0xFE '\376' */ + 0x00, /* 000000 */ + 0x38, /* 001110 */ + 0x38, /* 001110 */ + 0x38, /* 001110 */ + 0x38, /* 001110 */ + 0x38, /* 001110 */ + 0x38, /* 001110 */ + 0x00, /* 000000 */ + + /* 255 0xFF '\377' */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ + 0x00, /* 000000 */ +}; + +const struct font_desc font_6x8 = { + .idx = FONT6x8_IDX, + .name = "6x8", + .width = 6, + .height = 8, + .data = fontdata_6x8, + .pref = 0, +}; diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c index e7258d8c252b..5f4b07b56cd9 100644 --- a/lib/fonts/fonts.c +++ b/lib/fonts/fonts.c @@ -57,6 +57,9 @@ static const struct font_desc *fonts[] = { #ifdef CONFIG_FONT_TER16x32 &font_ter_16x32, #endif +#ifdef CONFIG_FONT_6x8 + &font_6x8, +#endif }; #define num_fonts ARRAY_SIZE(fonts) diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c index 3cc5e5921682..e03068917273 100644 --- a/samples/vfio-mdev/mbochs.c +++ b/samples/vfio-mdev/mbochs.c @@ -846,7 +846,7 @@ static struct sg_table *mbochs_map_dmabuf(struct dma_buf_attachment *at, if (sg_alloc_table_from_pages(sg, dmabuf->pages, dmabuf->pagecount, 0, dmabuf->mode.size, GFP_KERNEL) < 0) goto err2; - if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) + if (dma_map_sgtable(at->dev, sg, direction, 0)) goto err3; return sg; @@ -868,6 +868,7 @@ static void mbochs_unmap_dmabuf(struct dma_buf_attachment *at, dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id); + dma_unmap_sgtable(at->dev, sg, direction, 0); sg_free_table(sg); kfree(sg); }
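Note on the glyph layout in the new lib/fonts/font_6x8.c: fontdata_6x8[] holds 256 glyphs of 8 bytes each (hence FONTDATAMAX = 2048), one byte per pixel row, with the six visible pixels stored in the most-significant bits of every byte, exactly as the binary comments show. The sketch below is not part of the patch; it is a standalone userspace illustration of how such a row is decoded, and the demo_6x8 table and print_glyph() helper are made-up names, with two glyphs copied verbatim from the data above.

#include <stdio.h>

/* Two glyphs ('A' and 'B') copied from the fontdata_6x8[] table above. */
static const unsigned char demo_6x8[2][8] = {
	{ 0x10, 0x28, 0x44, 0x44, 0x7C, 0x44, 0x44, 0x00 },	/* 'A' */
	{ 0x78, 0x24, 0x24, 0x38, 0x24, 0x24, 0x78, 0x00 },	/* 'B' */
};

static void print_glyph(const unsigned char rows[8])
{
	for (int y = 0; y < 8; y++) {
		/* Only the 6 most-significant bits of each row carry pixels. */
		for (int x = 0; x < 6; x++)
			putchar(rows[y] & (0x80 >> x) ? '#' : '.');
		putchar('\n');
	}
}

int main(void)
{
	print_glyph(demo_6x8[0]);	/* prints the 'A' bitmap as ASCII art */
	return 0;
}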
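The fonts.c hunk registers the new font in the fonts[] table, which is what makes it reachable through lib/fonts' lookup helpers; the struct's .pref = 0 leaves default-font selection to the resolution-based scoring, while explicit lookup goes by .name. As a rough sketch (kernel context assumed; pick_small_font() is a hypothetical caller, only find_font() is a real exported helper):

#include <linux/font.h>

static const struct font_desc *pick_small_font(void)
{
	/*
	 * find_font() walks the fonts[] array by .name, so "6x8" resolves
	 * to font_6x8; it returns NULL when CONFIG_FONT_6x8 is disabled.
	 */
	return find_font("6x8");
}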
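The mbochs.c hunk is part of the tree-wide move to the sg_table-aware DMA wrappers, and it inverts the error test because the return conventions differ: dma_map_sg() returns the number of mapped entries (0 on failure), while dma_map_sgtable() returns 0 on success and a negative errno on failure, maintaining sgt->nents itself. The hunk also adds the dma_unmap_sgtable() call that the old unmap path lacked. A self-contained sketch of the new convention (map_one_buffer() and its arguments are illustrative stand-ins, not the sample's code):

#include <linux/dma-mapping.h>

static int map_one_buffer(struct device *dev, struct sg_table *sgt)
{
	int ret;

	/* 0 on success, negative errno on failure -- never a partial map. */
	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;

	/* ... the device may now DMA through sgt->sgl / sgt->nents ... */

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	return 0;
}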