author    Dave Airlie <airlied@redhat.com>    2022-11-15 17:29:49 +1000
committer Dave Airlie <airlied@redhat.com>    2022-11-16 07:17:32 +1000
commit    4e291f2f585313efa5200cce655e17c94906e50a (patch)
tree      a35bc4aa5e84ce6ae0df1b43ca431f6cd8f38997
parent    3076e09f36a58d796d7fea0c92a5011420228404 (diff)
parent    6b818c533dd8615a803a72733eace58fd06e5a3c (diff)
download  linux-4e291f2f585313efa5200cce655e17c94906e50a.tar.bz2
Merge tag 'drm-misc-next-2022-11-10-1' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 6.2:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
  - atomic-helper: Add begin_fb_access and end_fb_access hooks
  - fb-helper: Rework to move fb emulation into helpers
  - scheduler: rework entity flush, kill and fini
  - ttm: Optimize pool allocations

Driver Changes:
  - amdgpu: scheduler rework
  - hdlcd: Switch to DRM-managed resources
  - ingenic: Fix registration error path
  - lcdif: FIFO threshold tuning
  - meson: Fix return type of cvbs' mode_valid
  - ofdrm: multiple fixes (kconfig, types, endianness)
  - sun4i: A100 and D1 support
  - panel:
    - New Panel: Jadard JD9365DA-H3

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20221110083612.g63eaocoaa554soh@houat
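The amdgpu scheduler rework below changes the driver's job API throughout: amdgpu_job_alloc() and amdgpu_job_alloc_with_ib() now take the scheduler entity and owner at allocation time (so the drm_sched_job is initialized up front), and amdgpu_job_submit() no longer returns an error code but hands back the finished fence directly. A minimal sketch of the new call pattern, written as a hypothetical helper rather than code taken from the patch:

	/* Hypothetical helper illustrating the reworked amdgpu job flow. */
	static int my_run_sdma_job(struct amdgpu_device *adev, unsigned int num_dw)
	{
		struct amdgpu_job *job;
		struct dma_fence *fence;
		int r;

		/* Entity and owner are passed at allocation time now; the job is
		 * already bound to a scheduler entity when this returns. */
		r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
					     AMDGPU_FENCE_OWNER_UNDEFINED,
					     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
					     &job);
		if (r)
			return r;

		/* ... emit commands into job->ibs[0] ... */

		/* Submission cannot fail anymore and returns the finished fence;
		 * amdgpu_job_free() is only needed if we abort before submitting. */
		fence = amdgpu_job_submit(job);
		dma_fence_put(fence);
		return 0;
	}

This mirrors the conversions in amdgpu_ttm.c, gmc_v10_0.c and kfd_migrate.c in the diff below, which drop their old error_free paths after amdgpu_job_submit().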
-rw-r--r-- Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml | 30
-rw-r--r-- Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml | 70
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.yaml | 4
-rw-r--r-- Documentation/gpu/drm-kms-helpers.rst | 3
-rw-r--r-- MAINTAINERS | 8
-rw-r--r-- drivers/gpu/drm/Makefile | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 56
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 81
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 56
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 72
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 71
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_drv.c | 2
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 2
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_crtc.c | 25
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_drv.c | 43
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_drv.h | 2
-rw-r--r-- drivers/gpu/drm/arm/malidp_drv.c | 2
-rw-r--r-- drivers/gpu/drm/armada/armada_fbdev.c | 6
-rw-r--r-- drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 1
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 1
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/tc358762.c | 2
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 34
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 1006
-rw-r--r-- drivers/gpu/drm/drm_fbdev_generic.c | 494
-rw-r--r-- drivers/gpu/drm/drm_gem_atomic_helper.c | 66
-rw-r--r-- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_probe_helper.c | 1
-rw-r--r-- drivers/gpu/drm/drm_simple_kms_helper.c | 26
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_drv.h | 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 6
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/framebuffer.c | 6
-rw-r--r-- drivers/gpu/drm/gud/gud_drv.c | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 1
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 1
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_modeset.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.c | 8
-rw-r--r-- drivers/gpu/drm/imx/dcss/dcss-kms.c | 3
-rw-r--r-- drivers/gpu/drm/imx/imx-drm-core.c | 2
-rw-r--r-- drivers/gpu/drm/imx/imx-ldb.c | 2
-rw-r--r-- drivers/gpu/drm/imx/imx-tve.c | 1
-rw-r--r-- drivers/gpu/drm/imx/parallel-display.c | 2
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 9
-rw-r--r-- drivers/gpu/drm/kmb/kmb_drv.c | 2
-rw-r--r-- drivers/gpu/drm/kmb/kmb_plane.c | 1
-rw-r--r-- drivers/gpu/drm/logicvc/logicvc_drm.c | 2
-rw-r--r-- drivers/gpu/drm/logicvc/logicvc_mode.c | 2
-rw-r--r-- drivers/gpu/drm/mcde/mcde_drv.c | 3
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/meson/meson_drv.c | 2
-rw-r--r-- drivers/gpu/drm/meson/meson_encoder_cvbs.c | 7
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.c | 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.h | 1
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 4
-rw-r--r-- drivers/gpu/drm/mxsfb/lcdif_drv.c | 2
-rw-r--r-- drivers/gpu/drm/mxsfb/lcdif_kms.c | 16
-rw-r--r-- drivers/gpu/drm/mxsfb/lcdif_regs.h | 1
-rw-r--r-- drivers/gpu/drm/mxsfb/mxsfb_drv.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 613
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fbdev.c | 6
-rw-r--r-- drivers/gpu/drm/panel/Kconfig | 10
-rw-r--r-- drivers/gpu/drm/panel/Makefile | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9341.c | 3
-rw-r--r-- drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c | 473
-rw-r--r-- drivers/gpu/drm/pl111/pl111_drv.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.c | 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fb.c | 6
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 1
-rw-r--r-- drivers/gpu/drm/scheduler/sched_entity.c | 196
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c | 75
-rw-r--r-- drivers/gpu/drm/solomon/ssd130x.c | 2
-rw-r--r-- drivers/gpu/drm/sti/sti_drv.c | 2
-rw-r--r-- drivers/gpu/drm/stm/drv.c | 2
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_drv.c | 2
-rw-r--r-- drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 61
-rw-r--r-- drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h | 7
-rw-r--r-- drivers/gpu/drm/tegra/fb.c | 8
-rw-r--r-- drivers/gpu/drm/tidss/tidss_drv.c | 2
-rw-r--r-- drivers/gpu/drm/tidss/tidss_kms.c | 1
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_drv.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/tiny/arcpgu.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/bochs.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/cirrus.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/gm12u320.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/hx8357d.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/ili9163.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/ili9225.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/ili9341.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/ili9486.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/mi0283qt.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/ofdrm.c | 29
-rw-r--r-- drivers/gpu/drm/tiny/panel-mipi-dbi.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/repaper.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/simpledrm.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/st7586.c | 2
-rw-r--r-- drivers/gpu/drm/tiny/st7735r.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_pool.c | 82
-rw-r--r-- drivers/gpu/drm/tve200/tve200_drv.c | 3
-rw-r--r-- drivers/gpu/drm/udl/udl_drv.c | 2
-rw-r--r-- drivers/gpu/drm/v3d/v3d_drv.c | 1
-rw-r--r-- drivers/gpu/drm/vboxvideo/vbox_drv.c | 4
-rw-r--r-- drivers/gpu/drm/vboxvideo/vbox_main.c | 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 2
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.c | 1
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.h | 1
-rw-r--r-- drivers/gpu/drm/vkms/vkms_drv.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front_gem.c | 1
-rw-r--r-- drivers/gpu/drm/xlnx/zynqmp_kms.c | 2
-rw-r--r-- include/drm/drm_fb_helper.h | 68
-rw-r--r-- include/drm/drm_fbdev_generic.h | 15
-rw-r--r-- include/drm/drm_gem_atomic_helper.h | 20
-rw-r--r-- include/drm/drm_modeset_helper_vtables.h | 41
-rw-r--r-- include/drm/drm_simple_kms_helper.h | 20
-rw-r--r-- include/drm/gpu_scheduler.h | 20
154 files changed, 2921 insertions, 1381 deletions
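One core scheduler change worth noting before the per-file hunks: drm_sched_backend_ops replaces its .dependency callback with .prepare_job, which hands the scheduler one fence to wait on per call and returns NULL once the job may run; amdgpu's conversion is in amdgpu_job.c below. A rough driver-side sketch of the new hook, using a hypothetical mydrv_job wrapper that is not part of this patch:

	/* Hypothetical job wrapper; only the callback shape matters here. */
	struct mydrv_job {
		struct drm_sched_job base;
		struct dma_fence *extra_dep;
	};

	static struct dma_fence *
	mydrv_prepare_job(struct drm_sched_job *sched_job,
			  struct drm_sched_entity *s_entity)
	{
		struct mydrv_job *job = container_of(sched_job, struct mydrv_job, base);

		/* Hand the dependency (and its reference) over exactly once;
		 * returning NULL tells the scheduler the job is ready to run. */
		if (job->extra_dep) {
			struct dma_fence *f = job->extra_dep;

			job->extra_dep = NULL;
			return f;
		}
		return NULL;
	}

	static const struct drm_sched_backend_ops mydrv_sched_ops = {
		.prepare_job = mydrv_prepare_job, /* was .dependency */
		/* .run_job, .timedout_job and .free_job unchanged, omitted here */
	};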
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
index 7910831fa4b8..c731fbdc2fe0 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
@@ -12,9 +12,14 @@ maintainers:
properties:
compatible:
- enum:
- - allwinner,sun6i-a31-mipi-dsi
- - allwinner,sun50i-a64-mipi-dsi
+ oneOf:
+ - enum:
+ - allwinner,sun6i-a31-mipi-dsi
+ - allwinner,sun50i-a64-mipi-dsi
+ - allwinner,sun50i-a100-mipi-dsi
+ - items:
+ - const: allwinner,sun20i-d1-mipi-dsi
+ - const: allwinner,sun50i-a100-mipi-dsi
reg:
maxItems: 1
@@ -59,7 +64,6 @@ required:
- phys
- phy-names
- resets
- - vcc-dsi-supply
- port
allOf:
@@ -68,7 +72,9 @@ allOf:
properties:
compatible:
contains:
- const: allwinner,sun6i-a31-mipi-dsi
+ enum:
+ - allwinner,sun6i-a31-mipi-dsi
+ - allwinner,sun50i-a100-mipi-dsi
then:
properties:
@@ -78,16 +84,22 @@ allOf:
required:
- clock-names
+ else:
+ properties:
+ clocks:
+ maxItems: 1
+
- if:
properties:
compatible:
contains:
- const: allwinner,sun50i-a64-mipi-dsi
+ enum:
+ - allwinner,sun6i-a31-mipi-dsi
+ - allwinner,sun50i-a64-mipi-dsi
then:
- properties:
- clocks:
- minItems: 1
+ required:
+ - vcc-dsi-supply
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
new file mode 100644
index 000000000000..c06902e4fe70
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/jadard,jd9365da-h3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Jadard JD9365DA-HE WXGA DSI panel
+
+maintainers:
+ - Jagan Teki <jagan@edgeble.ai>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - chongzhou,cz101b4001
+ - const: jadard,jd9365da-h3
+
+ reg: true
+
+ vdd-supply:
+ description: supply regulator for VDD, usually 3.3V
+
+ vccio-supply:
+ description: supply regulator for VCCIO, usually 1.8V
+
+ reset-gpios: true
+
+ backlight: true
+
+ port: true
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+ - vccio-supply
+ - reset-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/pinctrl/rockchip.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "chongzhou,cz101b4001", "jadard,jd9365da-h3";
+ reg = <0>;
+ vdd-supply = <&lcd_3v3>;
+ vccio-supply = <&vcca_1v8>;
+ reset-gpios = <&gpio1 RK_PC2 GPIO_ACTIVE_HIGH>;
+ backlight = <&backlight>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 6e323a380294..c2c615cb0f48 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -246,6 +246,8 @@ patternProperties:
description: ChipOne
"^chipspark,.*":
description: ChipSPARK
+ "^chongzhou,.*":
+ description: Shenzhen Chongzhou Electronic Technology Co., Ltd
"^chrontel,.*":
description: Chrontel, Inc.
"^chrp,.*":
@@ -639,6 +641,8 @@ patternProperties:
description: ITian Corporation
"^iwave,.*":
description: iWave Systems Technologies Pvt. Ltd.
+ "^jadard,.*":
+ description: Jadard Technology Inc.
"^jdi,.*":
description: Japan Display Inc.
"^jedec,.*":
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index dbc85fd7a971..a4860ffd6e86 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -116,6 +116,9 @@ fbdev Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
:export:
+.. kernel-doc:: drivers/gpu/drm/drm_fbdev_generic.c
+ :export:
+
format Helper Functions Reference
=================================
diff --git a/MAINTAINERS b/MAINTAINERS
index 30e3df70daec..178a7a383b5f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6503,6 +6503,12 @@ S: Orphan / Obsolete
F: drivers/gpu/drm/i810/
F: include/uapi/drm/i810_drm.h
+DRM DRIVER FOR JADARD JD9365DA-H3 MIPI-DSI LCD PANELS
+M: Jagan Teki <jagan@edgeble.ai>
+S: Maintained
+F: Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
+F: drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+
DRM DRIVER FOR LOGICVC DISPLAY CONTROLLER
M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
S: Supported
@@ -7113,7 +7119,7 @@ F: drivers/gpu/drm/ttm/
F: include/drm/ttm/
DRM GPU SCHEDULER
-M: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+M: Luben Tuikov <luben.tuikov@amd.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6e55c47288e4..c44a54cadb61 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -117,7 +117,9 @@ drm_kms_helper-y := \
drm_self_refresh_helper.o \
drm_simple_kms_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
-drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
+drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += \
+ drm_fbdev_generic.o \
+ drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
#
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8816853e50c0..f99d4873bf22 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -673,7 +673,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
goto err;
}
- ret = amdgpu_job_alloc(adev, 1, &job, NULL);
+ ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 491d4846fc02..e1320edfc527 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -26,7 +26,6 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1bbd39b3b0fc..0528c2b1db6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -291,12 +291,8 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
return -EINVAL;
for (i = 0; i < p->gang_size; ++i) {
- ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
- if (ret)
- goto free_all_kdata;
-
- ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
- &fpriv->vm);
+ ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
+ num_ibs[i], &p->jobs[i]);
if (ret)
goto free_all_kdata;
}
@@ -430,7 +426,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
dma_fence_put(old);
}
- r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
+ r = amdgpu_sync_fence(&p->sync, fence);
dma_fence_put(fence);
if (r)
return r;
@@ -452,9 +448,20 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
return r;
}
- r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
- dma_fence_put(fence);
+ r = amdgpu_sync_fence(&p->sync, fence);
+ if (r)
+ goto error;
+ /*
+ * When we have an explicit dependency it might be necessary to insert a
+ * pipeline sync to make sure that all caches etc are flushed and the
+ * next job actually sees the results from the previous one.
+ */
+ if (fence->context == p->gang_leader->base.entity->fence_context)
+ r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+
+error:
+ dma_fence_put(fence);
return r;
}
@@ -1101,7 +1108,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
if (r)
return r;
@@ -1112,7 +1119,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
if (r)
return r;
}
@@ -1131,7 +1138,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
if (r)
return r;
}
@@ -1144,7 +1151,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&job->sync, vm->last_update);
+ r = amdgpu_sync_fence(&p->sync, vm->last_update);
if (r)
return r;
@@ -1176,7 +1183,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_job *leader = p->gang_leader;
struct amdgpu_bo_list_entry *e;
unsigned int i;
int r;
@@ -1188,14 +1194,14 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
sync_mode = amdgpu_bo_explicit_sync(bo) ?
AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
- r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode,
+ r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
&fpriv->vm);
if (r)
return r;
}
- for (i = 0; i < p->gang_size - 1; ++i) {
- r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
+ for (i = 0; i < p->gang_size; ++i) {
+ r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
if (r)
return r;
}
@@ -1241,7 +1247,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct dma_fence *fence;
fence = &p->jobs[i]->base.s_fence->scheduled;
- r = amdgpu_sync_fence(&leader->sync, fence);
+ r = drm_sched_job_add_dependency(&leader->base, fence);
if (r)
goto error_cleanup;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
index cbaa19b2b8a3..207e801c24ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -75,6 +75,8 @@ struct amdgpu_cs_parser {
unsigned num_post_deps;
struct amdgpu_cs_post_dep *post_deps;
+
+ struct amdgpu_sync sync;
};
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5b9f992e4607..ad4e78728733 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -37,6 +37,7 @@
#include <linux/pci-p2pdma.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 1a06b8d724f3..311a8ea6f065 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -39,8 +39,8 @@
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
@@ -1214,7 +1214,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
const struct drm_mode_config_funcs amdgpu_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
};
static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index bf2d50c8c92a..6b35bb948f96 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -25,6 +25,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem.h>
#include <drm/drm_vblank.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 258cffe3c06a..774c77bb8f4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -182,7 +182,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
need_ctx_switch = ring->current_ctx != fence_ctx;
if (ring->funcs->emit_pipeline_sync && job &&
- ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
+ ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
(amdgpu_sriov_vf(adev) && need_ctx_switch) ||
amdgpu_vm_need_pipeline_sync(ring, job))) {
need_pipe_sync = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 03d115d2b5ed..2a9a2593dc18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -170,26 +170,27 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
* @idle: resulting idle VMID
+ * @fence: fence to wait for if no id could be grabbed
*
* Try to find an idle VMID, if none is idle add a fence to wait to the sync
* object. Returns -ENOMEM when we are out of memory.
*/
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
- struct amdgpu_sync *sync,
- struct amdgpu_vmid **idle)
+ struct amdgpu_vmid **idle,
+ struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct dma_fence **fences;
unsigned i;
- int r;
- if (!dma_fence_is_signaled(ring->vmid_wait))
- return amdgpu_sync_fence(sync, ring->vmid_wait);
+ if (!dma_fence_is_signaled(ring->vmid_wait)) {
+ *fence = dma_fence_get(ring->vmid_wait);
+ return 0;
+ }
fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
if (!fences)
@@ -228,10 +229,10 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
return -ENOMEM;
}
- r = amdgpu_sync_fence(sync, &array->base);
+ *fence = dma_fence_get(&array->base);
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = &array->base;
- return r;
+ return 0;
}
kfree(fences);
@@ -243,19 +244,17 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
* @job: job who wants to use the VMID
* @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
*
* Try to assign a reserved VMID.
*/
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
- struct amdgpu_sync *sync,
- struct dma_fence *fence,
struct amdgpu_job *job,
- struct amdgpu_vmid **id)
+ struct amdgpu_vmid **id,
+ struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -282,7 +281,8 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
if (tmp) {
*id = NULL;
- return amdgpu_sync_fence(sync, tmp);
+ *fence = dma_fence_get(tmp);
+ return 0;
}
needs_flush = true;
}
@@ -290,7 +290,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
/* Good we can use this VMID. Remember this submission as
* user of the VMID.
*/
- r = amdgpu_sync_fence(&(*id)->active, fence);
+ r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
if (r)
return r;
@@ -304,19 +304,17 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
* @job: job who wants to use the VMID
* @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
*
* Try to reuse a VMID for this submission.
*/
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
- struct amdgpu_sync *sync,
- struct dma_fence *fence,
struct amdgpu_job *job,
- struct amdgpu_vmid **id)
+ struct amdgpu_vmid **id,
+ struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -352,7 +350,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
/* Good, we can use this VMID. Remember this submission as
* user of the VMID.
*/
- r = amdgpu_sync_fence(&(*id)->active, fence);
+ r = amdgpu_sync_fence(&(*id)->active,
+ &job->base.s_fence->finished);
if (r)
return r;
@@ -370,15 +369,13 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
* @job: job who wants to use the VMID
+ * @fence: fence to wait for if no id could be grabbed
*
* Allocate an id for the vm, adding fences to the sync obj as necessary.
*/
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct dma_fence *fence,
- struct amdgpu_job *job)
+ struct amdgpu_job *job, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -388,16 +385,16 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
int r = 0;
mutex_lock(&id_mgr->lock);
- r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
+ r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence);
if (r || !idle)
goto error;
if (vm->reserved_vmid[vmhub]) {
- r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
+ r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
if (r || !id)
goto error;
} else {
- r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
+ r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
if (r)
goto error;
@@ -406,7 +403,8 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
id = idle;
/* Remember this submission as user of the VMID */
- r = amdgpu_sync_fence(&id->active, fence);
+ r = amdgpu_sync_fence(&id->active,
+ &job->base.s_fence->finished);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 06c8a0034fa5..57efe61dceed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -84,8 +84,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
unsigned vmhub);
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct dma_fence *fence,
- struct amdgpu_job *job);
+ struct amdgpu_job *job, struct dma_fence **fence);
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
unsigned vmid);
void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index cd968e781077..032651a655f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -88,8 +88,9 @@ exit:
return DRM_GPU_SCHED_STAT_NOMINAL;
}
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_job **job, struct amdgpu_vm *vm)
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct drm_sched_entity *entity, void *owner,
+ unsigned int num_ibs, struct amdgpu_job **job)
{
if (num_ibs == 0)
return -EINVAL;
@@ -105,28 +106,34 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
- amdgpu_sync_create(&(*job)->sync);
- amdgpu_sync_create(&(*job)->sched_sync);
+ amdgpu_sync_create(&(*job)->explicit_sync);
(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;
- return 0;
+ if (!entity)
+ return 0;
+
+ return drm_sched_job_init(&(*job)->base, entity, owner);
}
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job)
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+ struct drm_sched_entity *entity, void *owner,
+ size_t size, enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_job **job)
{
int r;
- r = amdgpu_job_alloc(adev, 1, job, NULL);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
if (r)
return r;
(*job)->num_ibs = 1;
r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
- if (r)
+ if (r) {
+ if (entity)
+ drm_sched_job_cleanup(&(*job)->base);
kfree(*job);
+ }
return r;
}
@@ -166,9 +173,7 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
drm_sched_job_cleanup(s_job);
- amdgpu_sync_free(&job->sync);
- amdgpu_sync_free(&job->sched_sync);
-
+ amdgpu_sync_free(&job->explicit_sync);
dma_fence_put(&job->hw_fence);
}
@@ -190,9 +195,11 @@ void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
void amdgpu_job_free(struct amdgpu_job *job)
{
+ if (job->base.entity)
+ drm_sched_job_cleanup(&job->base);
+
amdgpu_job_free_resources(job);
- amdgpu_sync_free(&job->sync);
- amdgpu_sync_free(&job->sched_sync);
+ amdgpu_sync_free(&job->explicit_sync);
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
@@ -202,25 +209,16 @@ void amdgpu_job_free(struct amdgpu_job *job)
dma_fence_put(&job->hw_fence);
}
-int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
- void *owner, struct dma_fence **f)
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
{
- int r;
-
- if (!f)
- return -EINVAL;
-
- r = drm_sched_job_init(&job->base, entity, owner);
- if (r)
- return r;
+ struct dma_fence *f;
drm_sched_job_arm(&job->base);
-
- *f = dma_fence_get(&job->base.s_fence->finished);
+ f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
drm_sched_entity_push_job(&job->base);
- return 0;
+ return f;
}
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
@@ -238,30 +236,19 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
return 0;
}
-static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
- struct drm_sched_entity *s_entity)
+static struct dma_fence *
+amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
- struct amdgpu_vm *vm = job->vm;
- struct dma_fence *fence;
+ struct dma_fence *fence = NULL;
int r;
- fence = amdgpu_sync_get_fence(&job->sync);
- if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
- r = amdgpu_sync_fence(&job->sched_sync, fence);
- if (r)
- DRM_ERROR("Error adding fence (%d)\n", r);
- }
-
- while (fence == NULL && vm && !job->vmid) {
- r = amdgpu_vmid_grab(vm, ring, &job->sync,
- &job->base.s_fence->finished,
- job);
+ while (!fence && job->vm && !job->vmid) {
+ r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
-
- fence = amdgpu_sync_get_fence(&job->sync);
}
if (!fence && job->gang_submit)
@@ -281,8 +268,6 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
job = to_amdgpu_job(sched_job);
finished = &job->base.s_fence->finished;
- BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
-
trace_amdgpu_sched_run_job(job);
/* Skip job if VRAM is lost and never resubmit gangs */
@@ -341,7 +326,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
}
const struct drm_sched_backend_ops amdgpu_sched_ops = {
- .dependency = amdgpu_job_dependency,
+ .prepare_job = amdgpu_job_prepare_job,
.run_job = amdgpu_job_run,
.timedout_job = amdgpu_job_timedout,
.free_job = amdgpu_job_free_cb
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index ab7b150e5d50..a372802ea4e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -47,8 +47,7 @@ enum amdgpu_ib_pool_type;
struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
- struct amdgpu_sync sync;
- struct amdgpu_sync sched_sync;
+ struct amdgpu_sync explicit_sync;
struct dma_fence hw_fence;
struct dma_fence *gang_submit;
uint32_t preamble_status;
@@ -78,18 +77,20 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
return to_amdgpu_ring(job->base.entity->rq->sched);
}
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct drm_sched_entity *entity, void *owner,
+ unsigned int num_ibs, struct amdgpu_job **job);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+ struct drm_sched_entity *entity, void *owner,
+ size_t size, enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_job **job);
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
struct amdgpu_bo *gws, struct amdgpu_bo *oa);
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
struct amdgpu_job *leader);
void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
- void *owner, struct dma_fence **f);
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job);
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 518eb0e40d32..de182bfcf85f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -150,14 +150,15 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
const unsigned ib_size_dw = 16;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
ib = &job->ibs[0];
- ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
+ ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0,
+ PACKETJ_TYPE0);
ib->ptr[1] = 0xDEADBEEF;
for (i = 2; i < 16; i += 2) {
ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d75e0370a074..ba348046fcec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -29,6 +29,7 @@
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 37322550d750..8a39300b1a84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -36,7 +36,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include <linux/i2c.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 090e66a1b284..bac7976975bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -259,6 +259,14 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return 0;
}
+/* Free the entry back to the slab */
+static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
+{
+ hash_del(&e->node);
+ dma_fence_put(e->fence);
+ kmem_cache_free(amdgpu_sync_slab, e);
+}
+
/**
* amdgpu_sync_peek_fence - get the next fence not signaled yet
*
@@ -280,9 +288,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (dma_fence_is_signaled(f)) {
- hash_del(&e->node);
- dma_fence_put(f);
- kmem_cache_free(amdgpu_sync_slab, e);
+ amdgpu_sync_entry_free(e);
continue;
}
if (ring && s_fence) {
@@ -355,15 +361,42 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
if (r)
return r;
} else {
- hash_del(&e->node);
- dma_fence_put(f);
- kmem_cache_free(amdgpu_sync_slab, e);
+ amdgpu_sync_entry_free(e);
}
}
return 0;
}
+/**
+ * amdgpu_sync_push_to_job - push fences into job
+ * @sync: sync object to get the fences from
+ * @job: job to push the fences into
+ *
+ * Add all unsignaled fences from sync to job.
+ */
+int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
+{
+ struct amdgpu_sync_entry *e;
+ struct hlist_node *tmp;
+ struct dma_fence *f;
+ int i, r;
+
+ hash_for_each_safe(sync->fences, i, tmp, e, node) {
+ f = e->fence;
+ if (dma_fence_is_signaled(f)) {
+ amdgpu_sync_entry_free(e);
+ continue;
+ }
+
+ dma_fence_get(f);
+ r = drm_sched_job_add_dependency(&job->base, f);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
struct amdgpu_sync_entry *e;
@@ -375,9 +408,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
if (r)
return r;
- hash_del(&e->node);
- dma_fence_put(e->fence);
- kmem_cache_free(amdgpu_sync_slab, e);
+ amdgpu_sync_entry_free(e);
}
return 0;
@@ -396,11 +427,8 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
struct hlist_node *tmp;
unsigned int i;
- hash_for_each_safe(sync->fences, i, tmp, e, node) {
- hash_del(&e->node);
- dma_fence_put(e->fence);
- kmem_cache_free(amdgpu_sync_slab, e);
- }
+ hash_for_each_safe(sync->fences, i, tmp, e, node)
+ amdgpu_sync_entry_free(e);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 2d5c613cda10..cf1e9e858efd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -30,6 +30,7 @@ struct dma_fence;
struct dma_resv;
struct amdgpu_device;
struct amdgpu_ring;
+struct amdgpu_job;
enum amdgpu_sync_mode {
AMDGPU_SYNC_ALWAYS,
@@ -54,6 +55,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
+int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job);
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8c00a7a06c32..0ba87ca6f318 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -189,7 +189,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
struct amdgpu_device *adev = ring->adev;
unsigned offset, num_pages, num_dw, num_bytes;
uint64_t src_addr, dst_addr;
- struct dma_fence *fence;
struct amdgpu_job *job;
void *cpu_addr;
uint64_t flags;
@@ -229,7 +228,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED, &job);
if (r)
return r;
@@ -269,18 +270,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
}
}
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r)
- goto error_free;
-
- dma_fence_put(fence);
-
- return r;
-
-error_free:
- amdgpu_job_free(job);
- return r;
+ dma_fence_put(amdgpu_job_submit(job));
+ return 0;
}
/**
@@ -1417,7 +1408,8 @@ static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
}
static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
- unsigned long offset, void *buf, int len, int write)
+ unsigned long offset, void *buf,
+ int len, int write)
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
@@ -1441,26 +1433,27 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
memcpy(adev->mman.sdma_access_ptr, buf, len);
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+ &job);
if (r)
goto out;
amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
- src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
+ src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
+ src_mm.start;
dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
if (write)
swap(src_addr, dst_addr);
- amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
+ PAGE_SIZE, false);
amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r) {
- amdgpu_job_free(job);
- goto out;
- }
+ fence = amdgpu_job_submit(job);
if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
r = -ETIMEDOUT;
@@ -1959,7 +1952,9 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
AMDGPU_IB_POOL_DELAYED;
int r;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4, pool, job);
if (r)
return r;
@@ -1969,17 +1964,11 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
adev->gart.bo);
(*job)->vm_needs_flush = true;
}
- if (resv) {
- r = amdgpu_sync_resv(adev, &(*job)->sync, resv,
- AMDGPU_SYNC_ALWAYS,
- AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r) {
- DRM_ERROR("sync failed (%d).\n", r);
- amdgpu_job_free(*job);
- return r;
- }
- }
- return 0;
+ if (!resv)
+ return 0;
+
+ return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
+ DMA_RESV_USAGE_BOOKKEEP);
}
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
@@ -2024,8 +2013,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
if (direct_submit)
r = amdgpu_job_submit_direct(job, ring, fence);
else
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ *fence = amdgpu_job_submit(job);
if (r)
goto error_free;
@@ -2070,16 +2058,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
- if (r)
- goto error_free;
-
+ *fence = amdgpu_job_submit(job);
return 0;
-
-error_free:
- amdgpu_job_free(job);
- return r;
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 6eac649499d3..e00bb654e24b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1132,7 +1132,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
unsigned offset_idx = 0;
unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
- r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+ r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ 64, direct ? AMDGPU_IB_POOL_DIRECT :
AMDGPU_IB_POOL_DELAYED, &job);
if (r)
return r;
@@ -1175,16 +1177,13 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
goto err_free;
} else {
- r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
- AMDGPU_SYNC_ALWAYS,
- AMDGPU_FENCE_OWNER_UNDEFINED);
+ r = drm_sched_job_add_resv_dependencies(&job->base,
+ bo->tbo.base.resv,
+ DMA_RESV_USAGE_KERNEL);
if (r)
goto err_free;
- r = amdgpu_job_submit(job, &adev->uvd.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &f);
- if (r)
- goto err_free;
+ f = amdgpu_job_submit(job);
}
amdgpu_bo_reserve(bo, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 02cb3a12dd76..b239e874f2d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -450,8 +450,10 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+ &job);
if (r)
return r;
@@ -538,7 +540,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence *f = NULL;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ ib_size_dw * 4,
direct ? AMDGPU_IB_POOL_DIRECT :
AMDGPU_IB_POOL_DELAYED, &job);
if (r)
@@ -570,8 +574,7 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (direct)
r = amdgpu_job_submit_direct(job, ring, &f);
else
- r = amdgpu_job_submit(job, &ring->adev->vce.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ f = amdgpu_job_submit(job);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 0b52af415b28..3449145ab2bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -600,15 +600,16 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
+ u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
struct amdgpu_device *adev = ring->adev;
struct dma_fence *f = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
int i, r;
- r = amdgpu_job_alloc_with_ib(adev, 64,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+ 64, AMDGPU_IB_POOL_DIRECT,
+ &job);
if (r)
goto err;
@@ -787,8 +788,9 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
if (sq)
ib_size_dw += 8;
- r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+ ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+ &job);
if (r)
goto err;
@@ -916,8 +918,9 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
if (sq)
ib_size_dw += 8;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+ ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+ &job);
if (r)
return r;
@@ -982,8 +985,9 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
if (sq)
ib_size_dw += 8;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+ ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+ &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 69e105fa41f6..59cf64216fbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -47,6 +47,32 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
return r;
}
+/* Allocate a new job for @count PTE updates */
+static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
+ unsigned int count)
+{
+ enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+ : AMDGPU_IB_POOL_DELAYED;
+ struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
+ : &p->vm->delayed;
+ unsigned int ndw;
+ int r;
+
+ /* estimate how many dw we need */
+ ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+ if (p->pages_addr)
+ ndw += count * 2;
+ ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
+
+ r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
+ ndw * 4, pool, &p->job);
+ if (r)
+ return r;
+
+ p->num_dw_left = ndw;
+ return 0;
+}
+
/**
* amdgpu_vm_sdma_prepare - prepare SDMA command submission
*
@@ -61,21 +87,22 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode)
{
- enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
- : AMDGPU_IB_POOL_DELAYED;
- unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+ struct amdgpu_sync sync;
int r;
- r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
+ r = amdgpu_vm_sdma_alloc_job(p, 0);
if (r)
return r;
- p->num_dw_left = ndw;
-
if (!resv)
return 0;
- return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
+ amdgpu_sync_create(&sync);
+ r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
+ if (!r)
+ r = amdgpu_sync_push_to_job(&sync, p->job);
+ amdgpu_sync_free(&sync);
+ return r;
}
/**
@@ -91,20 +118,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
struct dma_fence **fence)
{
struct amdgpu_ib *ib = p->job->ibs;
- struct drm_sched_entity *entity;
struct amdgpu_ring *ring;
struct dma_fence *f;
- int r;
- entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
- ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
+ ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
+ sched);
WARN_ON(ib->length_dw == 0);
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > p->num_dw_left);
- r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
- if (r)
- goto error;
+ f = amdgpu_job_submit(p->job);
if (p->unlocked) {
struct dma_fence *tmp = dma_fence_get(f);
@@ -127,10 +150,6 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
}
dma_fence_put(f);
return 0;
-
-error:
- amdgpu_job_free(p->job);
- return r;
}
/**
@@ -210,8 +229,6 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
uint64_t flags)
{
struct amdgpu_bo *bo = &vmbo->bo;
- enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
- : AMDGPU_IB_POOL_DELAYED;
struct dma_resv_iter cursor;
unsigned int i, ndw, nptes;
struct dma_fence *fence;
@@ -221,7 +238,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
/* Wait for PD/PT moves to be completed */
dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
- r = amdgpu_sync_fence(&p->job->sync, fence);
+ r = drm_sched_job_add_dependency(&p->job->base, fence);
if (r) {
dma_resv_iter_end(&cursor);
return r;
@@ -238,19 +255,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
if (r)
return r;
- /* estimate how many dw we need */
- ndw = 32;
- if (p->pages_addr)
- ndw += count * 2;
- ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
- ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
-
- r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
- &p->job);
+ r = amdgpu_vm_sdma_alloc_job(p, count);
if (r)
return r;
-
- p->num_dw_left = ndw;
}
if (!p->pages_addr) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 05051d5d2ec3..90f87b2d985b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -21,6 +21,7 @@
*
*/
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index c928bc9eb202..0352de72c886 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -21,6 +21,7 @@
*
*/
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 62315fd5a05f..07bd16e82046 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -23,6 +23,7 @@
#include <linux/pci.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 87d5e4c21cb3..d73df100f2b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -21,6 +21,7 @@
*
*/
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index f513e2c2e964..657e53708248 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -371,7 +371,9 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* translation. Avoid this by doing the invalidation from the SDMA
* itself.
*/
- r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+ r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
&job);
if (r)
goto error_alloc;
@@ -380,10 +382,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
job->vm_needs_flush = true;
job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r)
- goto error_submit;
+ fence = amdgpu_job_submit(job);
mutex_unlock(&adev->mman.gtt_window_lock);
@@ -392,9 +391,6 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
-error_submit:
- amdgpu_job_free(job);
-
error_alloc:
mutex_unlock(&adev->mman.gtt_window_lock);
DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 375c440957dc..5fe872f4bea7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -216,8 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -280,8 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index e668b3baa8c6..e407be6cb63c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -213,7 +213,7 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
*
* Open up a stream for HW test
*/
-static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
struct amdgpu_bo *bo,
struct dma_fence **fence)
{
@@ -224,8 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -276,7 +276,7 @@ err:
*
* Close up a stream for HW test or if userspace failed to do so
*/
-static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
struct amdgpu_bo *bo,
struct dma_fence **fence)
{
@@ -287,8 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 3723e90e3a90..c9a95a816912 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -65,8 +65,11 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = npages * 8;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
- AMDGPU_IB_POOL_DELAYED, &job);
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4 + num_bytes,
+ AMDGPU_IB_POOL_DELAYED,
+ &job);
if (r)
return r;
@@ -89,18 +92,10 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
cpu_addr = &job->ibs[0].ptr[num_dw];
amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r)
- goto error_free;
-
+ fence = amdgpu_job_submit(job);
dma_fence_put(fence);
return r;
-
-error_free:
- amdgpu_job_free(job);
- return r;
}
/**
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3db79f0b5a8f..441810277cb5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -82,7 +82,6 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
@@ -2812,7 +2811,6 @@ const struct amdgpu_ip_block_version dm_ip_block =
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
.get_format_info = amd_get_format_info,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index 9fce4239d4ad..3f4e719eebd8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -9,7 +9,7 @@
#include <linux/platform_device.h>
#include <linux/component.h>
#include <linux/pm_runtime.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 451746ebbe71..62dc64550793 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -10,7 +10,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
@@ -59,7 +58,6 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
static const struct drm_driver komeda_kms_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .lastclose = drm_fb_helper_lastclose,
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create),
.fops = &komeda_cma_fops,
.name = "komeda",
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 7030339fa232..3cfefadc7c9d 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -19,7 +19,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_dma_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
@@ -275,7 +274,7 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
dest_h = drm_rect_height(&new_plane_state->dst);
scanout_start = drm_fb_dma_get_gem_addr(fb, new_plane_state, 0);
- hdlcd = plane->dev->dev_private;
+ hdlcd = drm_to_hdlcd_priv(plane->dev);
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, fb->pitches[0]);
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1);
@@ -290,7 +289,6 @@ static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
static const struct drm_plane_funcs hdlcd_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -298,24 +296,19 @@ static const struct drm_plane_funcs hdlcd_plane_funcs = {
static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
struct drm_plane *plane = NULL;
u32 formats[ARRAY_SIZE(supported_formats)], i;
- int ret;
-
- plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
- if (!plane)
- return ERR_PTR(-ENOMEM);
for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
formats[i] = supported_formats[i].fourcc;
- ret = drm_universal_plane_init(drm, plane, 0xff, &hdlcd_plane_funcs,
- formats, ARRAY_SIZE(formats),
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret)
- return ERR_PTR(ret);
+ plane = drmm_universal_plane_alloc(drm, struct drm_plane, dev, 0xff,
+ &hdlcd_plane_funcs,
+ formats, ARRAY_SIZE(formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (IS_ERR(plane))
+ return plane;
drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
hdlcd->plane = plane;
@@ -325,7 +318,7 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
int hdlcd_setup_crtc(struct drm_device *drm)
{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
struct drm_plane *primary;
int ret;
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index a032003c340c..7043d1c9ed8f 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -26,7 +26,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -98,7 +98,7 @@ static void hdlcd_irq_uninstall(struct hdlcd_drm_private *hdlcd)
static int hdlcd_load(struct drm_device *drm, unsigned long flags)
{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
struct platform_device *pdev = to_platform_device(drm->dev);
struct resource *res;
u32 version;
@@ -175,14 +175,21 @@ static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static void hdlcd_setup_mode_config(struct drm_device *drm)
+static int hdlcd_setup_mode_config(struct drm_device *drm)
{
- drm_mode_config_init(drm);
+ int ret;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = HDLCD_MAX_XRES;
drm->mode_config.max_height = HDLCD_MAX_YRES;
drm->mode_config.funcs = &hdlcd_mode_config_funcs;
+
+ return 0;
}
#ifdef CONFIG_DEBUG_FS
@@ -190,7 +197,7 @@ static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *drm = node->minor->dev;
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count));
seq_printf(m, "dma_end : %d\n", atomic_read(&hdlcd->dma_end_count));
@@ -203,7 +210,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *drm = node->minor->dev;
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
unsigned long clkrate = clk_get_rate(hdlcd->clk);
unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000;
@@ -247,18 +254,18 @@ static int hdlcd_drm_bind(struct device *dev)
struct hdlcd_drm_private *hdlcd;
int ret;
- hdlcd = devm_kzalloc(dev, sizeof(*hdlcd), GFP_KERNEL);
- if (!hdlcd)
- return -ENOMEM;
+ hdlcd = devm_drm_dev_alloc(dev, &hdlcd_driver, typeof(*hdlcd), base);
+ if (IS_ERR(hdlcd))
+ return PTR_ERR(hdlcd);
- drm = drm_dev_alloc(&hdlcd_driver, dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
+ drm = &hdlcd->base;
- drm->dev_private = hdlcd;
dev_set_drvdata(dev, drm);
- hdlcd_setup_mode_config(drm);
+ ret = hdlcd_setup_mode_config(drm);
+ if (ret)
+ goto err_free;
+
ret = hdlcd_load(drm, 0);
if (ret)
goto err_free;
@@ -317,17 +324,14 @@ err_unload:
hdlcd_irq_uninstall(hdlcd);
of_reserved_mem_device_release(drm->dev);
err_free:
- drm_mode_config_cleanup(drm);
dev_set_drvdata(dev, NULL);
- drm_dev_put(drm);
-
return ret;
}
static void hdlcd_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
@@ -341,10 +345,7 @@ static void hdlcd_drm_unbind(struct device *dev)
if (pm_runtime_enabled(dev))
pm_runtime_disable(dev);
of_reserved_mem_device_release(dev);
- drm_mode_config_cleanup(drm);
- drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_put(drm);
}
static const struct component_master_ops hdlcd_master_ops = {
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
index 909c39c28487..f1c1da2ac2db 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.h
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -7,6 +7,7 @@
#define __HDLCD_DRV_H__
struct hdlcd_drm_private {
+ struct drm_device base;
void __iomem *mmio;
struct clk *clk;
struct drm_crtc crtc;
@@ -20,6 +21,7 @@ struct hdlcd_drm_private {
#endif
};
+#define drm_to_hdlcd_priv(x) container_of(x, struct hdlcd_drm_private, base)
#define crtc_to_hdlcd_priv(x) container_of(x, struct hdlcd_drm_private, crtc)
static inline void hdlcd_write(struct hdlcd_drm_private *hdlcd,
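Taken together, the hdlcd hunks above convert the driver to DRM-managed resources: the drm_device is embedded in the private structure, allocated with devm_drm_dev_alloc(), recovered through a container_of() wrapper instead of drm->dev_private, and mode-config/plane teardown moves to the drmm_* helpers. A sketch of the same pattern for a hypothetical driver (struct foo_device, foo_driver and foo_bind are illustrative names, not part of this series):

	#include <linux/container_of.h>
	#include <linux/device.h>

	#include <drm/drm_drv.h>
	#include <drm/drm_mode_config.h>

	struct foo_device {
		struct drm_device base;		/* embedded, no separate allocation */
		void __iomem *mmio;
	};

	#define drm_to_foo(d)	container_of(d, struct foo_device, base)

	static const struct drm_driver foo_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		/* fops, name, etc. omitted for brevity */
	};

	static int foo_bind(struct device *dev)
	{
		struct foo_device *foo;
		int ret;

		/* allocates foo with foo->base initialized; freed on final drm_dev_put() */
		foo = devm_drm_dev_alloc(dev, &foo_driver, struct foo_device, base);
		if (IS_ERR(foo))
			return PTR_ERR(foo);

		/* mode-config cleanup becomes a managed release action, no *_cleanup() call */
		ret = drmm_mode_config_init(&foo->base);
		if (ret)
			return ret;

		return drm_dev_register(&foo->base, 0);
	}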
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 1d0b0c54ccc7..b734dbdcc577 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -19,7 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 38f5170c0fea..584cee123bd8 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -19,6 +19,8 @@
static const struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_read = drm_fb_helper_cfb_read,
+ .fb_write = drm_fb_helper_cfb_write,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
@@ -72,7 +74,7 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
if (IS_ERR(dfb))
return PTR_ERR(dfb);
- info = drm_fb_helper_alloc_fbi(fbh);
+ info = drm_fb_helper_alloc_info(fbh);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_fballoc;
@@ -155,7 +157,7 @@ void armada_fbdev_fini(struct drm_device *dev)
struct drm_fb_helper *fbh = priv->fbdev;
if (fbh) {
- drm_fb_helper_unregister_fbi(fbh);
+ drm_fb_helper_unregister_info(fbh);
drm_fb_helper_fini(fbh);
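The armada hunks above show the expected wiring on the driver side: fbdev emulation backed by I/O memory now sets the new drm_fb_helper_cfb_read()/drm_fb_helper_cfb_write() hooks next to the existing cfb_* drawing wrappers, and the fb_info accessors follow the *_fbi to *_info rename. A hedged sketch of a probe callback using the renamed helpers (the foo_* names are illustrative):

	#include <linux/err.h>
	#include <linux/fb.h>
	#include <linux/module.h>

	#include <drm/drm_fb_helper.h>

	static const struct fb_ops foo_fb_ops = {
		.owner		= THIS_MODULE,
		DRM_FB_HELPER_DEFAULT_OPS,
		/* framebuffer lives in I/O memory, so use the cfb variants */
		.fb_read	= drm_fb_helper_cfb_read,
		.fb_write	= drm_fb_helper_cfb_write,
		.fb_fillrect	= drm_fb_helper_cfb_fillrect,
		.fb_copyarea	= drm_fb_helper_cfb_copyarea,
		.fb_imageblit	= drm_fb_helper_cfb_imageblit,
	};

	static int foo_fbdev_probe(struct drm_fb_helper *fbh,
				   struct drm_fb_helper_surface_size *sizes)
	{
		struct fb_info *info;

		/* drm_fb_helper_alloc_fbi() is now drm_fb_helper_alloc_info() */
		info = drm_fb_helper_alloc_info(fbh);
		if (IS_ERR(info))
			return PTR_ERR(info);

		info->fbops = &foo_fb_ops;
		drm_fb_helper_fill_info(info, fbh, sizes);
		/* ... map the BO and set info->screen_base/fix.smem_start ... */

		return 0;
	}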
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index a94f1a9e8f40..718119e168a6 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -16,7 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index bbeb5defc8f5..420fc75c240e 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -33,6 +33,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 74f41282444f..d51b81fea9c8 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -38,7 +38,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_mode.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_fb_helper.h>
#define DRIVER_AUTHOR "Dave Airlie"
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index f7e7f4e919c7..a2bb5b916235 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -19,7 +19,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 7f4fce1aa998..0b6a28436885 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -11,6 +11,7 @@
*/
#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
@@ -19,7 +20,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 1a586b3c454b..d579fd8f7cb8 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2536,7 +2536,7 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
if (funcs->prepare_fb) {
ret = funcs->prepare_fb(plane, new_plane_state);
if (ret)
- goto fail;
+ goto fail_prepare_fb;
} else {
WARN_ON_ONCE(funcs->cleanup_fb);
@@ -2545,13 +2545,34 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state);
if (ret)
- goto fail;
+ goto fail_prepare_fb;
+ }
+ }
+
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+ if (funcs->begin_fb_access) {
+ ret = funcs->begin_fb_access(plane, new_plane_state);
+ if (ret)
+ goto fail_begin_fb_access;
}
}
return 0;
-fail:
+fail_begin_fb_access:
+ for_each_new_plane_in_state(state, plane, new_plane_state, j) {
+ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+ if (j >= i)
+ continue;
+
+ if (funcs->end_fb_access)
+ funcs->end_fb_access(plane, new_plane_state);
+ }
+ i = j; /* set i to upper limit to cleanup all planes */
+fail_prepare_fb:
for_each_new_plane_in_state(state, plane, new_plane_state, j) {
const struct drm_plane_helper_funcs *funcs;
@@ -2828,6 +2849,13 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
int i;
for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+ if (funcs->end_fb_access)
+ funcs->end_fb_access(plane, new_plane_state);
+ }
+
+ for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
struct drm_plane_state *plane_state;
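The atomic-helper hunks above add the begin_fb_access/end_fb_access plane hooks: prepare_fb/cleanup_fb keep bracketing the lifetime of the plane state, while the new pair brackets only the commit, with a partial rollback on failure and the release happening in drm_atomic_helper_cleanup_planes(). A sketch of a driver filling in both pairs (the foo_plane_* names are illustrative):

	#include <drm/drm_gem_atomic_helper.h>
	#include <drm/drm_modeset_helper_vtables.h>

	static int foo_plane_begin_fb_access(struct drm_plane *plane,
					     struct drm_plane_state *new_state)
	{
		/* acquire resources that must only live for this commit,
		 * e.g. a temporary vmap of the framebuffer BO (illustrative)
		 */
		return 0;
	}

	static void foo_plane_end_fb_access(struct drm_plane *plane,
					    struct drm_plane_state *new_state)
	{
		/* undo begin_fb_access(); called from drm_atomic_helper_cleanup_planes() */
	}

	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
		/* long-term setup, paired with cleanup_fb over the state's lifetime */
		.prepare_fb	= drm_gem_plane_helper_prepare_fb,
		/* short-term setup, paired with end_fb_access over the commit only */
		.begin_fb_access = foo_plane_begin_fb_access,
		.end_fb_access	= foo_plane_end_fb_access,
		/* .atomic_check and .atomic_update omitted for brevity */
	};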
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index f5fb22e0d033..a209659a996c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -43,7 +43,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 71edb80fe0fb..e0384f967c0b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -30,24 +30,17 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
-#include <linux/dma-buf.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/sysrq.h>
-#include <linux/vmalloc.h>
#include <drm/drm_atomic.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "drm_crtc_helper_internal.h"
#include "drm_internal.h"
static bool drm_fbdev_emulation = true;
@@ -74,7 +67,7 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
* considered as a broken and legacy behaviour from a modern fbdev device.
*/
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
-static bool drm_leak_fbdev_smem = false;
+static bool drm_leak_fbdev_smem;
module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
MODULE_PARM_DESC(drm_leak_fbdev_smem,
"Allow unsafe leaking fbdev physical smem address [default=false]");
@@ -96,11 +89,13 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* It will automatically set up deferred I/O if the driver requires a shadow
* buffer.
*
- * At runtime drivers should restore the fbdev console by using
+ * Existing fbdev implementations should restore the fbdev console by using
* drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
* They should also notify the fb helper code of updates to the output
* configuration by using drm_fb_helper_output_poll_changed() as their
- * &drm_mode_config_funcs.output_poll_changed callback.
+ * &drm_mode_config_funcs.output_poll_changed callback. New implementations
+ * of fbdev should be built on top of struct &drm_client_funcs, which handles
+ * this automatically. Setting the old callbacks should be avoided.
*
* For suspend/resume consider using drm_mode_config_helper_suspend() and
* drm_mode_config_helper_resume() which takes care of fbdev as well.
@@ -368,115 +363,31 @@ static void drm_fb_helper_resume_worker(struct work_struct *work)
resume_work);
console_lock();
- fb_set_suspend(helper->fbdev, 0);
+ fb_set_suspend(helper->info, 0);
console_unlock();
}
-static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper,
- struct drm_clip_rect *clip,
- struct iosys_map *dst)
-{
- struct drm_framebuffer *fb = fb_helper->fb;
- size_t offset = clip->y1 * fb->pitches[0];
- size_t len = clip->x2 - clip->x1;
- unsigned int y;
- void *src;
-
- switch (drm_format_info_bpp(fb->format, 0)) {
- case 1:
- offset += clip->x1 / 8;
- len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
- break;
- case 2:
- offset += clip->x1 / 4;
- len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
- break;
- case 4:
- offset += clip->x1 / 2;
- len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
- break;
- default:
- offset += clip->x1 * fb->format->cpp[0];
- len *= fb->format->cpp[0];
- break;
- }
-
- src = fb_helper->fbdev->screen_buffer + offset;
- iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
-
- for (y = clip->y1; y < clip->y2; y++) {
- iosys_map_memcpy_to(dst, 0, src, len);
- iosys_map_incr(dst, fb->pitches[0]);
- src += fb->pitches[0];
- }
-}
-
-static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper,
- struct drm_clip_rect *clip)
-{
- struct drm_client_buffer *buffer = fb_helper->buffer;
- struct iosys_map map, dst;
- int ret;
-
- /*
- * We have to pin the client buffer to its current location while
- * flushing the shadow buffer. In the general case, concurrent
- * modesetting operations could try to move the buffer and would
- * fail. The modeset has to be serialized by acquiring the reservation
- * object of the underlying BO here.
- *
- * For fbdev emulation, we only have to protect against fbdev modeset
- * operations. Nothing else will involve the client buffer's BO. So it
- * is sufficient to acquire struct drm_fb_helper.lock here.
- */
- mutex_lock(&fb_helper->lock);
-
- ret = drm_client_buffer_vmap(buffer, &map);
- if (ret)
- goto out;
-
- dst = map;
- drm_fb_helper_damage_blit_real(fb_helper, clip, &dst);
-
- drm_client_buffer_vunmap(buffer);
-
-out:
- mutex_unlock(&fb_helper->lock);
-
- return ret;
-}
-
static void drm_fb_helper_damage_work(struct work_struct *work)
{
- struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
- damage_work);
+ struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, damage_work);
struct drm_device *dev = helper->dev;
struct drm_clip_rect *clip = &helper->damage_clip;
struct drm_clip_rect clip_copy;
unsigned long flags;
int ret;
+ if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty))
+ return;
+
spin_lock_irqsave(&helper->damage_lock, flags);
clip_copy = *clip;
clip->x1 = clip->y1 = ~0;
clip->x2 = clip->y2 = 0;
spin_unlock_irqrestore(&helper->damage_lock, flags);
- /* Call damage handlers only if necessary */
- if (!(clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2))
- return;
-
- if (helper->buffer) {
- ret = drm_fb_helper_damage_blit(helper, &clip_copy);
- if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
- goto err;
- }
-
- if (helper->fb->funcs->dirty) {
- ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
- if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
- goto err;
- }
+ ret = helper->funcs->fb_dirty(helper, &clip_copy);
+ if (ret)
+ goto err;
return;
@@ -536,11 +447,6 @@ int drm_fb_helper_init(struct drm_device *dev,
{
int ret;
- if (!drm_fbdev_emulation) {
- dev->fb_helper = fb_helper;
- return 0;
- }
-
/*
* If this is not the generic fbdev client, initialize a drm_client
* without callbacks so we can use the modesets.
@@ -558,7 +464,7 @@ int drm_fb_helper_init(struct drm_device *dev,
EXPORT_SYMBOL(drm_fb_helper_init);
/**
- * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members
+ * drm_fb_helper_alloc_info - allocate fb_info and some of its members
* @fb_helper: driver-allocated fbdev helper
*
* A helper to alloc fb_info and the members cmap and apertures. Called
@@ -570,7 +476,7 @@ EXPORT_SYMBOL(drm_fb_helper_init);
* fb_info pointer if things went okay, pointer containing error code
* otherwise
*/
-struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
{
struct device *dev = fb_helper->dev->dev;
struct fb_info *info;
@@ -598,7 +504,7 @@ struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
goto err_free_cmap;
}
- fb_helper->fbdev = info;
+ fb_helper->info = info;
info->skip_vt_switch = true;
return info;
@@ -609,22 +515,22 @@ err_release:
framebuffer_release(info);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(drm_fb_helper_alloc_fbi);
+EXPORT_SYMBOL(drm_fb_helper_alloc_info);
/**
- * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device
+ * drm_fb_helper_unregister_info - unregister fb_info framebuffer device
* @fb_helper: driver-allocated fbdev helper, can be NULL
*
* A wrapper around unregister_framebuffer, to release the fb_info
* framebuffer device. This must be called before releasing all resources for
* @fb_helper by calling drm_fb_helper_fini().
*/
-void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper)
{
- if (fb_helper && fb_helper->fbdev)
- unregister_framebuffer(fb_helper->fbdev);
+ if (fb_helper && fb_helper->info)
+ unregister_framebuffer(fb_helper->info);
}
-EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);
+EXPORT_SYMBOL(drm_fb_helper_unregister_info);
/**
* drm_fb_helper_fini - finalize a &struct drm_fb_helper
@@ -647,13 +553,13 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
cancel_work_sync(&fb_helper->resume_work);
cancel_work_sync(&fb_helper->damage_work);
- info = fb_helper->fbdev;
+ info = fb_helper->info;
if (info) {
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
- fb_helper->fbdev = NULL;
+ fb_helper->info = NULL;
mutex_lock(&kernel_fb_helper_lock);
if (!list_empty(&fb_helper->kernel_fb_list)) {
@@ -670,26 +576,12 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_fini);
-static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_framebuffer *fb = fb_helper->fb;
-
- return dev->mode_config.prefer_shadow_fbdev ||
- dev->mode_config.prefer_shadow ||
- fb->funcs->dirty;
-}
-
-static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y,
+static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
u32 width, u32 height)
{
- struct drm_fb_helper *helper = info->par;
struct drm_clip_rect *clip = &helper->damage_clip;
unsigned long flags;
- if (!drm_fbdev_use_shadow_fb(helper))
- return;
-
spin_lock_irqsave(&helper->damage_lock, flags);
clip->x1 = min_t(u32, clip->x1, x);
clip->y1 = min_t(u32, clip->y1, y);
@@ -739,6 +631,7 @@ static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off,
*/
void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist)
{
+ struct drm_fb_helper *helper = info->par;
unsigned long start, end, min_off, max_off;
struct fb_deferred_io_pageref *pageref;
struct drm_rect damage_area;
@@ -754,60 +647,167 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli
if (min_off >= max_off)
return;
+ if (helper->funcs->fb_dirty) {
+ /*
+ * As we can only track pages, we might reach beyond the end
+ * of the screen and account for non-existing scanlines. Hence,
+ * keep the covered memory area within the screen buffer.
+ */
+ max_off = min(max_off, info->screen_size);
+
+ drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area);
+ drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1,
+ drm_rect_width(&damage_area),
+ drm_rect_height(&damage_area));
+ }
+}
+EXPORT_SYMBOL(drm_fb_helper_deferred_io);
+
+typedef ssize_t (*drm_fb_helper_read_screen)(struct fb_info *info, char __user *buf,
+ size_t count, loff_t pos);
+
+static ssize_t __drm_fb_helper_read(struct fb_info *info, char __user *buf, size_t count,
+ loff_t *ppos, drm_fb_helper_read_screen read_screen)
+{
+ loff_t pos = *ppos;
+ size_t total_size;
+ ssize_t ret;
+
+ if (info->screen_size)
+ total_size = info->screen_size;
+ else
+ total_size = info->fix.smem_len;
+
+ if (pos >= total_size)
+ return 0;
+ if (count >= total_size)
+ count = total_size;
+ if (total_size - count < pos)
+ count = total_size - pos;
+
+ if (info->fbops->fb_sync)
+ info->fbops->fb_sync(info);
+
+ ret = read_screen(info, buf, count, pos);
+ if (ret > 0)
+ *ppos += ret;
+
+ return ret;
+}
+
+typedef ssize_t (*drm_fb_helper_write_screen)(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t pos);
+
+static ssize_t __drm_fb_helper_write(struct fb_info *info, const char __user *buf, size_t count,
+ loff_t *ppos, drm_fb_helper_write_screen write_screen)
+{
+ loff_t pos = *ppos;
+ size_t total_size;
+ ssize_t ret;
+ int err = 0;
+
+ if (info->screen_size)
+ total_size = info->screen_size;
+ else
+ total_size = info->fix.smem_len;
+
+ if (pos > total_size)
+ return -EFBIG;
+ if (count > total_size) {
+ err = -EFBIG;
+ count = total_size;
+ }
+ if (total_size - count < pos) {
+ if (!err)
+ err = -ENOSPC;
+ count = total_size - pos;
+ }
+
+ if (info->fbops->fb_sync)
+ info->fbops->fb_sync(info);
+
/*
- * As we can only track pages, we might reach beyond the end
- * of the screen and account for non-existing scanlines. Hence,
- * keep the covered memory area within the screen buffer.
+ * Copy to framebuffer even if we already logged an error. Emulates
+ * the behavior of the original fbdev implementation.
*/
- max_off = min(max_off, info->screen_size);
+ ret = write_screen(info, buf, count, pos);
+ if (ret < 0)
+ return ret; /* return last error, if any */
+ else if (!ret)
+ return err; /* return previous error, if any */
+
+ *ppos += ret;
- drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area);
- drm_fb_helper_damage(info, damage_area.x1, damage_area.y1,
- drm_rect_width(&damage_area),
- drm_rect_height(&damage_area));
+ return ret;
+}
+
+static ssize_t drm_fb_helper_read_screen_buffer(struct fb_info *info, char __user *buf,
+ size_t count, loff_t pos)
+{
+ const char *src = info->screen_buffer + pos;
+
+ if (copy_to_user(buf, src, count))
+ return -EFAULT;
+
+ return count;
}
-EXPORT_SYMBOL(drm_fb_helper_deferred_io);
/**
- * drm_fb_helper_sys_read - wrapper around fb_sys_read
+ * drm_fb_helper_sys_read - Implements struct &fb_ops.fb_read for system memory
* @info: fb_info struct pointer
* @buf: userspace buffer to read from framebuffer memory
* @count: number of bytes to read from framebuffer memory
* @ppos: read offset within framebuffer memory
*
- * A wrapper around fb_sys_read implemented by fbdev core
+ * Returns:
+ * The number of bytes read on success, or an error code otherwise.
*/
ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos)
{
- return fb_sys_read(info, buf, count, ppos);
+ return __drm_fb_helper_read(info, buf, count, ppos, drm_fb_helper_read_screen_buffer);
}
EXPORT_SYMBOL(drm_fb_helper_sys_read);
+static ssize_t drm_fb_helper_write_screen_buffer(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t pos)
+{
+ char *dst = info->screen_buffer + pos;
+
+ if (copy_from_user(dst, buf, count))
+ return -EFAULT;
+
+ return count;
+}
+
/**
- * drm_fb_helper_sys_write - wrapper around fb_sys_write
+ * drm_fb_helper_sys_write - Implements struct &fb_ops.fb_write for system memory
* @info: fb_info struct pointer
* @buf: userspace buffer to write to framebuffer memory
* @count: number of bytes to write to framebuffer memory
* @ppos: write offset within framebuffer memory
*
- * A wrapper around fb_sys_write implemented by fbdev core
+ * Returns:
+ * The number of bytes written on success, or an error code otherwise.
*/
ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct drm_fb_helper *helper = info->par;
loff_t pos = *ppos;
ssize_t ret;
struct drm_rect damage_area;
- ret = fb_sys_write(info, buf, count, ppos);
+ ret = __drm_fb_helper_write(info, buf, count, ppos, drm_fb_helper_write_screen_buffer);
if (ret <= 0)
return ret;
- drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area);
- drm_fb_helper_damage(info, damage_area.x1, damage_area.y1,
- drm_rect_width(&damage_area),
- drm_rect_height(&damage_area));
+ if (helper->funcs->fb_dirty) {
+ drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area);
+ drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1,
+ drm_rect_width(&damage_area),
+ drm_rect_height(&damage_area));
+ }
return ret;
}
@@ -823,8 +823,12 @@ EXPORT_SYMBOL(drm_fb_helper_sys_write);
void drm_fb_helper_sys_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
+ struct drm_fb_helper *helper = info->par;
+
sys_fillrect(info, rect);
- drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height);
+
+ if (helper->funcs->fb_dirty)
+ drm_fb_helper_damage(helper, rect->dx, rect->dy, rect->width, rect->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
@@ -838,8 +842,12 @@ EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
void drm_fb_helper_sys_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
+ struct drm_fb_helper *helper = info->par;
+
sys_copyarea(info, area);
- drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height);
+
+ if (helper->funcs->fb_dirty)
+ drm_fb_helper_damage(helper, area->dx, area->dy, area->width, area->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
@@ -853,11 +861,131 @@ EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
void drm_fb_helper_sys_imageblit(struct fb_info *info,
const struct fb_image *image)
{
+ struct drm_fb_helper *helper = info->par;
+
sys_imageblit(info, image);
- drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height);
+
+ if (helper->funcs->fb_dirty)
+ drm_fb_helper_damage(helper, image->dx, image->dy, image->width, image->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
+static ssize_t fb_read_screen_base(struct fb_info *info, char __user *buf, size_t count,
+ loff_t pos)
+{
+ const char __iomem *src = info->screen_base + pos;
+ size_t alloc_size = min_t(size_t, count, PAGE_SIZE);
+ ssize_t ret = 0;
+ int err = 0;
+ char *tmp;
+
+ tmp = kmalloc(alloc_size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ while (count) {
+ size_t c = min_t(size_t, count, alloc_size);
+
+ memcpy_fromio(tmp, src, c);
+ if (copy_to_user(buf, tmp, c)) {
+ err = -EFAULT;
+ break;
+ }
+
+ src += c;
+ buf += c;
+ ret += c;
+ count -= c;
+ }
+
+ kfree(tmp);
+
+ return ret ? ret : err;
+}
+
+/**
+ * drm_fb_helper_cfb_read - Implements struct &fb_ops.fb_read for I/O memory
+ * @info: fb_info struct pointer
+ * @buf: userspace buffer to read from framebuffer memory
+ * @count: number of bytes to read from framebuffer memory
+ * @ppos: read offset within framebuffer memory
+ *
+ * Returns:
+ * The number of bytes read on success, or an error code otherwise.
+ */
+ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return __drm_fb_helper_read(info, buf, count, ppos, fb_read_screen_base);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_read);
+
+static ssize_t fb_write_screen_base(struct fb_info *info, const char __user *buf, size_t count,
+ loff_t pos)
+{
+ char __iomem *dst = info->screen_base + pos;
+ size_t alloc_size = min_t(size_t, count, PAGE_SIZE);
+ ssize_t ret = 0;
+ int err = 0;
+ u8 *tmp;
+
+ tmp = kmalloc(alloc_size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ while (count) {
+ size_t c = min_t(size_t, count, alloc_size);
+
+ if (copy_from_user(tmp, buf, c)) {
+ err = -EFAULT;
+ break;
+ }
+ memcpy_toio(dst, tmp, c);
+
+ dst += c;
+ buf += c;
+ ret += c;
+ count -= c;
+ }
+
+ kfree(tmp);
+
+ return ret ? ret : err;
+}
+
+/**
+ * drm_fb_helper_cfb_write - Implements struct &fb_ops.fb_write for I/O memory
+ * @info: fb_info struct pointer
+ * @buf: userspace buffer to write to framebuffer memory
+ * @count: number of bytes to write to framebuffer memory
+ * @ppos: write offset within framebuffer memory
+ *
+ * Returns:
+ * The number of bytes written on success, or an error code otherwise.
+ */
+ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct drm_fb_helper *helper = info->par;
+ loff_t pos = *ppos;
+ ssize_t ret;
+ struct drm_rect damage_area;
+
+ ret = __drm_fb_helper_write(info, buf, count, ppos, fb_write_screen_base);
+ if (ret <= 0)
+ return ret;
+
+ if (helper->funcs->fb_dirty) {
+ drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area);
+ drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1,
+ drm_rect_width(&damage_area),
+ drm_rect_height(&damage_area));
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_write);
+
/**
* drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect
* @info: fbdev registered by the helper
@@ -868,8 +996,12 @@ EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
+ struct drm_fb_helper *helper = info->par;
+
cfb_fillrect(info, rect);
- drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height);
+
+ if (helper->funcs->fb_dirty)
+ drm_fb_helper_damage(helper, rect->dx, rect->dy, rect->width, rect->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
@@ -883,8 +1015,12 @@ EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
void drm_fb_helper_cfb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
+ struct drm_fb_helper *helper = info->par;
+
cfb_copyarea(info, area);
- drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height);
+
+ if (helper->funcs->fb_dirty)
+ drm_fb_helper_damage(helper, area->dx, area->dy, area->width, area->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
@@ -898,8 +1034,12 @@ EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
void drm_fb_helper_cfb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
+ struct drm_fb_helper *helper = info->par;
+
cfb_imageblit(info, image);
- drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height);
+
+ if (helper->funcs->fb_dirty)
+ drm_fb_helper_damage(helper, image->dx, image->dy, image->width, image->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
@@ -914,8 +1054,8 @@ EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
*/
void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
{
- if (fb_helper && fb_helper->fbdev)
- fb_set_suspend(fb_helper->fbdev, suspend);
+ if (fb_helper && fb_helper->info)
+ fb_set_suspend(fb_helper->info, suspend);
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend);
@@ -938,20 +1078,20 @@ EXPORT_SYMBOL(drm_fb_helper_set_suspend);
void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
bool suspend)
{
- if (!fb_helper || !fb_helper->fbdev)
+ if (!fb_helper || !fb_helper->info)
return;
/* make sure there's no pending/ongoing resume */
flush_work(&fb_helper->resume_work);
if (suspend) {
- if (fb_helper->fbdev->state != FBINFO_STATE_RUNNING)
+ if (fb_helper->info->state != FBINFO_STATE_RUNNING)
return;
console_lock();
} else {
- if (fb_helper->fbdev->state == FBINFO_STATE_RUNNING)
+ if (fb_helper->info->state == FBINFO_STATE_RUNNING)
return;
if (!console_trylock()) {
@@ -960,7 +1100,7 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
}
}
- fb_set_suspend(fb_helper->fbdev, suspend);
+ fb_set_suspend(fb_helper->info, suspend);
console_unlock();
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked);
@@ -1749,6 +1889,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
sizes.surface_height = config->max_height;
}
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+ fb_helper->hint_leak_smem_start = drm_leak_fbdev_smem;
+#endif
+
/* push down into drivers */
ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
if (ret < 0)
@@ -1850,7 +1994,7 @@ EXPORT_SYMBOL(drm_fb_helper_fill_info);
/*
* This is a continuation of drm_setup_crtcs() that sets up anything related
* to the framebuffer. During initialization, drm_setup_crtcs() is called before
- * the framebuffer has been allocated (fb_helper->fb and fb_helper->fbdev).
+ * the framebuffer has been allocated (fb_helper->fb and fb_helper->info).
* So, any setup that touches those fields needs to be done here instead of in
* drm_setup_crtcs().
*/
@@ -1858,7 +2002,7 @@ static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_connector_list_iter conn_iter;
- struct fb_info *info = fb_helper->fbdev;
+ struct fb_info *info = fb_helper->info;
unsigned int rotation, sw_rotations = 0;
struct drm_connector *connector;
struct drm_mode_set *modeset;
@@ -1942,11 +2086,11 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
fb_helper->deferred_setup = false;
- info = fb_helper->fbdev;
+ info = fb_helper->info;
info->var.pixclock = 0;
/* Shamelessly allow physical address leaking to userspace */
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
- if (!drm_leak_fbdev_smem)
+ if (!fb_helper->hint_leak_smem_start)
#endif
/* don't leak any physical addresses to userspace */
info->flags |= FBINFO_HIDE_SMEM_START;
@@ -2077,7 +2221,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
drm_setup_crtcs_fb(fb_helper);
mutex_unlock(&fb_helper->lock);
- drm_fb_helper_set_par(fb_helper->fbdev);
+ drm_fb_helper_set_par(fb_helper->info);
return 0;
}
@@ -2103,530 +2247,10 @@ EXPORT_SYMBOL(drm_fb_helper_lastclose);
*
* This function can be used as the
* &drm_mode_config_funcs.output_poll_changed callback for drivers that only
- * need to call drm_fb_helper_hotplug_event().
+ * need to call drm_fbdev.hotplug_event().
*/
void drm_fb_helper_output_poll_changed(struct drm_device *dev)
{
drm_fb_helper_hotplug_event(dev->fb_helper);
}
EXPORT_SYMBOL(drm_fb_helper_output_poll_changed);
-
-/* @user: 1=userspace, 0=fbcon */
-static int drm_fbdev_fb_open(struct fb_info *info, int user)
-{
- struct drm_fb_helper *fb_helper = info->par;
-
- /* No need to take a ref for fbcon because it unbinds on unregister */
- if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
- return -ENODEV;
-
- return 0;
-}
-
-static int drm_fbdev_fb_release(struct fb_info *info, int user)
-{
- struct drm_fb_helper *fb_helper = info->par;
-
- if (user)
- module_put(fb_helper->dev->driver->fops->owner);
-
- return 0;
-}
-
-static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
-{
- struct fb_info *fbi = fb_helper->fbdev;
- void *shadow = NULL;
-
- if (!fb_helper->dev)
- return;
-
- if (fbi) {
- if (fbi->fbdefio)
- fb_deferred_io_cleanup(fbi);
- if (drm_fbdev_use_shadow_fb(fb_helper))
- shadow = fbi->screen_buffer;
- }
-
- drm_fb_helper_fini(fb_helper);
-
- if (shadow)
- vfree(shadow);
- else if (fb_helper->buffer)
- drm_client_buffer_vunmap(fb_helper->buffer);
-
- drm_client_framebuffer_delete(fb_helper->buffer);
-}
-
-static void drm_fbdev_release(struct drm_fb_helper *fb_helper)
-{
- drm_fbdev_cleanup(fb_helper);
- drm_client_release(&fb_helper->client);
- kfree(fb_helper);
-}
-
-/*
- * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
- * unregister_framebuffer() or fb_release().
- */
-static void drm_fbdev_fb_destroy(struct fb_info *info)
-{
- drm_fbdev_release(info->par);
-}
-
-static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- struct drm_fb_helper *fb_helper = info->par;
-
- if (drm_fbdev_use_shadow_fb(fb_helper))
- return fb_deferred_io_mmap(info, vma);
- else if (fb_helper->dev->driver->gem_prime_mmap)
- return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
- else
- return -ENODEV;
-}
-
-static bool drm_fbdev_use_iomem(struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_client_buffer *buffer = fb_helper->buffer;
-
- return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem;
-}
-
-static ssize_t fb_read_screen_base(struct fb_info *info, char __user *buf, size_t count,
- loff_t pos)
-{
- const char __iomem *src = info->screen_base + pos;
- size_t alloc_size = min_t(size_t, count, PAGE_SIZE);
- ssize_t ret = 0;
- int err = 0;
- char *tmp;
-
- tmp = kmalloc(alloc_size, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- while (count) {
- size_t c = min_t(size_t, count, alloc_size);
-
- memcpy_fromio(tmp, src, c);
- if (copy_to_user(buf, tmp, c)) {
- err = -EFAULT;
- break;
- }
-
- src += c;
- buf += c;
- ret += c;
- count -= c;
- }
-
- kfree(tmp);
-
- return ret ? ret : err;
-}
-
-static ssize_t fb_read_screen_buffer(struct fb_info *info, char __user *buf, size_t count,
- loff_t pos)
-{
- const char *src = info->screen_buffer + pos;
-
- if (copy_to_user(buf, src, count))
- return -EFAULT;
-
- return count;
-}
-
-static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf,
- size_t count, loff_t *ppos)
-{
- loff_t pos = *ppos;
- size_t total_size;
- ssize_t ret;
-
- if (info->screen_size)
- total_size = info->screen_size;
- else
- total_size = info->fix.smem_len;
-
- if (pos >= total_size)
- return 0;
- if (count >= total_size)
- count = total_size;
- if (total_size - count < pos)
- count = total_size - pos;
-
- if (drm_fbdev_use_iomem(info))
- ret = fb_read_screen_base(info, buf, count, pos);
- else
- ret = fb_read_screen_buffer(info, buf, count, pos);
-
- if (ret > 0)
- *ppos += ret;
-
- return ret;
-}
-
-static ssize_t fb_write_screen_base(struct fb_info *info, const char __user *buf, size_t count,
- loff_t pos)
-{
- char __iomem *dst = info->screen_base + pos;
- size_t alloc_size = min_t(size_t, count, PAGE_SIZE);
- ssize_t ret = 0;
- int err = 0;
- u8 *tmp;
-
- tmp = kmalloc(alloc_size, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- while (count) {
- size_t c = min_t(size_t, count, alloc_size);
-
- if (copy_from_user(tmp, buf, c)) {
- err = -EFAULT;
- break;
- }
- memcpy_toio(dst, tmp, c);
-
- dst += c;
- buf += c;
- ret += c;
- count -= c;
- }
-
- kfree(tmp);
-
- return ret ? ret : err;
-}
-
-static ssize_t fb_write_screen_buffer(struct fb_info *info, const char __user *buf, size_t count,
- loff_t pos)
-{
- char *dst = info->screen_buffer + pos;
-
- if (copy_from_user(dst, buf, count))
- return -EFAULT;
-
- return count;
-}
-
-static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- loff_t pos = *ppos;
- size_t total_size;
- ssize_t ret;
- struct drm_rect damage_area;
- int err = 0;
-
- if (info->screen_size)
- total_size = info->screen_size;
- else
- total_size = info->fix.smem_len;
-
- if (pos > total_size)
- return -EFBIG;
- if (count > total_size) {
- err = -EFBIG;
- count = total_size;
- }
- if (total_size - count < pos) {
- if (!err)
- err = -ENOSPC;
- count = total_size - pos;
- }
-
- /*
- * Copy to framebuffer even if we already logged an error. Emulates
- * the behavior of the original fbdev implementation.
- */
- if (drm_fbdev_use_iomem(info))
- ret = fb_write_screen_base(info, buf, count, pos);
- else
- ret = fb_write_screen_buffer(info, buf, count, pos);
-
- if (ret < 0)
- return ret; /* return last error, if any */
- else if (!ret)
- return err; /* return previous error, if any */
-
- *ppos += ret;
-
- drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area);
- drm_fb_helper_damage(info, damage_area.x1, damage_area.y1,
- drm_rect_width(&damage_area),
- drm_rect_height(&damage_area));
-
- return ret;
-}
-
-static void drm_fbdev_fb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- if (drm_fbdev_use_iomem(info))
- drm_fb_helper_cfb_fillrect(info, rect);
- else
- drm_fb_helper_sys_fillrect(info, rect);
-}
-
-static void drm_fbdev_fb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- if (drm_fbdev_use_iomem(info))
- drm_fb_helper_cfb_copyarea(info, area);
- else
- drm_fb_helper_sys_copyarea(info, area);
-}
-
-static void drm_fbdev_fb_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- if (drm_fbdev_use_iomem(info))
- drm_fb_helper_cfb_imageblit(info, image);
- else
- drm_fb_helper_sys_imageblit(info, image);
-}
-
-static const struct fb_ops drm_fbdev_fb_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_open = drm_fbdev_fb_open,
- .fb_release = drm_fbdev_fb_release,
- .fb_destroy = drm_fbdev_fb_destroy,
- .fb_mmap = drm_fbdev_fb_mmap,
- .fb_read = drm_fbdev_fb_read,
- .fb_write = drm_fbdev_fb_write,
- .fb_fillrect = drm_fbdev_fb_fillrect,
- .fb_copyarea = drm_fbdev_fb_copyarea,
- .fb_imageblit = drm_fbdev_fb_imageblit,
-};
-
-static struct fb_deferred_io drm_fbdev_defio = {
- .delay = HZ / 20,
- .deferred_io = drm_fb_helper_deferred_io,
-};
-
-/*
- * This function uses the client API to create a framebuffer backed by a dumb buffer.
- *
- * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect,
- * fb_copyarea, fb_imageblit.
- */
-static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct drm_client_dev *client = &fb_helper->client;
- struct drm_device *dev = fb_helper->dev;
- struct drm_client_buffer *buffer;
- struct drm_framebuffer *fb;
- struct fb_info *fbi;
- u32 format;
- struct iosys_map map;
- int ret;
-
- drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
- sizes->surface_width, sizes->surface_height,
- sizes->surface_bpp);
-
- format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
- buffer = drm_client_framebuffer_create(client, sizes->surface_width,
- sizes->surface_height, format);
- if (IS_ERR(buffer))
- return PTR_ERR(buffer);
-
- fb_helper->buffer = buffer;
- fb_helper->fb = buffer->fb;
- fb = buffer->fb;
-
- fbi = drm_fb_helper_alloc_fbi(fb_helper);
- if (IS_ERR(fbi))
- return PTR_ERR(fbi);
-
- fbi->fbops = &drm_fbdev_fb_ops;
- fbi->screen_size = sizes->surface_height * fb->pitches[0];
- fbi->fix.smem_len = fbi->screen_size;
- fbi->flags = FBINFO_DEFAULT;
-
- drm_fb_helper_fill_info(fbi, fb_helper, sizes);
-
- if (drm_fbdev_use_shadow_fb(fb_helper)) {
- fbi->screen_buffer = vzalloc(fbi->screen_size);
- if (!fbi->screen_buffer)
- return -ENOMEM;
- fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
-
- fbi->fbdefio = &drm_fbdev_defio;
- fb_deferred_io_init(fbi);
- } else {
- /* buffer is mapped for HW framebuffer */
- ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
- if (ret)
- return ret;
- if (map.is_iomem) {
- fbi->screen_base = map.vaddr_iomem;
- } else {
- fbi->screen_buffer = map.vaddr;
- fbi->flags |= FBINFO_VIRTFB;
- }
-
- /*
- * Shamelessly leak the physical address to user-space. As
- * page_to_phys() is undefined for I/O memory, warn in this
- * case.
- */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
- if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0 &&
- !drm_WARN_ON_ONCE(dev, map.is_iomem))
- fbi->fix.smem_start =
- page_to_phys(virt_to_page(fbi->screen_buffer));
-#endif
- }
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
- .fb_probe = drm_fb_helper_generic_probe,
-};
-
-static void drm_fbdev_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
-
- if (fb_helper->fbdev)
- /* drm_fbdev_fb_destroy() takes care of cleanup */
- drm_fb_helper_unregister_fbi(fb_helper);
- else
- drm_fbdev_release(fb_helper);
-}
-
-static int drm_fbdev_client_restore(struct drm_client_dev *client)
-{
- drm_fb_helper_lastclose(client->dev);
-
- return 0;
-}
-
-static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- int ret;
-
- /* Setup is not retried if it has failed */
- if (!fb_helper->dev && fb_helper->funcs)
- return 0;
-
- if (dev->fb_helper)
- return drm_fb_helper_hotplug_event(dev->fb_helper);
-
- if (!dev->mode_config.num_connector) {
- drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n");
- return 0;
- }
-
- drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err;
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp);
- if (ret)
- goto err_cleanup;
-
- return 0;
-
-err_cleanup:
- drm_fbdev_cleanup(fb_helper);
-err:
- fb_helper->dev = NULL;
- fb_helper->fbdev = NULL;
-
- drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
-
- return ret;
-}
-
-static const struct drm_client_funcs drm_fbdev_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = drm_fbdev_client_unregister,
- .restore = drm_fbdev_client_restore,
- .hotplug = drm_fbdev_client_hotplug,
-};
-
-/**
- * drm_fbdev_generic_setup() - Setup generic fbdev emulation
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device.
- * @dev->mode_config.preferred_depth is used if this is zero.
- *
- * This function sets up generic fbdev emulation for drivers that supports
- * dumb buffers with a virtual address and that can be mmap'ed.
- * drm_fbdev_generic_setup() shall be called after the DRM driver registered
- * the new DRM device with drm_dev_register().
- *
- * Restore, hotplug events and teardown are all taken care of. Drivers that do
- * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
- * Simple drivers might use drm_mode_config_helper_suspend().
- *
- * Drivers that set the dirty callback on their framebuffer will get a shadow
- * fbdev buffer that is blitted onto the real buffer. This is done in order to
- * make deferred I/O work with all kinds of buffers. A shadow buffer can be
- * requested explicitly by setting struct drm_mode_config.prefer_shadow or
- * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
- * required to use generic fbdev emulation with SHMEM helpers.
- *
- * This function is safe to call even when there are no connectors present.
- * Setup will be retried on the next hotplug event.
- *
- * The fbdev is destroyed by drm_dev_unregister().
- */
-void drm_fbdev_generic_setup(struct drm_device *dev,
- unsigned int preferred_bpp)
-{
- struct drm_fb_helper *fb_helper;
- int ret;
-
- drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
- drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
-
- if (!drm_fbdev_emulation)
- return;
-
- fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
- if (!fb_helper) {
- drm_err(dev, "Failed to allocate fb_helper\n");
- return;
- }
-
- ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
- if (ret) {
- kfree(fb_helper);
- drm_err(dev, "Failed to register client: %d\n", ret);
- return;
- }
-
- /*
- * FIXME: This mixes up depth with bpp, which results in a glorious
- * mess, resulting in some drivers picking wrong fbdev defaults and
- * others wrong preferred_depth defaults.
- */
- if (!preferred_bpp)
- preferred_bpp = dev->mode_config.preferred_depth;
- if (!preferred_bpp)
- preferred_bpp = 32;
- fb_helper->preferred_bpp = preferred_bpp;
-
- ret = drm_fbdev_client_hotplug(&fb_helper->client);
- if (ret)
- drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
- drm_client_register(&fb_helper->client);
-}
-EXPORT_SYMBOL(drm_fbdev_generic_setup);
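With the block above removed, the generic fbdev client moves into its own source file, drm_fbdev_generic.c, and the driver-facing entry point is now declared in <drm/drm_fbdev_generic.h>, which is why the driver hunks earlier in this series swap their include. The call site itself is unchanged; a sketch (foo_register is an illustrative name):

	#include <drm/drm_drv.h>
	#include <drm/drm_fbdev_generic.h>

	static int foo_register(struct drm_device *drm)
	{
		int ret;

		ret = drm_dev_register(drm, 0);
		if (ret)
			return ret;

		/* same entry point as before, only the providing header changed;
		 * 32 is the preferred bpp, 0 falls back to mode_config.preferred_depth
		 */
		drm_fbdev_generic_setup(drm, 32);

		return 0;
	}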
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
new file mode 100644
index 000000000000..ab8695669279
--- /dev/null
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: MIT
+
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
+
+#include <drm/drm_fbdev_generic.h>
+
+static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_framebuffer *fb = fb_helper->fb;
+
+ return dev->mode_config.prefer_shadow_fbdev ||
+ dev->mode_config.prefer_shadow ||
+ fb->funcs->dirty;
+}
+
+/* @user: 1=userspace, 0=fbcon */
+static int drm_fbdev_fb_open(struct fb_info *info, int user)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ /* No need to take a ref for fbcon because it unbinds on unregister */
+ if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int drm_fbdev_fb_release(struct fb_info *info, int user)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ if (user)
+ module_put(fb_helper->dev->driver->fops->owner);
+
+ return 0;
+}
+
+static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
+{
+ struct fb_info *fbi = fb_helper->info;
+ void *shadow = NULL;
+
+ if (!fb_helper->dev)
+ return;
+
+ if (fbi) {
+ if (fbi->fbdefio)
+ fb_deferred_io_cleanup(fbi);
+ if (drm_fbdev_use_shadow_fb(fb_helper))
+ shadow = fbi->screen_buffer;
+ }
+
+ drm_fb_helper_fini(fb_helper);
+
+ if (shadow)
+ vfree(shadow);
+ else if (fb_helper->buffer)
+ drm_client_buffer_vunmap(fb_helper->buffer);
+
+ drm_client_framebuffer_delete(fb_helper->buffer);
+}
+
+static void drm_fbdev_release(struct drm_fb_helper *fb_helper)
+{
+ drm_fbdev_cleanup(fb_helper);
+ drm_client_release(&fb_helper->client);
+ kfree(fb_helper);
+}
+
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
+ * unregister_framebuffer() or fb_release().
+ */
+static void drm_fbdev_fb_destroy(struct fb_info *info)
+{
+ drm_fbdev_release(info->par);
+}
+
+static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ if (drm_fbdev_use_shadow_fb(fb_helper))
+ return fb_deferred_io_mmap(info, vma);
+ else if (fb_helper->dev->driver->gem_prime_mmap)
+ return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
+ else
+ return -ENODEV;
+}
+
+static bool drm_fbdev_use_iomem(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+
+ return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem;
+}
+
+static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+
+ if (drm_fbdev_use_iomem(info))
+ ret = drm_fb_helper_cfb_read(info, buf, count, ppos);
+ else
+ ret = drm_fb_helper_sys_read(info, buf, count, ppos);
+
+ return ret;
+}
+
+static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+
+ if (drm_fbdev_use_iomem(info))
+ ret = drm_fb_helper_cfb_write(info, buf, count, ppos);
+ else
+ ret = drm_fb_helper_sys_write(info, buf, count, ppos);
+
+ return ret;
+}
+
+static void drm_fbdev_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ if (drm_fbdev_use_iomem(info))
+ drm_fb_helper_cfb_fillrect(info, rect);
+ else
+ drm_fb_helper_sys_fillrect(info, rect);
+}
+
+static void drm_fbdev_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ if (drm_fbdev_use_iomem(info))
+ drm_fb_helper_cfb_copyarea(info, area);
+ else
+ drm_fb_helper_sys_copyarea(info, area);
+}
+
+static void drm_fbdev_fb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ if (drm_fbdev_use_iomem(info))
+ drm_fb_helper_cfb_imageblit(info, image);
+ else
+ drm_fb_helper_sys_imageblit(info, image);
+}
+
+static const struct fb_ops drm_fbdev_fb_ops = {
+ .owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_open = drm_fbdev_fb_open,
+ .fb_release = drm_fbdev_fb_release,
+ .fb_destroy = drm_fbdev_fb_destroy,
+ .fb_mmap = drm_fbdev_fb_mmap,
+ .fb_read = drm_fbdev_fb_read,
+ .fb_write = drm_fbdev_fb_write,
+ .fb_fillrect = drm_fbdev_fb_fillrect,
+ .fb_copyarea = drm_fbdev_fb_copyarea,
+ .fb_imageblit = drm_fbdev_fb_imageblit,
+};
+
+static struct fb_deferred_io drm_fbdev_defio = {
+ .delay = HZ / 20,
+ .deferred_io = drm_fb_helper_deferred_io,
+};
+
+/*
+ * This function uses the client API to create a framebuffer backed by a dumb buffer.
+ */
+static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_client_buffer *buffer;
+ struct drm_framebuffer *fb;
+ struct fb_info *fbi;
+ u32 format;
+ struct iosys_map map;
+ int ret;
+
+ drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+ buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ sizes->surface_height, format);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ fb_helper->buffer = buffer;
+ fb_helper->fb = buffer->fb;
+ fb = buffer->fb;
+
+ fbi = drm_fb_helper_alloc_info(fb_helper);
+ if (IS_ERR(fbi))
+ return PTR_ERR(fbi);
+
+ fbi->fbops = &drm_fbdev_fb_ops;
+ fbi->screen_size = sizes->surface_height * fb->pitches[0];
+ fbi->fix.smem_len = fbi->screen_size;
+ fbi->flags = FBINFO_DEFAULT;
+
+ drm_fb_helper_fill_info(fbi, fb_helper, sizes);
+
+ if (drm_fbdev_use_shadow_fb(fb_helper)) {
+ fbi->screen_buffer = vzalloc(fbi->screen_size);
+ if (!fbi->screen_buffer)
+ return -ENOMEM;
+ fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
+
+ fbi->fbdefio = &drm_fbdev_defio;
+ fb_deferred_io_init(fbi);
+ } else {
+ /* buffer is mapped for HW framebuffer */
+ ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
+ if (ret)
+ return ret;
+ if (map.is_iomem) {
+ fbi->screen_base = map.vaddr_iomem;
+ } else {
+ fbi->screen_buffer = map.vaddr;
+ fbi->flags |= FBINFO_VIRTFB;
+ }
+
+ /*
+ * Shamelessly leak the physical address to user-space. As
+ * page_to_phys() is undefined for I/O memory, warn in this
+ * case.
+ */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+ if (fb_helper->hint_leak_smem_start && fbi->fix.smem_start == 0 &&
+ !drm_WARN_ON_ONCE(dev, map.is_iomem))
+ fbi->fix.smem_start =
+ page_to_phys(virt_to_page(fbi->screen_buffer));
+#endif
+ }
+
+ return 0;
+}
+
+static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip,
+ struct iosys_map *dst)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+ size_t offset = clip->y1 * fb->pitches[0];
+ size_t len = clip->x2 - clip->x1;
+ unsigned int y;
+ void *src;
+
+ switch (drm_format_info_bpp(fb->format, 0)) {
+ case 1:
+ offset += clip->x1 / 8;
+ len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
+ break;
+ case 2:
+ offset += clip->x1 / 4;
+ len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
+ break;
+ case 4:
+ offset += clip->x1 / 2;
+ len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
+ break;
+ default:
+ offset += clip->x1 * fb->format->cpp[0];
+ len *= fb->format->cpp[0];
+ break;
+ }
+
+ src = fb_helper->info->screen_buffer + offset;
+ iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ iosys_map_memcpy_to(dst, 0, src, len);
+ iosys_map_incr(dst, fb->pitches[0]);
+ src += fb->pitches[0];
+ }
+}
+
+static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct iosys_map map, dst;
+ int ret;
+
+ /*
+ * We have to pin the client buffer to its current location while
+ * flushing the shadow buffer. In the general case, concurrent
+ * modesetting operations could try to move the buffer and would
+ * fail. The modeset has to be serialized by acquiring the reservation
+ * object of the underlying BO here.
+ *
+ * For fbdev emulation, we only have to protect against fbdev modeset
+ * operations. Nothing else will involve the client buffer's BO. So it
+ * is sufficient to acquire struct drm_fb_helper.lock here.
+ */
+ mutex_lock(&fb_helper->lock);
+
+ ret = drm_client_buffer_vmap(buffer, &map);
+ if (ret)
+ goto out;
+
+ dst = map;
+ drm_fbdev_damage_blit_real(fb_helper, clip, &dst);
+
+ drm_client_buffer_vunmap(buffer);
+
+out:
+ mutex_unlock(&fb_helper->lock);
+
+ return ret;
+}
+
+static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ struct drm_device *dev = helper->dev;
+ int ret;
+
+ if (!drm_fbdev_use_shadow_fb(helper))
+ return 0;
+
+ /* Call damage handlers only if necessary */
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->buffer) {
+ ret = drm_fbdev_damage_blit(helper, clip);
+ if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
+ return ret;
+ }
+
+ if (helper->fb->funcs->dirty) {
+ ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+ if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
+ .fb_probe = drm_fbdev_fb_probe,
+ .fb_dirty = drm_fbdev_fb_dirty,
+};
+
+static void drm_fbdev_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (fb_helper->info)
+ /* drm_fbdev_fb_destroy() takes care of cleanup */
+ drm_fb_helper_unregister_info(fb_helper);
+ else
+ drm_fbdev_release(fb_helper);
+}
+
+static int drm_fbdev_client_restore(struct drm_client_dev *client)
+{
+ drm_fb_helper_lastclose(client->dev);
+
+ return 0;
+}
+
+static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ /* Setup is not retried if it has failed */
+ if (!fb_helper->dev && fb_helper->funcs)
+ return 0;
+
+ if (dev->fb_helper)
+ return drm_fb_helper_hotplug_event(dev->fb_helper);
+
+ if (!dev->mode_config.num_connector) {
+ drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n");
+ return 0;
+ }
+
+ drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
+
+ ret = drm_fb_helper_init(dev, fb_helper);
+ if (ret)
+ goto err;
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp);
+ if (ret)
+ goto err_cleanup;
+
+ return 0;
+
+err_cleanup:
+ drm_fbdev_cleanup(fb_helper);
+err:
+ fb_helper->dev = NULL;
+ fb_helper->info = NULL;
+
+ drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
+
+ return ret;
+}
+
+static const struct drm_client_funcs drm_fbdev_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = drm_fbdev_client_unregister,
+ .restore = drm_fbdev_client_restore,
+ .hotplug = drm_fbdev_client_hotplug,
+};
+
+/**
+ * drm_fbdev_generic_setup() - Setup generic fbdev emulation
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ * @dev->mode_config.preferred_depth is used if this is zero.
+ *
+ * This function sets up generic fbdev emulation for drivers that support
+ * dumb buffers with a virtual address and that can be mmap'ed.
+ * drm_fbdev_generic_setup() shall be called after the DRM driver has
+ * registered the new DRM device with drm_dev_register().
+ *
+ * Restore, hotplug events and teardown are all taken care of. Drivers that do
+ * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
+ * Simple drivers might use drm_mode_config_helper_suspend().
+ *
+ * Drivers that set the dirty callback on their framebuffer will get a shadow
+ * fbdev buffer that is blitted onto the real buffer. This is done in order to
+ * make deferred I/O work with all kinds of buffers. A shadow buffer can be
+ * requested explicitly by setting struct drm_mode_config.prefer_shadow or
+ * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
+ * required to use generic fbdev emulation with SHMEM helpers.
+ *
+ * This function is safe to call even when there are no connectors present.
+ * Setup will be retried on the next hotplug event.
+ *
+ * The fbdev is destroyed by drm_dev_unregister().
+ */
+void drm_fbdev_generic_setup(struct drm_device *dev,
+ unsigned int preferred_bpp)
+{
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
+ drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
+
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper)
+ return;
+
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+ if (ret) {
+ kfree(fb_helper);
+ drm_err(dev, "Failed to register client: %d\n", ret);
+ return;
+ }
+
+ /*
+ * FIXME: This mixes up depth with bpp, which results in a glorious
+ * mess: some drivers pick wrong fbdev defaults and others wrong
+ * preferred_depth defaults.
+ */
+ if (!preferred_bpp)
+ preferred_bpp = dev->mode_config.preferred_depth;
+ if (!preferred_bpp)
+ preferred_bpp = 32;
+ fb_helper->preferred_bpp = preferred_bpp;
+
+ ret = drm_fbdev_client_hotplug(&fb_helper->client);
+ if (ret)
+ drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
+
+ drm_client_register(&fb_helper->client);
+}
+EXPORT_SYMBOL(drm_fbdev_generic_setup);
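
Editor's note, not part of the patch: a minimal sketch of the call order the kernel-doc above describes. The foo_* name is hypothetical; passing 0 as preferred_bpp relies on the fallback to dev->mode_config.preferred_depth (or 32) shown in the function body.

#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>

/* Hypothetical driver code: register the device first, then set up fbdev. */
static int foo_register_and_setup_fbdev(struct drm_device *dev)
{
	int ret;

	ret = drm_dev_register(dev, 0);
	if (ret)
		return ret;

	/* Errors are handled internally; 0 selects preferred_depth or 32 bpp. */
	drm_fbdev_generic_setup(dev, 0);

	return 0;
}
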
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index b6a0110eb64a..e42800718f51 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -360,48 +360,43 @@ void drm_gem_reset_shadow_plane(struct drm_plane *plane)
EXPORT_SYMBOL(drm_gem_reset_shadow_plane);
/**
- * drm_gem_prepare_shadow_fb - prepares shadow framebuffers
+ * drm_gem_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
* @plane: the plane
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
- * This function implements struct &drm_plane_helper_funcs.prepare_fb. It
+ * This function implements struct &drm_plane_helper_funcs.begin_fb_access. It
* maps all buffer objects of the plane's framebuffer into kernel address
- * space and stores them in &struct drm_shadow_plane_state.map. The
- * framebuffer will be synchronized as part of the atomic commit.
+ * space and stores them in struct &drm_shadow_plane_state.map. The first data
+ * bytes are available in struct &drm_shadow_plane_state.data.
*
- * See drm_gem_cleanup_shadow_fb() for cleanup.
+ * See drm_gem_end_shadow_fb_access() for cleanup.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
-int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state)
+int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
- int ret;
if (!fb)
return 0;
- ret = drm_gem_plane_helper_prepare_fb(plane, plane_state);
- if (ret)
- return ret;
-
return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
}
-EXPORT_SYMBOL(drm_gem_prepare_shadow_fb);
+EXPORT_SYMBOL(drm_gem_begin_shadow_fb_access);
/**
- * drm_gem_cleanup_shadow_fb - releases shadow framebuffers
+ * drm_gem_end_shadow_fb_access - releases shadow framebuffers from CPU access
* @plane: the plane
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
- * This function implements struct &drm_plane_helper_funcs.cleanup_fb.
- * This function unmaps all buffer objects of the plane's framebuffer.
+ * This function implements struct &drm_plane_helper_funcs.end_fb_access. It
+ * undoes all effects of drm_gem_begin_shadow_fb_access() in reverse order.
*
- * See drm_gem_prepare_shadow_fb() for more information.
+ * See drm_gem_begin_shadow_fb_access() for more information.
*/
-void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state)
+void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
@@ -411,46 +406,45 @@ void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *
drm_gem_fb_vunmap(fb, shadow_plane_state->map);
}
-EXPORT_SYMBOL(drm_gem_cleanup_shadow_fb);
+EXPORT_SYMBOL(drm_gem_end_shadow_fb_access);
/**
- * drm_gem_simple_kms_prepare_shadow_fb - prepares shadow framebuffers
+ * drm_gem_simple_kms_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
* @pipe: the simple display pipe
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
- * This function implements struct drm_simple_display_funcs.prepare_fb. It
- * maps all buffer objects of the plane's framebuffer into kernel address
- * space and stores them in struct drm_shadow_plane_state.map. The
- * framebuffer will be synchronized as part of the atomic commit.
+ * This function implements struct drm_simple_display_pipe_funcs.begin_fb_access.
*
- * See drm_gem_simple_kms_cleanup_shadow_fb() for cleanup.
+ * See drm_gem_begin_shadow_fb_access() for details and
+ * drm_gem_simple_kms_end_shadow_fb_access() for cleanup.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
-int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state)
+int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
{
- return drm_gem_prepare_shadow_fb(&pipe->plane, plane_state);
+ return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state);
}
-EXPORT_SYMBOL(drm_gem_simple_kms_prepare_shadow_fb);
+EXPORT_SYMBOL(drm_gem_simple_kms_begin_shadow_fb_access);
/**
- * drm_gem_simple_kms_cleanup_shadow_fb - releases shadow framebuffers
+ * drm_gem_simple_kms_end_shadow_fb_access - releases shadow framebuffers from CPU access
* @pipe: the simple display pipe
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
- * This function implements struct drm_simple_display_funcs.cleanup_fb.
- * This function unmaps all buffer objects of the plane's framebuffer.
+ * This function implements struct drm_simple_display_pipe_funcs.end_fb_access.
+ * It undoes all effects of drm_gem_simple_kms_begin_shadow_fb_access() in
+ * reverse order.
*
- * See drm_gem_simple_kms_prepare_shadow_fb().
+ * See drm_gem_simple_kms_begin_shadow_fb_access().
*/
-void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state)
+void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
{
- drm_gem_cleanup_shadow_fb(&pipe->plane, plane_state);
+ drm_gem_end_shadow_fb_access(&pipe->plane, plane_state);
}
-EXPORT_SYMBOL(drm_gem_simple_kms_cleanup_shadow_fb);
+EXPORT_SYMBOL(drm_gem_simple_kms_end_shadow_fb_access);
/**
* drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane
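
Editor's note, not part of the patch: a hedged illustration of how a shadow-buffered plane can plug the renamed helpers straight into the new begin_fb_access/end_fb_access hooks of struct drm_plane_helper_funcs, as their kernel-doc above states. The foo_* vtable name is hypothetical and the remaining callbacks are omitted.

#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

/* Hypothetical plane helper vtable; atomic_check/atomic_update etc. omitted. */
static const struct drm_plane_helper_funcs foo_shadow_plane_helper_funcs = {
	.begin_fb_access = drm_gem_begin_shadow_fb_access,
	.end_fb_access = drm_gem_end_shadow_fb_access,
};
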
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index e35e224e6303..e93533b86037 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <drm/drm_damage_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 2fc21df709bc..bcd9611dabfd 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -36,7 +36,6 @@
#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 31233c6ae3c4..3ef420ec4534 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -285,6 +285,30 @@ static void drm_simple_kms_plane_cleanup_fb(struct drm_plane *plane,
pipe->funcs->cleanup_fb(pipe, state);
}
+static int drm_simple_kms_plane_begin_fb_access(struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+ if (!pipe->funcs || !pipe->funcs->begin_fb_access)
+ return 0;
+
+ return pipe->funcs->begin_fb_access(pipe, new_plane_state);
+}
+
+static void drm_simple_kms_plane_end_fb_access(struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+ if (!pipe->funcs || !pipe->funcs->end_fb_access)
+ return;
+
+ pipe->funcs->end_fb_access(pipe, new_plane_state);
+}
+
static bool drm_simple_kms_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
@@ -295,6 +319,8 @@ static bool drm_simple_kms_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
.prepare_fb = drm_simple_kms_plane_prepare_fb,
.cleanup_fb = drm_simple_kms_plane_cleanup_fb,
+ .begin_fb_access = drm_simple_kms_plane_begin_fb_access,
+ .end_fb_access = drm_simple_kms_plane_end_fb_access,
.atomic_check = drm_simple_kms_plane_atomic_check,
.atomic_update = drm_simple_kms_plane_atomic_update,
};
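
Editor's note, not part of the patch: the simple-KMS counterpart of the sketch above. The new plane hooks plumbed in this hunk forward to the optional pipe callbacks, so a shadow-buffered pipe only has to point them at the renamed GEM helpers. The foo_* name is hypothetical and the enable/disable/update callbacks are omitted.

#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_simple_kms_helper.h>

/* Hypothetical simple-display-pipe vtable; enable/disable/update omitted. */
static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
	.begin_fb_access = drm_gem_simple_kms_begin_shadow_fb_access,
	.end_fb_access = drm_gem_simple_kms_end_shadow_fb_access,
};
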
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index f32f4771dada..2bb4c25565dc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -6,13 +6,14 @@
#ifndef __ETNAVIV_DRV_H__
#define __ETNAVIV_DRV_H__
+#include <linux/io.h>
#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/types.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/etnaviv_drm.h>
#include <drm/gpu_scheduler.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 767afd2bfa82..55c92372fca0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -49,6 +49,8 @@ static const struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = exynos_drm_fb_mmap,
+ .fb_read = drm_fb_helper_cfb_read,
+ .fb_write = drm_fb_helper_cfb_write,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
@@ -63,7 +65,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
unsigned int size = fb->width * fb->height * fb->format->cpp[0];
unsigned long offset;
- fbi = drm_fb_helper_alloc_fbi(helper);
+ fbi = drm_fb_helper_alloc_info(helper);
if (IS_ERR(fbi)) {
DRM_DEV_ERROR(to_dma_dev(helper->dev),
"failed to allocate fb info.\n");
@@ -201,7 +203,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_remove(fb);
}
- drm_fb_helper_unregister_fbi(fb_helper);
+ drm_fb_helper_unregister_info(fb_helper);
drm_fb_helper_fini(fb_helper);
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index b4acc3422ba4..8579c7629f5e 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -20,7 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 5f502a0048ab..8d5a37b8f110 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -147,6 +147,8 @@ static const struct fb_ops psbfb_unaccel_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
+ .fb_read = drm_fb_helper_cfb_read,
+ .fb_write = drm_fb_helper_cfb_write,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
@@ -268,7 +270,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
memset(dev_priv->vram_addr + backing->offset, 0, size);
- info = drm_fb_helper_alloc_fbi(fb_helper);
+ info = drm_fb_helper_alloc_info(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_drm_gem_object_put;
@@ -383,7 +385,7 @@ static int psb_fbdev_destroy(struct drm_device *dev,
{
struct drm_framebuffer *fb = fb_helper->fb;
- drm_fb_helper_unregister_fbi(fb_helper);
+ drm_fb_helper_unregister_info(fb_helper);
drm_fb_helper_fini(fb_helper);
drm_framebuffer_unregister_private(fb);
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index 8d1630b8edac..d57dab104358 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -18,7 +18,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 5a2e1cac06b2..22053c613644 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 4a0cd22c10e2..f957552c6c50 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -19,7 +19,6 @@
#include <linux/i2c.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
struct hibmc_connector {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index c228091fb0e6..8c6d2ea2a472 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -11,6 +11,8 @@
* Jianhua Li <lijianhua@huawei.com>
*/
+#include <linux/io.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 73ee7f25f734..9c5d49bf40c9 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -19,7 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index ca127ff797f7..427c20ba3404 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -11,7 +11,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index 28e732f94bf2..6c6b57298797 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -8,7 +8,6 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index ab385d18ddcc..5575d7abdc09 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -124,6 +124,8 @@ static const struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_set_par = intel_fbdev_set_par,
+ .fb_read = drm_fb_helper_cfb_read,
+ .fb_write = drm_fb_helper_cfb_write,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
@@ -254,7 +256,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unlock;
}
- info = drm_fb_helper_alloc_fbi(helper);
+ info = drm_fb_helper_alloc_info(helper);
if (IS_ERR(info)) {
drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info);
ret = PTR_ERR(info);
@@ -584,7 +586,7 @@ void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
if (!current_is_async())
intel_fbdev_sync(ifbdev);
- drm_fb_helper_unregister_fbi(&ifbdev->helper);
+ drm_fb_helper_unregister_info(&ifbdev->helper);
}
void intel_fbdev_fini(struct drm_i915_private *dev_priv)
@@ -627,7 +629,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
if (!ifbdev || !ifbdev->vma)
goto set_suspend;
- info = ifbdev->helper.fbdev;
+ info = ifbdev->helper.info;
if (synchronous) {
/* Flush any pending work to turn the console on, and then
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index b4f82ebca532..18df3888b7f9 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -7,7 +7,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
@@ -21,7 +21,6 @@ DEFINE_DRM_GEM_DMA_FOPS(dcss_cma_fops);
static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 8dd8b0f912af..e060fa6cbcb9 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -16,7 +16,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 41799011f73b..c45fc8f4744d 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
@@ -23,7 +24,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 6b34fac3f73a..d64ebd2cf15e 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -19,7 +19,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 06723b2e9b84..0fa0b590830b 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -8,6 +8,7 @@
#include <linux/component.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
@@ -16,7 +17,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index ab0515d2c420..3d5af44bf92d 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -32,7 +32,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_fb_dma_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -1018,7 +1018,6 @@ static const struct drm_bridge_funcs ingenic_drm_bridge_funcs = {
static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
.fb_create = ingenic_drm_gem_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -1629,7 +1628,11 @@ static int ingenic_drm_init(void)
return err;
}
- return platform_driver_register(&ingenic_drm_driver);
+ err = platform_driver_register(&ingenic_drm_driver);
+ if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && err)
+ platform_driver_unregister(ingenic_ipu_driver_ptr);
+
+ return err;
}
module_init(ingenic_drm_init);
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 2382ccb3ee99..d29c678f6c91 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -15,7 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index a42f63f6f957..d172a302f902 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -9,7 +9,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_dma_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
index cc9a4e965f77..9de24d9f0c96 100644
--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
@@ -17,7 +17,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/logicvc/logicvc_mode.c b/drivers/gpu/drm/logicvc/logicvc_mode.c
index d8207ffda1af..9971950ebd4e 100644
--- a/drivers/gpu/drm/logicvc/logicvc_mode.c
+++ b/drivers/gpu/drm/logicvc/logicvc_mode.c
@@ -10,7 +10,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
@@ -26,7 +25,6 @@
static const struct drm_mode_config_funcs logicvc_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 1c4482ad507d..4aedb050d2a5 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -69,7 +69,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -203,7 +203,6 @@ DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver mcde_drm_driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .lastclose = drm_fb_helper_lastclose,
.ioctls = NULL,
.fops = &drm_fops,
.name = "mcde",
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 91f58db5915f..39a42dc8fb85 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -17,7 +17,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 3b24a924b7b9..79bfe3938d3c 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -18,7 +18,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index 5675bc2a92cf..3f73b211fa8e 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -116,9 +116,10 @@ static int meson_encoder_cvbs_get_modes(struct drm_bridge *bridge,
return i;
}
-static int meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_info *display_info,
- const struct drm_display_mode *mode)
+static enum drm_mode_status
+meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *display_info,
+ const struct drm_display_mode *mode)
{
if (meson_cvbs_get_mode(mode))
return MODE_OK;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index ece6cd102dbb..976f0ab2006b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -11,6 +11,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index f0c2349404b4..9e604dbb8e44 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -18,7 +18,6 @@
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_plane.h>
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index b373e3000320..31e1e30cb52a 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -93,7 +93,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
goto fail;
}
- fbi = drm_fb_helper_alloc_fbi(helper);
+ fbi = drm_fb_helper_alloc_info(helper);
if (IS_ERR(fbi)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
@@ -182,7 +182,7 @@ void msm_fbdev_free(struct drm_device *dev)
DBG();
- drm_fb_helper_unregister_fbi(helper);
+ drm_fb_helper_unregister_info(helper);
drm_fb_helper_fini(helper);
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index 075002ed6fb0..cc2ceb301b96 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -16,7 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
index f0ad6e2a9352..262bc43b1079 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_kms.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
@@ -5,6 +5,7 @@
* This code is based on drivers/gpu/drm/mxsfb/mxsfb*
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -332,6 +333,18 @@ static void lcdif_enable_controller(struct lcdif_drm_private *lcdif)
{
u32 reg;
+ /* Set FIFO Panic watermarks, low 1/3, high 2/3. */
+ writel(FIELD_PREP(PANIC0_THRES_LOW_MASK, 1 * PANIC0_THRES_MAX / 3) |
+ FIELD_PREP(PANIC0_THRES_HIGH_MASK, 2 * PANIC0_THRES_MAX / 3),
+ lcdif->base + LCDC_V8_PANIC0_THRES);
+
+ /*
+ * Enable FIFO Panic. This does not generate an interrupt, but it
+ * boosts NoC priority based on the FIFO Panic watermarks.
+ */
+ writel(INT_ENABLE_D1_PLANE_PANIC_EN,
+ lcdif->base + LCDC_V8_INT_ENABLE_D1);
+
reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
reg |= DISP_PARA_DISP_ON;
writel(reg, lcdif->base + LCDC_V8_DISP_PARA);
@@ -359,6 +372,9 @@ static void lcdif_disable_controller(struct lcdif_drm_private *lcdif)
reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
reg &= ~DISP_PARA_DISP_ON;
writel(reg, lcdif->base + LCDC_V8_DISP_PARA);
+
+ /* Disable FIFO Panic NoC priority booster. */
+ writel(0, lcdif->base + LCDC_V8_INT_ENABLE_D1);
}
static void lcdif_reset_block(struct lcdif_drm_private *lcdif)
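
Editor's note, not part of the patch: a worked example of the watermark arithmetic above. With PANIC0_THRES_MAX defined as 511 in lcdif_regs.h below, the low mark lands at 170 and the high mark at 340, roughly 1/3 and 2/3 of the FIFO.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Same math as lcdif_enable_controller(), with the masks spelled out. */
static inline u32 example_panic0_thres_value(void)
{
	return FIELD_PREP(GENMASK(24, 16), 1 * 511 / 3) |	/* low mark  = 170 */
	       FIELD_PREP(GENMASK(8, 0), 2 * 511 / 3);		/* high mark = 340 */
}
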
diff --git a/drivers/gpu/drm/mxsfb/lcdif_regs.h b/drivers/gpu/drm/mxsfb/lcdif_regs.h
index fb74eb5ccbf1..c55dfb236c1d 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_regs.h
+++ b/drivers/gpu/drm/mxsfb/lcdif_regs.h
@@ -255,6 +255,7 @@
#define PANIC0_THRES_LOW_MASK GENMASK(24, 16)
#define PANIC0_THRES_HIGH_MASK GENMASK(8, 0)
+#define PANIC0_THRES_MAX 511
#define LCDIF_MIN_XRES 120
#define LCDIF_MIN_YRES 120
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index b29b332ed381..810edea0a31e 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -20,7 +20,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index a19f18b251f3..80f154b6adab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
new file mode 100644
index 000000000000..e87de7906f78
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/screen_info.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/console.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_atomic.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_gem.h"
+#include "nouveau_bo.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_chan.h"
+#include "nouveau_vmm.h"
+
+#include "nouveau_crtc.h"
+
+MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
+int nouveau_nofbaccel = 0;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+
+MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)");
+static int nouveau_fbcon_bpp;
+module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400);
+
+static void
+nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
+ struct nvif_device *device = &drm->client.device;
+ int ret;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ ret = -ENODEV;
+ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ mutex_trylock(&drm->client.mutex)) {
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
+ ret = nv04_fbcon_fillrect(info, rect);
+ else
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
+ ret = nv50_fbcon_fillrect(info, rect);
+ else
+ ret = nvc0_fbcon_fillrect(info, rect);
+ mutex_unlock(&drm->client.mutex);
+ }
+
+ if (ret == 0)
+ return;
+
+ if (ret != -ENODEV)
+ nouveau_fbcon_gpu_lockup(info);
+ drm_fb_helper_cfb_fillrect(info, rect);
+}
+
+static void
+nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
+{
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
+ struct nvif_device *device = &drm->client.device;
+ int ret;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ ret = -ENODEV;
+ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ mutex_trylock(&drm->client.mutex)) {
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
+ ret = nv04_fbcon_copyarea(info, image);
+ else
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
+ ret = nv50_fbcon_copyarea(info, image);
+ else
+ ret = nvc0_fbcon_copyarea(info, image);
+ mutex_unlock(&drm->client.mutex);
+ }
+
+ if (ret == 0)
+ return;
+
+ if (ret != -ENODEV)
+ nouveau_fbcon_gpu_lockup(info);
+ drm_fb_helper_cfb_copyarea(info, image);
+}
+
+static void
+nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
+ struct nvif_device *device = &drm->client.device;
+ int ret;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ ret = -ENODEV;
+ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ mutex_trylock(&drm->client.mutex)) {
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
+ ret = nv04_fbcon_imageblit(info, image);
+ else
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
+ ret = nv50_fbcon_imageblit(info, image);
+ else
+ ret = nvc0_fbcon_imageblit(info, image);
+ mutex_unlock(&drm->client.mutex);
+ }
+
+ if (ret == 0)
+ return;
+
+ if (ret != -ENODEV)
+ nouveau_fbcon_gpu_lockup(info);
+ drm_fb_helper_cfb_imageblit(info, image);
+}
+
+static int
+nouveau_fbcon_sync(struct fb_info *info)
+{
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
+ struct nouveau_channel *chan = drm->channel;
+ int ret;
+
+ if (!chan || !chan->accel_done || in_interrupt() ||
+ info->state != FBINFO_STATE_RUNNING ||
+ info->flags & FBINFO_HWACCEL_DISABLED)
+ return 0;
+
+ if (!mutex_trylock(&drm->client.mutex))
+ return 0;
+
+ ret = nouveau_channel_idle(chan);
+ mutex_unlock(&drm->client.mutex);
+ if (ret) {
+ nouveau_fbcon_gpu_lockup(info);
+ return 0;
+ }
+
+ chan->accel_done = false;
+ return 0;
+}
+
+static int
+nouveau_fbcon_open(struct fb_info *info, int user)
+{
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
+ int ret = pm_runtime_get_sync(drm->dev->dev);
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put(drm->dev->dev);
+ return ret;
+ }
+ return 0;
+}
+
+static int
+nouveau_fbcon_release(struct fb_info *info, int user)
+{
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
+ pm_runtime_put(drm->dev->dev);
+ return 0;
+}
+
+static const struct fb_ops nouveau_fbcon_ops = {
+ .owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_open = nouveau_fbcon_open,
+ .fb_release = nouveau_fbcon_release,
+ .fb_fillrect = nouveau_fbcon_fillrect,
+ .fb_copyarea = nouveau_fbcon_copyarea,
+ .fb_imageblit = nouveau_fbcon_imageblit,
+ .fb_sync = nouveau_fbcon_sync,
+};
+
+static const struct fb_ops nouveau_fbcon_sw_ops = {
+ .owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_open = nouveau_fbcon_open,
+ .fb_release = nouveau_fbcon_release,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
+};
+
+void
+nouveau_fbcon_accel_save_disable(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->fbcon && drm->fbcon->helper.info) {
+ drm->fbcon->saved_flags = drm->fbcon->helper.info->flags;
+ drm->fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+}
+
+void
+nouveau_fbcon_accel_restore(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->fbcon && drm->fbcon->helper.info)
+ drm->fbcon->helper.info->flags = drm->fbcon->saved_flags;
+}
+
+static void
+nouveau_fbcon_accel_fini(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_fbdev *fbcon = drm->fbcon;
+ if (fbcon && drm->channel) {
+ console_lock();
+ if (fbcon->helper.info)
+ fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED;
+ console_unlock();
+ nouveau_channel_idle(drm->channel);
+ nvif_object_dtor(&fbcon->twod);
+ nvif_object_dtor(&fbcon->blit);
+ nvif_object_dtor(&fbcon->gdi);
+ nvif_object_dtor(&fbcon->patt);
+ nvif_object_dtor(&fbcon->rop);
+ nvif_object_dtor(&fbcon->clip);
+ nvif_object_dtor(&fbcon->surf2d);
+ }
+}
+
+static void
+nouveau_fbcon_accel_init(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_fbdev *fbcon = drm->fbcon;
+ struct fb_info *info = fbcon->helper.info;
+ int ret;
+
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
+ ret = nv04_fbcon_accel_init(info);
+ else
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
+ ret = nv50_fbcon_accel_init(info);
+ else
+ ret = nvc0_fbcon_accel_init(info);
+
+ if (ret == 0)
+ info->fbops = &nouveau_fbcon_ops;
+}
+
+static void
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
+{
+ struct fb_info *info = fbcon->helper.info;
+ struct fb_fillrect rect;
+
+ /* Clear the entire fbcon. The drm will program every connector
+ * with its preferred mode. If the sizes differ, one display will
+ * quite likely have garbage around the console.
+ */
+ rect.dx = rect.dy = 0;
+ rect.width = info->var.xres_virtual;
+ rect.height = info->var.yres_virtual;
+ rect.color = 0;
+ rect.rop = ROP_COPY;
+ info->fbops->fb_fillrect(info, &rect);
+}
+
+static int
+nouveau_fbcon_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct nouveau_fbdev *fbcon =
+ container_of(helper, struct nouveau_fbdev, helper);
+ struct drm_device *dev = fbcon->helper.dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvif_device *device = &drm->client.device;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct nouveau_channel *chan;
+ struct nouveau_bo *nvbo;
+ struct drm_mode_fb_cmd2 mode_cmd = {};
+ int ret;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
+ mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] *
+ mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM,
+ 0, 0x0000, &nvbo);
+ if (ret) {
+ NV_ERROR(drm, "failed to allocate framebuffer\n");
+ goto out;
+ }
+
+ ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb);
+ if (ret)
+ goto out_unref;
+
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
+ if (ret) {
+ NV_ERROR(drm, "failed to pin fb: %d\n", ret);
+ goto out_unref;
+ }
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret) {
+ NV_ERROR(drm, "failed to map fb: %d\n", ret);
+ goto out_unpin;
+ }
+
+ chan = nouveau_nofbaccel ? NULL : drm->channel;
+ if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma);
+ if (ret) {
+ NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
+ chan = NULL;
+ }
+ }
+
+ info = drm_fb_helper_alloc_info(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto out_unlock;
+ }
+
+ /* setup helper */
+ fbcon->helper.fb = fb;
+
+ if (!chan)
+ info->flags = FBINFO_HWACCEL_DISABLED;
+ else
+ info->flags = FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_IMAGEBLIT;
+ info->fbops = &nouveau_fbcon_sw_ops;
+ info->fix.smem_start = nvbo->bo.resource->bus.offset;
+ info->fix.smem_len = nvbo->bo.base.size;
+
+ info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
+ info->screen_size = nvbo->bo.base.size;
+
+ drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
+
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+ if (chan)
+ nouveau_fbcon_accel_init(dev);
+ nouveau_fbcon_zfill(dev, fbcon);
+
+ /* To allow resizing without swapping buffers */
+ NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
+ fb->width, fb->height, nvbo->offset, nvbo);
+
+ if (dev_is_pci(dev->dev))
+ vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info);
+
+ return 0;
+
+out_unlock:
+ if (chan)
+ nouveau_vma_del(&fbcon->vma);
+ nouveau_bo_unmap(nvbo);
+out_unpin:
+ nouveau_bo_unpin(nvbo);
+out_unref:
+ nouveau_bo_ref(NULL, &nvbo);
+out:
+ return ret;
+}
+
+static int
+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
+{
+ struct drm_framebuffer *fb = fbcon->helper.fb;
+ struct nouveau_bo *nvbo;
+
+ drm_fb_helper_unregister_info(&fbcon->helper);
+ drm_fb_helper_fini(&fbcon->helper);
+
+ if (fb && fb->obj[0]) {
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ nouveau_vma_del(&fbcon->vma);
+ nouveau_bo_unmap(nvbo);
+ nouveau_bo_unpin(nvbo);
+ drm_framebuffer_put(fb);
+ }
+
+ return 0;
+}
+
+void nouveau_fbcon_gpu_lockup(struct fb_info *info)
+{
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
+
+ NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+}
+
+static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+ .fb_probe = nouveau_fbcon_create,
+};
+
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+ struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+ int state = READ_ONCE(drm->fbcon_new_state);
+
+ if (state == FBINFO_STATE_RUNNING)
+ pm_runtime_get_sync(drm->dev->dev);
+
+ console_lock();
+ if (state == FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_restore(drm->dev);
+ drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+ if (state != FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_save_disable(drm->dev);
+ console_unlock();
+
+ if (state == FBINFO_STATE_RUNNING) {
+ nouveau_fbcon_hotplug_resume(drm->fbcon);
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_autosuspend(drm->dev->dev);
+ }
+}
+
+void
+nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (!drm->fbcon)
+ return;
+
+ drm->fbcon_new_state = state;
+ /* Since runtime resume can happen as a result of a sysfs operation,
+ * it's possible we already have the console locked. So handle fbcon
+ * init/deinit from a separate work thread.
+ */
+ schedule_work(&drm->fbcon_work);
+}
+
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_fbdev *fbcon = drm->fbcon;
+ int ret;
+
+ if (!fbcon)
+ return;
+
+ mutex_lock(&fbcon->hotplug_lock);
+
+ ret = pm_runtime_get(dev->dev);
+ if (ret == 1 || ret == -EACCES) {
+ drm_fb_helper_hotplug_event(&fbcon->helper);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+ } else if (ret == 0) {
+ /* If the GPU was already in the process of suspending before
+ * this event happened, then we can't block here as we'll
+ * deadlock the runtime pmops since they wait for us to
+ * finish. So, just defer this event for when we runtime
+ * resume again. It will be handled by fbcon_work.
+ */
+ NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
+ fbcon->hotplug_waiting = true;
+ pm_runtime_put_noidle(drm->dev->dev);
+ } else {
+ DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
+ ret);
+ }
+
+ mutex_unlock(&fbcon->hotplug_lock);
+}
+
+void
+nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
+{
+ struct nouveau_drm *drm;
+
+ if (!fbcon)
+ return;
+ drm = nouveau_drm(fbcon->helper.dev);
+
+ mutex_lock(&fbcon->hotplug_lock);
+ if (fbcon->hotplug_waiting) {
+ fbcon->hotplug_waiting = false;
+
+ NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
+ drm_fb_helper_hotplug_event(&fbcon->helper);
+ }
+ mutex_unlock(&fbcon->hotplug_lock);
+}
+
+int
+nouveau_fbcon_init(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_fbdev *fbcon;
+ int preferred_bpp = nouveau_fbcon_bpp;
+ int ret;
+
+ if (!dev->mode_config.num_crtc ||
+ (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ return 0;
+
+ fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+ if (!fbcon)
+ return -ENOMEM;
+
+ drm->fbcon = fbcon;
+ INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
+ mutex_init(&fbcon->hotplug_lock);
+
+ drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
+
+ ret = drm_fb_helper_init(dev, &fbcon->helper);
+ if (ret)
+ goto free;
+
+ if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
+ if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
+ preferred_bpp = 8;
+ else
+ if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
+ preferred_bpp = 16;
+ else
+ preferred_bpp = 32;
+ }
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ if (!drm_drv_uses_atomic_modeset(dev))
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
+ if (ret)
+ goto fini;
+
+ if (fbcon->helper.info)
+ fbcon->helper.info->pixmap.buf_align = 4;
+ return 0;
+
+fini:
+ drm_fb_helper_fini(&fbcon->helper);
+free:
+ kfree(fbcon);
+ drm->fbcon = NULL;
+ return ret;
+}
+
+void
+nouveau_fbcon_fini(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (!drm->fbcon)
+ return;
+
+ drm_kms_helper_poll_fini(dev);
+ nouveau_fbcon_accel_fini(dev);
+ nouveau_fbcon_destroy(dev, drm->fbcon);
+ kfree(drm->fbcon);
+ drm->fbcon = NULL;
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index ed67dd25794c..98d8758048fc 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -38,7 +38,7 @@ static struct drm_fb_helper *get_fb(struct fb_info *fbi);
static void pan_worker(struct work_struct *work)
{
struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
- struct fb_info *fbi = fbdev->base.fbdev;
+ struct fb_info *fbi = fbdev->base.info;
int npages;
/* DMM roll shifts in 4K pages: */
@@ -161,7 +161,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
goto fail;
}
- fbi = drm_fb_helper_alloc_fbi(helper);
+ fbi = drm_fb_helper_alloc_info(helper);
if (IS_ERR(fbi)) {
dev_err(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
@@ -272,7 +272,7 @@ void omap_fbdev_fini(struct drm_device *dev)
if (!helper)
return;
- drm_fb_helper_unregister_fbi(helper);
+ drm_fb_helper_unregister_info(helper);
drm_fb_helper_fini(helper);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index a582ddd583c2..6f6cc6662b2e 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -203,6 +203,16 @@ config DRM_PANEL_INNOLUX_P079ZCA
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_JADARD_JD9365DA_H3
+ tristate "Jadard JD9365DA-H3 WXGA DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Jadard JD9365DA-H3
+	  WXGA MIPI DSI panel. The panel supports a TFT dot-matrix LCD with
+	  up to 800RGBx1280 dots.
+
config DRM_PANEL_JDI_LT070ME05000
tristate "JDI LT070ME05000 WUXGA DSI panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 34e717382dbb..af0b1ebdbac8 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_EJ030NA) += panel-innolux-ej030na.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
+obj-$(CONFIG_DRM_PANEL_JADARD_JD9365DA_H3) += panel-jadard-jd9365da-h3.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_JDI_R63452) += panel-jdi-fhd-r63452.o
obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 39dc40cf681f..384a724f2822 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -18,6 +18,7 @@
* Copyright 2018 David Lechner <david@lechnology.com>
*/
+#include <linux/backlight.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
@@ -30,7 +31,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
new file mode 100644
index 000000000000..48c1702a863b
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2019 Radxa Limited
+ * Copyright (c) 2022 Edgeble AI Technologies Pvt. Ltd.
+ *
+ * Author:
+ * - Jagan Teki <jagan@amarulasolutions.com>
+ * - Stephen Chen <stephen@radxa.com>
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#define JD9365DA_INIT_CMD_LEN 2
+
+struct jadard_init_cmd {
+ u8 data[JD9365DA_INIT_CMD_LEN];
+};
+
+struct jadard_panel_desc {
+ const struct drm_display_mode mode;
+ unsigned int lanes;
+ enum mipi_dsi_pixel_format format;
+ const struct jadard_init_cmd *init_cmds;
+ u32 num_init_cmds;
+};
+
+struct jadard {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ const struct jadard_panel_desc *desc;
+
+ struct regulator *vdd;
+ struct regulator *vccio;
+ struct gpio_desc *reset;
+};
+
+static inline struct jadard *panel_to_jadard(struct drm_panel *panel)
+{
+ return container_of(panel, struct jadard, panel);
+}
+
+static int jadard_enable(struct drm_panel *panel)
+{
+ struct device *dev = panel->dev;
+ struct jadard *jadard = panel_to_jadard(panel);
+ const struct jadard_panel_desc *desc = jadard->desc;
+ struct mipi_dsi_device *dsi = jadard->dsi;
+ unsigned int i;
+ int err;
+
+ msleep(10);
+
+ for (i = 0; i < desc->num_init_cmds; i++) {
+ const struct jadard_init_cmd *cmd = &desc->init_cmds[i];
+
+ err = mipi_dsi_dcs_write_buffer(dsi, cmd->data, JD9365DA_INIT_CMD_LEN);
+ if (err < 0)
+ return err;
+ }
+
+ msleep(120);
+
+ err = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (err < 0)
+ DRM_DEV_ERROR(dev, "failed to exit sleep mode ret = %d\n", err);
+
+ err = mipi_dsi_dcs_set_display_on(dsi);
+ if (err < 0)
+ DRM_DEV_ERROR(dev, "failed to set display on ret = %d\n", err);
+
+ return 0;
+}
+
+static int jadard_disable(struct drm_panel *panel)
+{
+ struct device *dev = panel->dev;
+ struct jadard *jadard = panel_to_jadard(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(jadard->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to set display off: %d\n", ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(jadard->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "failed to enter sleep mode: %d\n", ret);
+
+ return 0;
+}
+
+static int jadard_prepare(struct drm_panel *panel)
+{
+ struct jadard *jadard = panel_to_jadard(panel);
+ int ret;
+
+ ret = regulator_enable(jadard->vccio);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(jadard->vdd);
+ if (ret)
+ return ret;
+
+ gpiod_set_value(jadard->reset, 1);
+ msleep(5);
+
+ gpiod_set_value(jadard->reset, 0);
+ msleep(10);
+
+ gpiod_set_value(jadard->reset, 1);
+ msleep(120);
+
+ return 0;
+}
+
+static int jadard_unprepare(struct drm_panel *panel)
+{
+ struct jadard *jadard = panel_to_jadard(panel);
+
+ gpiod_set_value(jadard->reset, 1);
+ msleep(120);
+
+ regulator_disable(jadard->vdd);
+ regulator_disable(jadard->vccio);
+
+ return 0;
+}
+
+static int jadard_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct jadard *jadard = panel_to_jadard(panel);
+ const struct drm_display_mode *desc_mode = &jadard->desc->mode;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, desc_mode);
+ if (!mode) {
+		DRM_DEV_ERROR(&jadard->dsi->dev, "failed to add mode %ux%u@%u\n",
+ desc_mode->hdisplay, desc_mode->vdisplay,
+ drm_mode_vrefresh(desc_mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs jadard_funcs = {
+ .disable = jadard_disable,
+ .unprepare = jadard_unprepare,
+ .prepare = jadard_prepare,
+ .enable = jadard_enable,
+ .get_modes = jadard_get_modes,
+};
+
+static const struct jadard_init_cmd cz101b4001_init_cmds[] = {
+ { .data = { 0xE0, 0x00 } },
+ { .data = { 0xE1, 0x93 } },
+ { .data = { 0xE2, 0x65 } },
+ { .data = { 0xE3, 0xF8 } },
+ { .data = { 0x80, 0x03 } },
+ { .data = { 0xE0, 0x01 } },
+ { .data = { 0x00, 0x00 } },
+ { .data = { 0x01, 0x3B } },
+ { .data = { 0x0C, 0x74 } },
+ { .data = { 0x17, 0x00 } },
+ { .data = { 0x18, 0xAF } },
+ { .data = { 0x19, 0x00 } },
+ { .data = { 0x1A, 0x00 } },
+ { .data = { 0x1B, 0xAF } },
+ { .data = { 0x1C, 0x00 } },
+ { .data = { 0x35, 0x26 } },
+ { .data = { 0x37, 0x09 } },
+ { .data = { 0x38, 0x04 } },
+ { .data = { 0x39, 0x00 } },
+ { .data = { 0x3A, 0x01 } },
+ { .data = { 0x3C, 0x78 } },
+ { .data = { 0x3D, 0xFF } },
+ { .data = { 0x3E, 0xFF } },
+ { .data = { 0x3F, 0x7F } },
+ { .data = { 0x40, 0x06 } },
+ { .data = { 0x41, 0xA0 } },
+ { .data = { 0x42, 0x81 } },
+ { .data = { 0x43, 0x14 } },
+ { .data = { 0x44, 0x23 } },
+ { .data = { 0x45, 0x28 } },
+ { .data = { 0x55, 0x02 } },
+ { .data = { 0x57, 0x69 } },
+ { .data = { 0x59, 0x0A } },
+ { .data = { 0x5A, 0x2A } },
+ { .data = { 0x5B, 0x17 } },
+ { .data = { 0x5D, 0x7F } },
+ { .data = { 0x5E, 0x6B } },
+ { .data = { 0x5F, 0x5C } },
+ { .data = { 0x60, 0x4F } },
+ { .data = { 0x61, 0x4D } },
+ { .data = { 0x62, 0x3F } },
+ { .data = { 0x63, 0x42 } },
+ { .data = { 0x64, 0x2B } },
+ { .data = { 0x65, 0x44 } },
+ { .data = { 0x66, 0x43 } },
+ { .data = { 0x67, 0x43 } },
+ { .data = { 0x68, 0x63 } },
+ { .data = { 0x69, 0x52 } },
+ { .data = { 0x6A, 0x5A } },
+ { .data = { 0x6B, 0x4F } },
+ { .data = { 0x6C, 0x4E } },
+ { .data = { 0x6D, 0x20 } },
+ { .data = { 0x6E, 0x0F } },
+ { .data = { 0x6F, 0x00 } },
+ { .data = { 0x70, 0x7F } },
+ { .data = { 0x71, 0x6B } },
+ { .data = { 0x72, 0x5C } },
+ { .data = { 0x73, 0x4F } },
+ { .data = { 0x74, 0x4D } },
+ { .data = { 0x75, 0x3F } },
+ { .data = { 0x76, 0x42 } },
+ { .data = { 0x77, 0x2B } },
+ { .data = { 0x78, 0x44 } },
+ { .data = { 0x79, 0x43 } },
+ { .data = { 0x7A, 0x43 } },
+ { .data = { 0x7B, 0x63 } },
+ { .data = { 0x7C, 0x52 } },
+ { .data = { 0x7D, 0x5A } },
+ { .data = { 0x7E, 0x4F } },
+ { .data = { 0x7F, 0x4E } },
+ { .data = { 0x80, 0x20 } },
+ { .data = { 0x81, 0x0F } },
+ { .data = { 0x82, 0x00 } },
+ { .data = { 0xE0, 0x02 } },
+ { .data = { 0x00, 0x02 } },
+ { .data = { 0x01, 0x02 } },
+ { .data = { 0x02, 0x00 } },
+ { .data = { 0x03, 0x00 } },
+ { .data = { 0x04, 0x1E } },
+ { .data = { 0x05, 0x1E } },
+ { .data = { 0x06, 0x1F } },
+ { .data = { 0x07, 0x1F } },
+ { .data = { 0x08, 0x1F } },
+ { .data = { 0x09, 0x17 } },
+ { .data = { 0x0A, 0x17 } },
+ { .data = { 0x0B, 0x37 } },
+ { .data = { 0x0C, 0x37 } },
+ { .data = { 0x0D, 0x47 } },
+ { .data = { 0x0E, 0x47 } },
+ { .data = { 0x0F, 0x45 } },
+ { .data = { 0x10, 0x45 } },
+ { .data = { 0x11, 0x4B } },
+ { .data = { 0x12, 0x4B } },
+ { .data = { 0x13, 0x49 } },
+ { .data = { 0x14, 0x49 } },
+ { .data = { 0x15, 0x1F } },
+ { .data = { 0x16, 0x01 } },
+ { .data = { 0x17, 0x01 } },
+ { .data = { 0x18, 0x00 } },
+ { .data = { 0x19, 0x00 } },
+ { .data = { 0x1A, 0x1E } },
+ { .data = { 0x1B, 0x1E } },
+ { .data = { 0x1C, 0x1F } },
+ { .data = { 0x1D, 0x1F } },
+ { .data = { 0x1E, 0x1F } },
+ { .data = { 0x1F, 0x17 } },
+ { .data = { 0x20, 0x17 } },
+ { .data = { 0x21, 0x37 } },
+ { .data = { 0x22, 0x37 } },
+ { .data = { 0x23, 0x46 } },
+ { .data = { 0x24, 0x46 } },
+ { .data = { 0x25, 0x44 } },
+ { .data = { 0x26, 0x44 } },
+ { .data = { 0x27, 0x4A } },
+ { .data = { 0x28, 0x4A } },
+ { .data = { 0x29, 0x48 } },
+ { .data = { 0x2A, 0x48 } },
+ { .data = { 0x2B, 0x1F } },
+ { .data = { 0x2C, 0x01 } },
+ { .data = { 0x2D, 0x01 } },
+ { .data = { 0x2E, 0x00 } },
+ { .data = { 0x2F, 0x00 } },
+ { .data = { 0x30, 0x1F } },
+ { .data = { 0x31, 0x1F } },
+ { .data = { 0x32, 0x1E } },
+ { .data = { 0x33, 0x1E } },
+ { .data = { 0x34, 0x1F } },
+ { .data = { 0x35, 0x17 } },
+ { .data = { 0x36, 0x17 } },
+ { .data = { 0x37, 0x37 } },
+ { .data = { 0x38, 0x37 } },
+ { .data = { 0x39, 0x08 } },
+ { .data = { 0x3A, 0x08 } },
+ { .data = { 0x3B, 0x0A } },
+ { .data = { 0x3C, 0x0A } },
+ { .data = { 0x3D, 0x04 } },
+ { .data = { 0x3E, 0x04 } },
+ { .data = { 0x3F, 0x06 } },
+ { .data = { 0x40, 0x06 } },
+ { .data = { 0x41, 0x1F } },
+ { .data = { 0x42, 0x02 } },
+ { .data = { 0x43, 0x02 } },
+ { .data = { 0x44, 0x00 } },
+ { .data = { 0x45, 0x00 } },
+ { .data = { 0x46, 0x1F } },
+ { .data = { 0x47, 0x1F } },
+ { .data = { 0x48, 0x1E } },
+ { .data = { 0x49, 0x1E } },
+ { .data = { 0x4A, 0x1F } },
+ { .data = { 0x4B, 0x17 } },
+ { .data = { 0x4C, 0x17 } },
+ { .data = { 0x4D, 0x37 } },
+ { .data = { 0x4E, 0x37 } },
+ { .data = { 0x4F, 0x09 } },
+ { .data = { 0x50, 0x09 } },
+ { .data = { 0x51, 0x0B } },
+ { .data = { 0x52, 0x0B } },
+ { .data = { 0x53, 0x05 } },
+ { .data = { 0x54, 0x05 } },
+ { .data = { 0x55, 0x07 } },
+ { .data = { 0x56, 0x07 } },
+ { .data = { 0x57, 0x1F } },
+ { .data = { 0x58, 0x40 } },
+ { .data = { 0x5B, 0x30 } },
+ { .data = { 0x5C, 0x16 } },
+ { .data = { 0x5D, 0x34 } },
+ { .data = { 0x5E, 0x05 } },
+ { .data = { 0x5F, 0x02 } },
+ { .data = { 0x63, 0x00 } },
+ { .data = { 0x64, 0x6A } },
+ { .data = { 0x67, 0x73 } },
+ { .data = { 0x68, 0x1D } },
+ { .data = { 0x69, 0x08 } },
+ { .data = { 0x6A, 0x6A } },
+ { .data = { 0x6B, 0x08 } },
+ { .data = { 0x6C, 0x00 } },
+ { .data = { 0x6D, 0x00 } },
+ { .data = { 0x6E, 0x00 } },
+ { .data = { 0x6F, 0x88 } },
+ { .data = { 0x75, 0xFF } },
+ { .data = { 0x77, 0xDD } },
+ { .data = { 0x78, 0x3F } },
+ { .data = { 0x79, 0x15 } },
+ { .data = { 0x7A, 0x17 } },
+ { .data = { 0x7D, 0x14 } },
+ { .data = { 0x7E, 0x82 } },
+ { .data = { 0xE0, 0x04 } },
+ { .data = { 0x00, 0x0E } },
+ { .data = { 0x02, 0xB3 } },
+ { .data = { 0x09, 0x61 } },
+ { .data = { 0x0E, 0x48 } },
+ { .data = { 0xE0, 0x00 } },
+ { .data = { 0xE6, 0x02 } },
+ { .data = { 0xE7, 0x0C } },
+};
+
+static const struct jadard_panel_desc cz101b4001_desc = {
+ .mode = {
+ .clock = 70000,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 18,
+ .htotal = 800 + 40 + 18 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 4,
+ .vtotal = 1280 + 20 + 4 + 20,
+
+ .width_mm = 62,
+ .height_mm = 110,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_cmds = cz101b4001_init_cmds,
+ .num_init_cmds = ARRAY_SIZE(cz101b4001_init_cmds),
+};
+
+static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct jadard_panel_desc *desc;
+ struct jadard *jadard;
+ int ret;
+
+ jadard = devm_kzalloc(&dsi->dev, sizeof(*jadard), GFP_KERNEL);
+ if (!jadard)
+ return -ENOMEM;
+
+ desc = of_device_get_match_data(dev);
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_NO_EOT_PACKET;
+ dsi->format = desc->format;
+ dsi->lanes = desc->lanes;
+
+ jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(jadard->reset)) {
+ DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n");
+ return PTR_ERR(jadard->reset);
+ }
+
+ jadard->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(jadard->vdd)) {
+ DRM_DEV_ERROR(&dsi->dev, "failed to get vdd regulator\n");
+ return PTR_ERR(jadard->vdd);
+ }
+
+ jadard->vccio = devm_regulator_get(dev, "vccio");
+ if (IS_ERR(jadard->vccio)) {
+ DRM_DEV_ERROR(&dsi->dev, "failed to get vccio regulator\n");
+ return PTR_ERR(jadard->vccio);
+ }
+
+ drm_panel_init(&jadard->panel, dev, &jadard_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&jadard->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&jadard->panel);
+
+ mipi_dsi_set_drvdata(dsi, jadard);
+ jadard->dsi = dsi;
+ jadard->desc = desc;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0)
+ drm_panel_remove(&jadard->panel);
+
+ return ret;
+}
+
+static void jadard_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct jadard *jadard = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&jadard->panel);
+}
+
+static const struct of_device_id jadard_of_match[] = {
+ { .compatible = "chongzhou,cz101b4001", .data = &cz101b4001_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, jadard_of_match);
+
+static struct mipi_dsi_driver jadard_driver = {
+ .probe = jadard_dsi_probe,
+ .remove = jadard_dsi_remove,
+ .driver = {
+ .name = "jadard-jd9365da",
+ .of_match_table = jadard_of_match,
+ },
+};
+module_mipi_dsi_driver(jadard_driver);
+
+MODULE_AUTHOR("Jagan Teki <jagan@edgeble.ai>");
+MODULE_AUTHOR("Stephen Chen <stephen@radxa.com>");
+MODULE_DESCRIPTION("Jadard JD9365DA-H3 WXGA DSI panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index eb25eedb5ee0..00deba0b7271 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -48,7 +48,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 3044ca948ce2..a3b83f89e061 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -37,6 +37,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_module.h>
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 432758ad39a3..76f060810f63 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -38,7 +38,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index cc6754d88b81..c1710ed1cab8 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -80,6 +80,8 @@ static const struct fb_ops radeonfb_ops = {
DRM_FB_HELPER_DEFAULT_OPS,
.fb_open = radeonfb_open,
.fb_release = radeonfb_release,
+ .fb_read = drm_fb_helper_cfb_read,
+ .fb_write = drm_fb_helper_cfb_write,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
@@ -243,7 +245,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
- info = drm_fb_helper_alloc_fbi(helper);
+ info = drm_fb_helper_alloc_info(helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto out;
@@ -309,7 +311,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
{
struct drm_framebuffer *fb = &rfbdev->fb;
- drm_fb_helper_unregister_fbi(&rfbdev->helper);
+ drm_fb_helper_unregister_info(&rfbdev->helper);
if (fb->obj[0]) {
radeonfb_destroy_pinned_object(fb->obj[0]);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index a2776f1d6f2c..d003e8d9e7a2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -20,7 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 813f9f8c8698..6e0788d14c10 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -17,7 +17,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 1641440837af..aeb03a57240f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -9,10 +9,10 @@
#ifndef _ROCKCHIP_DRM_DRV_H
#define _ROCKCHIP_DRM_DRV_H
-#include <drm/drm_fb_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem.h>
+#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/component.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 409eaa1bf092..cfe8b793d344 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -9,7 +9,6 @@
#include <drm/drm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -72,7 +71,6 @@ rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
.fb_create = rockchip_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 614e97aaac80..bf1120e0f573 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -9,6 +9,7 @@
#include <linux/vmalloc.h>
#include <drm/drm.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index c97bc1149663..fe09e5be79bd 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -140,6 +140,73 @@ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
return true;
}
+static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
+{
+ struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
+
+ drm_sched_fence_finished(job->s_fence);
+ WARN_ON(job->s_fence->parent);
+ job->sched->ops->free_job(job);
+}
+
+/* Signal the scheduler finished fence when the entity in question is killed. */
+static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb)
+{
+ struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
+ finish_cb);
+ int r;
+
+ dma_fence_put(f);
+
+ /* Wait for all dependencies to avoid data corruptions */
+ while (!xa_empty(&job->dependencies)) {
+ f = xa_erase(&job->dependencies, job->last_dependency++);
+ r = dma_fence_add_callback(f, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb);
+ if (!r)
+ return;
+
+ dma_fence_put(f);
+ }
+
+ INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
+ schedule_work(&job->work);
+}
+
+/* Remove the entity from the scheduler and kill all pending jobs */
+static void drm_sched_entity_kill(struct drm_sched_entity *entity)
+{
+ struct drm_sched_job *job;
+ struct dma_fence *prev;
+
+ if (!entity->rq)
+ return;
+
+ spin_lock(&entity->rq_lock);
+ entity->stopped = true;
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ spin_unlock(&entity->rq_lock);
+
+ /* Make sure this entity is not used by the scheduler at the moment */
+ wait_for_completion(&entity->entity_idle);
+
+ prev = dma_fence_get(entity->last_scheduled);
+ while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+ struct drm_sched_fence *s_fence = job->s_fence;
+
+ dma_fence_set_error(&s_fence->finished, -ESRCH);
+
+ dma_fence_get(&s_fence->finished);
+ if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb))
+ drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+
+ prev = &s_fence->finished;
+ }
+ dma_fence_put(prev);
+}
+
/**
* drm_sched_entity_flush - Flush a context entity
*
@@ -180,91 +247,13 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
/* For killed process disable any more IBs enqueue right now */
last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
if ((!last_user || last_user == current->group_leader) &&
- (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
- spin_lock(&entity->rq_lock);
- entity->stopped = true;
- drm_sched_rq_remove_entity(entity->rq, entity);
- spin_unlock(&entity->rq_lock);
- }
+ (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+ drm_sched_entity_kill(entity);
return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
-static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
-{
- struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
-
- drm_sched_fence_finished(job->s_fence);
- WARN_ON(job->s_fence->parent);
- job->sched->ops->free_job(job);
-}
-
-
-/* Signal the scheduler finished fence when the entity in question is killed. */
-static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
- struct dma_fence_cb *cb)
-{
- struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
- finish_cb);
-
- dma_fence_put(f);
- INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
- schedule_work(&job->work);
-}
-
-static struct dma_fence *
-drm_sched_job_dependency(struct drm_sched_job *job,
- struct drm_sched_entity *entity)
-{
- if (!xa_empty(&job->dependencies))
- return xa_erase(&job->dependencies, job->last_dependency++);
-
- if (job->sched->ops->dependency)
- return job->sched->ops->dependency(job, entity);
-
- return NULL;
-}
-
-static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
-{
- struct drm_sched_job *job;
- struct dma_fence *f;
- int r;
-
- while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
- struct drm_sched_fence *s_fence = job->s_fence;
-
- /* Wait for all dependencies to avoid data corruptions */
- while ((f = drm_sched_job_dependency(job, entity))) {
- dma_fence_wait(f, false);
- dma_fence_put(f);
- }
-
- drm_sched_fence_scheduled(s_fence);
- dma_fence_set_error(&s_fence->finished, -ESRCH);
-
- /*
- * When pipe is hanged by older entity, new entity might
- * not even have chance to submit it's first job to HW
- * and so entity->last_scheduled will remain NULL
- */
- if (!entity->last_scheduled) {
- drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
- continue;
- }
-
- dma_fence_get(entity->last_scheduled);
- r = dma_fence_add_callback(entity->last_scheduled,
- &job->finish_cb,
- drm_sched_entity_kill_jobs_cb);
- if (r == -ENOENT)
- drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
- else if (r)
- DRM_ERROR("fence add callback failed (%d)\n", r);
- }
-}
-
/**
* drm_sched_entity_fini - Destroy a context entity
*
@@ -278,33 +267,17 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
*/
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = NULL;
-
- if (entity->rq) {
- sched = entity->rq->sched;
- drm_sched_rq_remove_entity(entity->rq, entity);
- }
-
- /* Consumption of existing IBs wasn't completed. Forcefully
- * remove them here.
+ /*
+	 * If consumption of existing IBs wasn't completed, forcefully remove
+	 * them here. This also makes sure that the scheduler won't touch this
+	 * entity any more.
*/
- if (spsc_queue_count(&entity->job_queue)) {
- if (sched) {
- /*
- * Wait for thread to idle to make sure it isn't processing
- * this entity.
- */
- wait_for_completion(&entity->entity_idle);
+ drm_sched_entity_kill(entity);
- }
- if (entity->dependency) {
- dma_fence_remove_callback(entity->dependency,
- &entity->cb);
- dma_fence_put(entity->dependency);
- entity->dependency = NULL;
- }
-
- drm_sched_entity_kill_jobs(entity);
+ if (entity->dependency) {
+ dma_fence_remove_callback(entity->dependency, &entity->cb);
+ dma_fence_put(entity->dependency);
+ entity->dependency = NULL;
}
dma_fence_put(entity->last_scheduled);
@@ -417,6 +390,19 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
return false;
}
+static struct dma_fence *
+drm_sched_job_dependency(struct drm_sched_job *job,
+ struct drm_sched_entity *entity)
+{
+ if (!xa_empty(&job->dependencies))
+ return xa_erase(&job->dependencies, job->last_dependency++);
+
+ if (job->sched->ops->prepare_job)
+ return job->sched->ops->prepare_job(job, entity);
+
+ return NULL;
+}
+
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
struct drm_sched_job *sched_job;
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index d0ff9e11cb69..6ce04c2e90c0 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -286,32 +286,6 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
}
/**
- * drm_sched_dependency_optimized - test if the dependency can be optimized
- *
- * @fence: the dependency fence
- * @entity: the entity which depends on the above fence
- *
- * Returns true if the dependency can be optimized and false otherwise
- */
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
- struct drm_sched_entity *entity)
-{
- struct drm_gpu_scheduler *sched = entity->rq->sched;
- struct drm_sched_fence *s_fence;
-
- if (!fence || dma_fence_is_signaled(fence))
- return false;
- if (fence->context == entity->fence_context)
- return true;
- s_fence = to_drm_sched_fence(fence);
- if (s_fence && s_fence->sched == sched)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(drm_sched_dependency_optimized);
-
-/**
* drm_sched_start_timeout - start timeout for reset worker
*
* @sched: scheduler instance to start the worker for
@@ -773,32 +747,28 @@ int drm_sched_job_add_dependency(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_add_dependency);
/**
- * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
- * dependencies
+ * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
* @job: scheduler job to add the dependencies to
- * @obj: the gem object to add new dependencies from.
- * @write: whether the job might write the object (so we need to depend on
- * shared fences in the reservation object).
+ * @resv: the dma_resv object to get the fences from
+ * @usage: the dma_resv_usage to use to filter the fences
*
- * This should be called after drm_gem_lock_reservations() on your array of
- * GEM objects used in the job but before updating the reservations with your
- * own fences.
+ * This adds all fences matching the given usage from @resv to @job.
+ * Must be called with the @resv lock held.
*
* Returns:
* 0 on success, or an error on failing to expand the array.
*/
-int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
- struct drm_gem_object *obj,
- bool write)
+int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage)
{
struct dma_resv_iter cursor;
struct dma_fence *fence;
int ret;
- dma_resv_assert_held(obj->resv);
+ dma_resv_assert_held(resv);
- dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write),
- fence) {
+ dma_resv_for_each_fence(&cursor, resv, usage, fence) {
/* Make sure to grab an additional ref on the added fence */
dma_fence_get(fence);
ret = drm_sched_job_add_dependency(job, fence);
@@ -809,8 +779,31 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
}
return 0;
}
-EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
+EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
+/**
+ * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
+ * dependencies
+ * @job: scheduler job to add the dependencies to
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
+ *
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+ struct drm_gem_object *obj,
+ bool write)
+{
+ return drm_sched_job_add_resv_dependencies(job, obj->resv,
+ dma_resv_usage_rw(write));
+}
+EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
/**
* drm_sched_job_cleanup - clean up scheduler job resources
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index f2795f90ea69..53464afc2b9a 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -23,7 +23,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 7abf010a3293..ef6a4e63198f 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -14,7 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index d7914f5122df..50410bd99dfe 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -18,7 +18,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index d06ffd99d86e..cc94efbbf2d4 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -17,7 +17,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 34234a144e87..760ff05eabf4 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -1101,12 +1101,16 @@ static const struct component_ops sun6i_dsi_ops = {
static int sun6i_dsi_probe(struct platform_device *pdev)
{
+ const struct sun6i_dsi_variant *variant;
struct device *dev = &pdev->dev;
- const char *bus_clk_name = NULL;
struct sun6i_dsi *dsi;
void __iomem *base;
int ret;
+ variant = device_get_match_data(dev);
+ if (!variant)
+ return -EINVAL;
+
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
@@ -1114,10 +1118,7 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
dsi->dev = dev;
dsi->host.ops = &sun6i_dsi_host_ops;
dsi->host.dev = dev;
-
- if (of_device_is_compatible(dev->of_node,
- "allwinner,sun6i-a31-mipi-dsi"))
- bus_clk_name = "bus";
+ dsi->variant = variant;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
@@ -1142,7 +1143,7 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
return PTR_ERR(dsi->regs);
}
- dsi->bus_clk = devm_clk_get(dev, bus_clk_name);
+ dsi->bus_clk = devm_clk_get(dev, variant->has_mod_clk ? "bus" : NULL);
if (IS_ERR(dsi->bus_clk))
return dev_err_probe(dev, PTR_ERR(dsi->bus_clk),
"Couldn't get the DSI bus clock\n");
@@ -1151,21 +1152,21 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (of_device_is_compatible(dev->of_node,
- "allwinner,sun6i-a31-mipi-dsi")) {
+ if (variant->has_mod_clk) {
dsi->mod_clk = devm_clk_get(dev, "mod");
if (IS_ERR(dsi->mod_clk)) {
dev_err(dev, "Couldn't get the DSI mod clock\n");
ret = PTR_ERR(dsi->mod_clk);
goto err_attach_clk;
}
- }
- /*
- * In order to operate properly, that clock seems to be always
- * set to 297MHz.
- */
- clk_set_rate_exclusive(dsi->mod_clk, 297000000);
+ /*
+ * In order to operate properly, the module clock on the
+ * A31 variant always seems to be set to 297MHz.
+ */
+ if (variant->set_mod_clk)
+ clk_set_rate_exclusive(dsi->mod_clk, 297000000);
+ }
dsi->dphy = devm_phy_get(dev, "dphy");
if (IS_ERR(dsi->dphy)) {
@@ -1191,7 +1192,8 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
err_remove_dsi_host:
mipi_dsi_host_unregister(&dsi->host);
err_unprotect_clk:
- clk_rate_exclusive_put(dsi->mod_clk);
+ if (dsi->variant->has_mod_clk && dsi->variant->set_mod_clk)
+ clk_rate_exclusive_put(dsi->mod_clk);
err_attach_clk:
regmap_mmio_detach_clk(dsi->regs);
@@ -1205,16 +1207,39 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
component_del(&pdev->dev, &sun6i_dsi_ops);
mipi_dsi_host_unregister(&dsi->host);
- clk_rate_exclusive_put(dsi->mod_clk);
+ if (dsi->variant->has_mod_clk && dsi->variant->set_mod_clk)
+ clk_rate_exclusive_put(dsi->mod_clk);
regmap_mmio_detach_clk(dsi->regs);
return 0;
}
+static const struct sun6i_dsi_variant sun6i_a31_mipi_dsi_variant = {
+ .has_mod_clk = true,
+ .set_mod_clk = true,
+};
+
+static const struct sun6i_dsi_variant sun50i_a64_mipi_dsi_variant = {
+};
+
+static const struct sun6i_dsi_variant sun50i_a100_mipi_dsi_variant = {
+ .has_mod_clk = true,
+};
+
static const struct of_device_id sun6i_dsi_of_table[] = {
- { .compatible = "allwinner,sun6i-a31-mipi-dsi" },
- { .compatible = "allwinner,sun50i-a64-mipi-dsi" },
+ {
+ .compatible = "allwinner,sun6i-a31-mipi-dsi",
+ .data = &sun6i_a31_mipi_dsi_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-mipi-dsi",
+ .data = &sun50i_a64_mipi_dsi_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-a100-mipi-dsi",
+ .data = &sun50i_a100_mipi_dsi_variant,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index c863900ae3b4..f1ddefe0f554 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -15,6 +15,11 @@
#define SUN6I_DSI_TCON_DIV 4
+struct sun6i_dsi_variant {
+ bool has_mod_clk;
+ bool set_mod_clk;
+};
+
struct sun6i_dsi {
struct drm_connector connector;
struct drm_encoder encoder;
@@ -31,6 +36,8 @@ struct sun6i_dsi {
struct mipi_dsi_device *device;
struct drm_device *drm;
struct drm_panel *panel;
+
+ const struct sun6i_dsi_variant *variant;
};
static inline struct sun6i_dsi *host_to_sun6i_dsi(struct mipi_dsi_host *host)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index bce71c0ccc9e..a900300ae5bd 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -206,6 +206,8 @@ static int tegra_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
static const struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
@@ -243,7 +245,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
if (IS_ERR(bo))
return PTR_ERR(bo);
- info = drm_fb_helper_alloc_fbi(helper);
+ info = drm_fb_helper_alloc_info(helper);
if (IS_ERR(info)) {
dev_err(drm->dev, "failed to allocate framebuffer info\n");
drm_gem_object_put(&bo->gem);
@@ -261,7 +263,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
fb = fbdev->fb;
helper->fb = fb;
- helper->fbdev = info;
+ helper->info = info;
info->fbops = &tegra_fb_ops;
@@ -347,7 +349,7 @@ fini:
static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
{
- drm_fb_helper_unregister_fbi(&fbdev->base);
+ drm_fb_helper_unregister_info(&fbdev->base);
if (fbdev->fb) {
struct tegra_bo *bo = tegra_fb_get_plane(fbdev->fb, 0);
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 15cd9b91b7e2..07d94b1e8089 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -14,7 +14,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index afb2879980c6..345bcc3011e4 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -10,7 +10,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index f72755b8ea14..80615ecdae0b 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -16,7 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index a300b03a3c7a..f6889f649bc1 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -53,7 +53,7 @@ config DRM_GM12U320
config DRM_OFDRM
tristate "Open Firmware display driver"
- depends on DRM && OF && (PPC || COMPILE_TEST)
+ depends on DRM && MMU && OF && (PPC || COMPILE_TEST)
select APERTURE_HELPERS
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index bb302a3fd6b5..611bbee15071 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -12,7 +12,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_dma_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 04682f831544..024346054c70 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -7,7 +7,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index 354d5e854a6f..678c2ef1cae7 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -30,7 +30,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 7441d992a5d7..130fd07a967d 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -12,7 +12,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 48c24aa8c28a..9f634f720817 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -18,7 +18,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c
index 9a1a5943bee0..ca0451f79962 100644
--- a/drivers/gpu/drm/tiny/ili9163.c
+++ b/drivers/gpu/drm/tiny/ili9163.c
@@ -9,7 +9,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_mipi_dbi.h>
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index a79da2b4af64..815bab285823 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -20,7 +20,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 69b265e78096..420f6005a956 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -17,7 +17,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index c80028bb1d11..1bb847466b10 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -16,7 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index bc522fb3d94d..47df2b5a3048 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -15,7 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c
index 0e1cc2369afc..dc9e4d71b12a 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/tiny/ofdrm.c
@@ -11,7 +11,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -231,7 +231,7 @@ static u64 display_get_address_of(struct drm_device *dev, struct device_node *of
return address;
}
-static bool is_avivo(__be32 vendor, __be32 device)
+static bool is_avivo(u32 vendor, u32 device)
{
/* This will match most R5xx */
return (vendor == PCI_VENDOR_ID_ATI) &&
@@ -265,8 +265,13 @@ static enum ofdrm_model display_get_model_of(struct drm_device *dev, struct devi
of_parent = of_get_parent(of_node);
vendor_p = of_get_property(of_parent, "vendor-id", NULL);
device_p = of_get_property(of_parent, "device-id", NULL);
- if (vendor_p && device_p && is_avivo(*vendor_p, *device_p))
- model = OFDRM_MODEL_AVIVO;
+ if (vendor_p && device_p) {
+ u32 vendor = be32_to_cpup(vendor_p);
+ u32 device = be32_to_cpup(device_p);
+
+ if (is_avivo(vendor, device))
+ model = OFDRM_MODEL_AVIVO;
+ }
of_node_put(of_parent);
} else if (of_device_is_compatible(of_node, "qemu,std-vga")) {
model = OFDRM_MODEL_QEMU;
@@ -433,21 +438,21 @@ static void __iomem *get_cmap_address_of(struct ofdrm_device *odev, struct devic
if (!addr_p)
addr_p = of_get_address(of_node, bar_no, &max_size, &flags);
if (!addr_p)
- return ERR_PTR(-ENODEV);
+ return IOMEM_ERR_PTR(-ENODEV);
if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
- return ERR_PTR(-ENODEV);
+ return IOMEM_ERR_PTR(-ENODEV);
if ((offset + size) >= max_size)
- return ERR_PTR(-ENODEV);
+ return IOMEM_ERR_PTR(-ENODEV);
address = of_translate_address(of_node, addr_p);
if (address == OF_BAD_ADDR)
- return ERR_PTR(-ENODEV);
+ return IOMEM_ERR_PTR(-ENODEV);
mem = devm_ioremap(dev->dev, address + offset, size);
if (!mem)
- return ERR_PTR(-ENOMEM);
+ return IOMEM_ERR_PTR(-ENOMEM);
return mem;
}
@@ -465,7 +470,7 @@ static void __iomem *ofdrm_mach64_cmap_ioremap(struct ofdrm_device *odev,
cmap_base = devm_ioremap(dev->dev, address, 0x1000);
if (!cmap_base)
- return ERR_PTR(-ENOMEM);
+ return IOMEM_ERR_PTR(-ENOMEM);
return cmap_base;
}
@@ -624,11 +629,11 @@ static void __iomem *ofdrm_qemu_cmap_ioremap(struct ofdrm_device *odev,
address = of_translate_address(of_node, io_of_addr);
if (address == OF_BAD_ADDR)
- return ERR_PTR(-ENODEV);
+ return IOMEM_ERR_PTR(-ENODEV);
cmap_base = devm_ioremap(dev->dev, address + 0x3c8, 2);
if (!cmap_base)
- return ERR_PTR(-ENOMEM);
+ return IOMEM_ERR_PTR(-ENOMEM);
return cmap_base;
}
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index 955a61d628e7..03a7d569cd56 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -16,7 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index e62f4d16b2c6..c2677d081a7b 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -26,7 +26,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index cbb100753154..162eb44dcba8 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -15,7 +15,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index b6f620b902e6..ce57fa9917e5 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -16,7 +16,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index c36ba08acda1..15d9cf283c66 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -18,7 +18,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 21b61631f73a..9f6764bf3b15 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -344,6 +344,28 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
return p->private;
}
+/* Called when we got a page, either from a pool or newly allocated */
+static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
+ struct page *p, dma_addr_t **dma_addr,
+ unsigned long *num_pages,
+ struct page ***pages)
+{
+ unsigned int i;
+ int r;
+
+ if (*dma_addr) {
+ r = ttm_pool_map(pool, order, p, dma_addr);
+ if (r)
+ return r;
+ }
+
+ *num_pages -= 1 << order;
+ for (i = 1 << order; i; --i, ++(*pages), ++p)
+ **pages = p;
+
+ return 0;
+}
+
/**
* ttm_pool_alloc - Fill a ttm_tt object
*
@@ -385,45 +407,57 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
- bool apply_caching = false;
struct ttm_pool_type *pt;
pt = ttm_pool_select_type(pool, tt->caching, order);
p = pt ? ttm_pool_type_take(pt) : NULL;
if (p) {
- apply_caching = true;
- } else {
- p = ttm_pool_alloc_page(pool, gfp_flags, order);
- if (p && PageHighMem(p))
- apply_caching = true;
- }
-
- if (!p) {
- if (order) {
- --order;
- continue;
- }
- r = -ENOMEM;
- goto error_free_all;
- }
-
- if (apply_caching) {
r = ttm_pool_apply_caching(caching, pages,
tt->caching);
if (r)
goto error_free_page;
- caching = pages + (1 << order);
+
+ do {
+ r = ttm_pool_page_allocated(pool, order, p,
+ &dma_addr,
+ &num_pages,
+ &pages);
+ if (r)
+ goto error_free_page;
+
+ if (num_pages < (1 << order))
+ break;
+
+ p = ttm_pool_type_take(pt);
+ } while (p);
+ caching = pages;
}
- if (dma_addr) {
- r = ttm_pool_map(pool, order, p, &dma_addr);
+ while (num_pages >= (1 << order) &&
+ (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
+
+ if (PageHighMem(p)) {
+ r = ttm_pool_apply_caching(caching, pages,
+ tt->caching);
+ if (r)
+ goto error_free_page;
+ }
+ r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
+ &num_pages, &pages);
if (r)
goto error_free_page;
+ if (PageHighMem(p))
+ caching = pages;
}
- num_pages -= 1 << order;
- for (i = 1 << order; i; --i)
- *(pages++) = p++;
+ if (!p) {
+ if (order) {
+ --order;
+ continue;
+ }
+ r = -ENOMEM;
+ goto error_free_all;
+ }
}
r = ttm_pool_apply_caching(caching, pages, tt->caching);
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 04db72e3fa9c..0d05c386d303 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -32,6 +32,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
@@ -39,7 +40,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 91effdcefb6d..e81352126a0f 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -7,7 +7,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index e8c975b81585..478f1f0f60de 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -22,7 +22,6 @@
#include <linux/reset.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_managed.h>
#include <uapi/drm/v3d_drm.h>
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index f4f2bd79a7cb..b450f449a3ab 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -14,7 +14,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
@@ -178,8 +178,6 @@ static const struct drm_driver driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .lastclose = drm_fb_helper_lastclose,
-
.fops = &vbox_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index c9e8b3a63c62..3b83e550f4df 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -11,7 +11,6 @@
#include <linux/pci.h>
#include <linux/vbox_err.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 2027063fdc30..b66bf7aea632 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -33,7 +33,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_vblank.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 0035affc3e59..ae97b98750b6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -35,6 +35,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include "virtgpu_drv.h"
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 9b98470593b0..b7a64c7dcc2c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -35,7 +35,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 0ffe5f0e33f7..293dbca50c31 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -17,7 +17,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 63496773f714..bd02cb0e6837 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -35,7 +35,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_module.h>
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index e31554d7139f..4c95ebcdcc2d 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -12,7 +12,6 @@
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index 1847792cf13d..776ef5480206 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -19,7 +19,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index fddd0d1af689..b111dc7ada78 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -30,13 +30,12 @@
#ifndef DRM_FB_HELPER_H
#define DRM_FB_HELPER_H
+struct drm_clip_rect;
struct drm_fb_helper;
-#include <drm/drm_client.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_device.h>
#include <linux/fb.h>
-#include <linux/kgdb.h>
+
+#include <drm/drm_client.h>
enum mode_set_atomic {
LEAVE_ATOMIC_MODE_SET,
@@ -91,6 +90,20 @@ struct drm_fb_helper_funcs {
*/
int (*fb_probe)(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
+
+ /**
+ * @fb_dirty:
+ *
+ * Driver callback to update the framebuffer memory. If set, fbdev
+	 * emulation will invoke this callback at regular intervals after
+ * the framebuffer has been written.
+ *
+ * This callback is optional.
+ *
+ * Returns:
+ * 0 on success, or an error code otherwise.
+ */
+ int (*fb_dirty)(struct drm_fb_helper *helper, struct drm_clip_rect *clip);
};
/**
@@ -98,7 +111,7 @@ struct drm_fb_helper_funcs {
* @fb: Scanout framebuffer object
* @dev: DRM device
* @funcs: driver callbacks for fb helper
- * @fbdev: emulated fbdev device info struct
+ * @info: emulated fbdev device info struct
* @pseudo_palette: fake palette of 16 colors
* @damage_clip: clip rectangle used with deferred_io to accumulate damage to
* the screen buffer
@@ -129,7 +142,7 @@ struct drm_fb_helper {
struct drm_framebuffer *fb;
struct drm_device *dev;
const struct drm_fb_helper_funcs *funcs;
- struct fb_info *fbdev;
+ struct fb_info *info;
u32 pseudo_palette[17];
struct drm_clip_rect damage_clip;
spinlock_t damage_lock;
@@ -186,6 +199,15 @@ struct drm_fb_helper {
* See also: @deferred_setup
*/
int preferred_bpp;
+
+ /**
+ * @hint_leak_smem_start:
+ *
+ * Hint to the fbdev emulation to store the framebuffer's physical
+ * address in struct &fb_info.fix.smem_start. If the hint is unset,
+ * the smem_start field should always be cleared to zero.
+ */
+ bool hint_leak_smem_start;
};
static inline struct drm_fb_helper *
@@ -224,8 +246,8 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
-struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
-void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
+struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper);
void drm_fb_helper_fill_info(struct fb_info *info,
struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes);
@@ -244,6 +266,11 @@ void drm_fb_helper_sys_copyarea(struct fb_info *info,
void drm_fb_helper_sys_imageblit(struct fb_info *info,
const struct fb_image *image);
+ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos);
+ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos);
+
void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect);
void drm_fb_helper_cfb_copyarea(struct fb_info *info,
@@ -267,9 +294,6 @@ int drm_fb_helper_debug_leave(struct fb_info *info);
void drm_fb_helper_lastclose(struct drm_device *dev);
void drm_fb_helper_output_poll_changed(struct drm_device *dev);
-
-void drm_fbdev_generic_setup(struct drm_device *dev,
- unsigned int preferred_bpp);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
@@ -322,12 +346,12 @@ drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
}
static inline struct fb_info *
-drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
{
return NULL;
}
-static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+static inline void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper)
{
}
@@ -389,6 +413,18 @@ static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
{
}
+static inline ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
+static inline ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
@@ -442,12 +478,6 @@ static inline void drm_fb_helper_lastclose(struct drm_device *dev)
static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
{
}
-
-static inline void
-drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
-{
-}
-
#endif
#endif
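The header rework above renames the fb_info accessors (drm_fb_helper_alloc_info()/drm_fb_helper_unregister_info()), adds cfb read/write helpers and introduces the optional @fb_dirty callback that the fbdev emulation calls to flush writes out to the scanout buffer. A hedged sketch of a driver-side @fb_dirty implementation follows; my_fb_probe() and my_flush_rect() are placeholders for the driver's existing probe and damage-upload paths, not names from this patch.

#include <drm/drm_fb_helper.h>

/* Sketch only: push the damaged rectangle to the hardware. */
static int my_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
{
	/* clip covers everything written through the fbdev device since the
	 * last flush; upload just that region. */
	return my_flush_rect(helper->fb, clip->x1, clip->y1,
			     clip->x2 - clip->x1, clip->y2 - clip->y1);
}

static const struct drm_fb_helper_funcs my_fb_helper_funcs = {
	.fb_probe = my_fb_probe,	/* placeholder */
	.fb_dirty = my_fb_dirty,
};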
diff --git a/include/drm/drm_fbdev_generic.h b/include/drm/drm_fbdev_generic.h
new file mode 100644
index 000000000000..75799342098d
--- /dev/null
+++ b/include/drm/drm_fbdev_generic.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_FBDEV_GENERIC_H
+#define DRM_FBDEV_GENERIC_H
+
+struct drm_device;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
+#else
+static inline void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{ }
+#endif
+
+#endif
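The new header is what the driver hunks in this series switch to: drivers that only need generic fbdev emulation now include <drm/drm_fbdev_generic.h> instead of the full fb-helper header. Typical probe-time usage, sketched under the assumption of a driver-local "drm" device pointer and "ret" variable:

#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>

	/* ...at the end of the driver's probe path... */
	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* Generic fbdev emulation; pass 0 to use the device's preferred
	 * depth, 32 is a common explicit choice. */
	drm_fbdev_generic_setup(drm, 32);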
diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h
index 6e3319e9001a..6970ccb787e2 100644
--- a/include/drm/drm_gem_atomic_helper.h
+++ b/include/drm/drm_gem_atomic_helper.h
@@ -103,8 +103,8 @@ void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
.atomic_duplicate_state = drm_gem_duplicate_shadow_plane_state, \
.atomic_destroy_state = drm_gem_destroy_shadow_plane_state
-int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state);
-void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state);
+int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state);
+void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state);
/**
* DRM_GEM_SHADOW_PLANE_HELPER_FUNCS -
@@ -115,13 +115,13 @@ void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *
* functions.
*/
#define DRM_GEM_SHADOW_PLANE_HELPER_FUNCS \
- .prepare_fb = drm_gem_prepare_shadow_fb, \
- .cleanup_fb = drm_gem_cleanup_shadow_fb
+ .begin_fb_access = drm_gem_begin_shadow_fb_access, \
+ .end_fb_access = drm_gem_end_shadow_fb_access
-int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
-void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
+int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe);
struct drm_plane_state *
drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe);
@@ -137,8 +137,8 @@ void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pip
* functions.
*/
#define DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS \
- .prepare_fb = drm_gem_simple_kms_prepare_shadow_fb, \
- .cleanup_fb = drm_gem_simple_kms_cleanup_shadow_fb, \
+ .begin_fb_access = drm_gem_simple_kms_begin_shadow_fb_access, \
+ .end_fb_access = drm_gem_simple_kms_end_shadow_fb_access, \
.reset_plane = drm_gem_simple_kms_reset_shadow_plane, \
.duplicate_plane_state = drm_gem_simple_kms_duplicate_shadow_plane_state, \
.destroy_plane_state = drm_gem_simple_kms_destroy_shadow_plane_state
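For drivers using the shadow-plane helpers, the rename above is transparent: the macros still expand to a complete set of fb-access hooks, they just populate .begin_fb_access/.end_fb_access instead of .prepare_fb/.cleanup_fb so the vmap no longer outlives the commit. A minimal sketch (the my_plane_atomic_* callbacks are placeholders for the driver's own hooks):

#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
	DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
	.atomic_check = my_plane_atomic_check,		/* placeholder */
	.atomic_update = my_plane_atomic_update,	/* placeholder */
};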
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index fafa70ac1337..d9f2254a039a 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1184,11 +1184,20 @@ struct drm_plane_helper_funcs {
* can call drm_gem_plane_helper_prepare_fb() from their @prepare_fb
* hook.
*
+ * The resources acquired in @prepare_fb persist after the end of
+	 * the atomic commit. Resources that can be released at the commit's end
+ * should be acquired in @begin_fb_access and released in @end_fb_access.
+	 * For example, a GEM buffer's pin operation belongs in @prepare_fb to
+	 * keep the buffer pinned after the commit. But a vmap operation for
+	 * shadow-plane helpers belongs in @begin_fb_access, so that atomic
+ * helpers remove the mapping at the end of the commit.
+ *
* The helpers will call @cleanup_fb with matching arguments for every
* successful call to this hook.
*
* This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * transitional plane helpers, but it is optional. See @begin_fb_access
+ * for preparing per-commit resources.
*
* RETURNS:
*
@@ -1212,6 +1221,36 @@ struct drm_plane_helper_funcs {
struct drm_plane_state *old_state);
/**
+ * @begin_fb_access:
+ *
+ * This hook prepares the plane for access during an atomic commit.
+	 * In contrast to @prepare_fb, resources acquired in @begin_fb_access
+ * are released at the end of the atomic commit in @end_fb_access.
+ *
+ * For example, with shadow-plane helpers, the GEM buffer's vmap
+	 * operation belongs in @begin_fb_access, so that the buffer's
+	 * memory will be unmapped at the end of the commit in @end_fb_access.
+	 * But a GEM buffer's pin operation belongs in @prepare_fb
+ * to keep the buffer pinned after the commit.
+ *
+ * The callback is used by the atomic modeset helpers, but it is optional.
+	 * See @end_fb_access for undoing the effects of @begin_fb_access and
+ * @prepare_fb for acquiring resources until the next pageflip.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+ int (*begin_fb_access)(struct drm_plane *plane, struct drm_plane_state *new_plane_state);
+
+ /**
+ * @end_fb_access:
+ *
+	 * This hook cleans up resources allocated by @begin_fb_access. It is called
+ * at the end of a commit for the new plane state.
+ */
+ void (*end_fb_access)(struct drm_plane *plane, struct drm_plane_state *new_plane_state);
+
+ /**
* @atomic_check:
*
* Drivers should check plane specific constraints in this hook.
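Drivers that open-code these hooks split their work accordingly: pinning stays in @prepare_fb because it must survive past the commit, mapping moves to @begin_fb_access because the atomic helpers call @end_fb_access once the commit is done. A rough sketch, assuming a hypothetical driver-private plane state holding an iosys_map (locking and multi-plane formats are deliberately glossed over):

#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/iosys-map.h>

/* Hypothetical plane-state wrapper; not part of this patch. */
struct my_plane_state {
	struct drm_plane_state base;
	struct iosys_map map;
};

#define to_my_plane_state(s) container_of(s, struct my_plane_state, base)

static int my_begin_fb_access(struct drm_plane *plane,
			      struct drm_plane_state *new_state)
{
	struct my_plane_state *mstate = to_my_plane_state(new_state);
	struct drm_gem_object *obj = drm_gem_fb_get_obj(new_state->fb, 0);

	/* Mapping only needs to live until the end of this commit. */
	return drm_gem_vmap(obj, &mstate->map);
}

static void my_end_fb_access(struct drm_plane *plane,
			     struct drm_plane_state *new_state)
{
	struct my_plane_state *mstate = to_my_plane_state(new_state);
	struct drm_gem_object *obj = drm_gem_fb_get_obj(new_state->fb, 0);

	drm_gem_vunmap(obj, &mstate->map);
}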
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 0b3647e614dd..2298fe3af4cd 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -136,6 +136,26 @@ struct drm_simple_display_pipe_funcs {
struct drm_plane_state *plane_state);
/**
+ * @begin_fb_access:
+ *
+ * Optional, called by &drm_plane_helper_funcs.begin_fb_access. Please read
+ * the documentation for the &drm_plane_helper_funcs.begin_fb_access hook for
+ * more details.
+ */
+ int (*begin_fb_access)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_plane_state);
+
+ /**
+ * @end_fb_access:
+ *
+ * Optional, called by &drm_plane_helper_funcs.end_fb_access. Please read
+ * the documentation for the &drm_plane_helper_funcs.end_fb_access hook for
+ * more details.
+ */
+ void (*end_fb_access)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+ /**
* @enable_vblank:
*
* Optional, called by &drm_crtc_funcs.enable_vblank. Please read
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index ca11716d084a..cec147f7c50b 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -41,6 +41,8 @@
*/
#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS
+enum dma_resv_usage;
+struct dma_resv;
struct drm_gem_object;
struct drm_gpu_scheduler;
@@ -327,7 +329,7 @@ struct drm_sched_job {
*/
union {
struct dma_fence_cb finish_cb;
- struct work_struct work;
+ struct work_struct work;
};
uint64_t id;
@@ -375,18 +377,17 @@ enum drm_gpu_sched_stat {
*/
struct drm_sched_backend_ops {
/**
- * @dependency:
+ * @prepare_job:
*
* Called when the scheduler is considering scheduling this job next, to
* get another struct dma_fence for this job to block on. Once it
* returns NULL, run_job() may be called.
*
- * If a driver exclusively uses drm_sched_job_add_dependency() and
- * drm_sched_job_add_implicit_dependencies() this can be ommitted and
- * left as NULL.
+	 * Can be NULL if no additional preparation of the dependencies is
+	 * necessary. Skipped when jobs are killed instead of run.
*/
- struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
- struct drm_sched_entity *s_entity);
+ struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity);
/**
* @run_job: Called to execute the job once all of the dependencies
@@ -514,6 +515,9 @@ int drm_sched_job_init(struct drm_sched_job *job,
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
+int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
struct drm_gem_object *obj,
bool write);
@@ -532,8 +536,6 @@ void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
- struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);
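Besides the @dependency to @prepare_job rename, the scheduler gains drm_sched_job_add_resv_dependencies(), which pulls every fence matching the given dma_resv usage into the job's dependency list in one call. A hedged sketch of typical driver-side use, assuming "job" has been initialized with drm_sched_job_init() and "obj" is the GEM object the job touches:

#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
#include <linux/dma-resv.h>

/* Sketch: make the job wait for all kernel-level fences on the BO before
 * it is pushed to its entity. */
static int my_add_bo_dependencies(struct drm_sched_job *job,
				  struct drm_gem_object *obj)
{
	return drm_sched_job_add_resv_dependencies(job, obj->resv,
						   DMA_RESV_USAGE_KERNEL);
}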