Diffstat (limited to 'drivers/gpu/drm')
 drivers/gpu/drm/Kconfig | 2
 drivers/gpu/drm/Makefile | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 108
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 5
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 48
 drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 6
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5
 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 19
 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 19
 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 19
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2
 drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h | 1
 drivers/gpu/drm/armada/armada_crtc.c | 15
 drivers/gpu/drm/armada/armada_debugfs.c | 4
 drivers/gpu/drm/armada/armada_drm.h | 3
 drivers/gpu/drm/armada/armada_drv.c | 1
 drivers/gpu/drm/armada/armada_gem.c | 29
 drivers/gpu/drm/armada/armada_overlay.c | 2
 drivers/gpu/drm/ast/ast_mode.c | 2
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 2
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 9
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 6
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 2
 drivers/gpu/drm/bochs/bochs_kms.c | 10
 drivers/gpu/drm/bridge/Kconfig | 1
 drivers/gpu/drm/bridge/Makefile | 4
 drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c (renamed from drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c) | 2
 drivers/gpu/drm/bridge/dw-hdmi-audio.h (renamed from drivers/gpu/drm/bridge/dw_hdmi-audio.h) | 0
 drivers/gpu/drm/bridge/dw-hdmi.c (renamed from drivers/gpu/drm/bridge/dw_hdmi.c) | 10
 drivers/gpu/drm/bridge/dw-hdmi.h (renamed from drivers/gpu/drm/bridge/dw_hdmi.h) | 0
 drivers/gpu/drm/bridge/nxp-ptn3460.c | 6
 drivers/gpu/drm/cirrus/cirrus_mode.c | 6
 drivers/gpu/drm/drm_atomic.c | 93
 drivers/gpu/drm/drm_atomic_helper.c | 304
 drivers/gpu/drm/drm_bridge.c | 69
 drivers/gpu/drm/drm_crtc.c | 95
 drivers/gpu/drm/drm_crtc_helper.c | 99
 drivers/gpu/drm/drm_dp_mst_topology.c | 4
 drivers/gpu/drm/drm_drv.c | 31
 drivers/gpu/drm/drm_edid.c | 62
 drivers/gpu/drm/drm_encoder_slave.c | 2
 drivers/gpu/drm/drm_fb_cma_helper.c | 6
 drivers/gpu/drm/drm_fops.c | 142
 drivers/gpu/drm/drm_gem_cma_helper.c | 14
 drivers/gpu/drm/drm_irq.c | 54
 drivers/gpu/drm/drm_mipi_dsi.c | 38
 drivers/gpu/drm/drm_modes.c | 102
 drivers/gpu/drm/drm_modeset_lock.c | 103
 drivers/gpu/drm/drm_plane_helper.c | 9
 drivers/gpu/drm/drm_prime.c | 16
 drivers/gpu/drm/drm_probe_helper.c | 135
 drivers/gpu/drm/etnaviv/Kconfig | 20
 drivers/gpu/drm/etnaviv/Makefile | 14
 drivers/gpu/drm/etnaviv/cmdstream.xml.h | 218
 drivers/gpu/drm/etnaviv/common.xml.h | 249
 drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 268
 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c | 209
 drivers/gpu/drm/etnaviv/etnaviv_drv.c | 707
 drivers/gpu/drm/etnaviv/etnaviv_drv.h | 161
 drivers/gpu/drm/etnaviv/etnaviv_dump.c | 227
 drivers/gpu/drm/etnaviv/etnaviv_dump.h | 54
 drivers/gpu/drm/etnaviv/etnaviv_gem.c | 897
 drivers/gpu/drm/etnaviv/etnaviv_gem.h | 117
 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 122
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 443
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 1644
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 209
 drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 240
 drivers/gpu/drm/etnaviv/etnaviv_iommu.h | 28
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 33
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h | 25
 drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 299
 drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 71
 drivers/gpu/drm/etnaviv/state.xml.h | 351
 drivers/gpu/drm/etnaviv/state_hi.xml.h | 407
 drivers/gpu/drm/exynos/Kconfig | 2
 drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 104
 drivers/gpu/drm/exynos/exynos7_drm_decon.c | 161
 drivers/gpu/drm/exynos/exynos_dp_core.c | 189
 drivers/gpu/drm/exynos/exynos_dp_core.h | 1
 drivers/gpu/drm/exynos/exynos_drm_crtc.c | 6
 drivers/gpu/drm/exynos/exynos_drm_dpi.c | 10
 drivers/gpu/drm/exynos/exynos_drm_drv.c | 76
 drivers/gpu/drm/exynos/exynos_drm_drv.h | 81
 drivers/gpu/drm/exynos/exynos_drm_dsi.c | 161
 drivers/gpu/drm/exynos/exynos_drm_fb.c | 18
 drivers/gpu/drm/exynos/exynos_drm_fb.h | 3
 drivers/gpu/drm/exynos/exynos_drm_fimd.c | 142
 drivers/gpu/drm/exynos/exynos_drm_gem.h | 28
 drivers/gpu/drm/exynos/exynos_drm_gsc.c | 35
 drivers/gpu/drm/exynos/exynos_drm_mic.c | 2
 drivers/gpu/drm/exynos/exynos_drm_plane.c | 214
 drivers/gpu/drm/exynos/exynos_drm_plane.h | 7
 drivers/gpu/drm/exynos/exynos_drm_rotator.c | 4
 drivers/gpu/drm/exynos/exynos_drm_vidi.c | 41
 drivers/gpu/drm/exynos/exynos_hdmi.c | 59
 drivers/gpu/drm/exynos/exynos_mixer.c | 239
 drivers/gpu/drm/exynos/regs-gsc.h | 4
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 2
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 1
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 2
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 2
 drivers/gpu/drm/gma500/cdv_device.c | 2
 drivers/gpu/drm/gma500/cdv_intel_crt.c | 2
 drivers/gpu/drm/gma500/cdv_intel_display.c | 2
 drivers/gpu/drm/gma500/cdv_intel_dp.c | 3
 drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 7
 drivers/gpu/drm/gma500/cdv_intel_lvds.c | 6
 drivers/gpu/drm/gma500/framebuffer.c | 12
 drivers/gpu/drm/gma500/gem.c | 19
 drivers/gpu/drm/gma500/gma_display.c | 13
 drivers/gpu/drm/gma500/gtt.c | 1
 drivers/gpu/drm/gma500/mdfld_device.c | 2
 drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | 12
 drivers/gpu/drm/gma500/mdfld_dsi_output.c | 5
 drivers/gpu/drm/gma500/oaktrail_device.c | 2
 drivers/gpu/drm/gma500/oaktrail_hdmi.c | 2
 drivers/gpu/drm/gma500/oaktrail_lvds.c | 2
 drivers/gpu/drm/gma500/psb_device.c | 22
 drivers/gpu/drm/gma500/psb_drv.h | 4
 drivers/gpu/drm/gma500/psb_intel_display.c | 2
 drivers/gpu/drm/gma500/psb_intel_drv.h | 3
 drivers/gpu/drm/gma500/psb_intel_lvds.c | 7
 drivers/gpu/drm/gma500/psb_intel_sdvo.c | 8
 drivers/gpu/drm/i2c/adv7511.c | 2
 drivers/gpu/drm/i2c/ch7006_drv.c | 2
 drivers/gpu/drm/i2c/sil164_drv.c | 2
 drivers/gpu/drm/i2c/tda998x_drv.c | 28
 drivers/gpu/drm/i915/i915_debugfs.c | 124
 drivers/gpu/drm/i915/i915_drv.c | 186
 drivers/gpu/drm/i915/i915_drv.h | 68
 drivers/gpu/drm/i915/i915_gem.c | 18
 drivers/gpu/drm/i915/i915_gem_context.c | 2
 drivers/gpu/drm/i915/i915_gem_fence.c | 36
 drivers/gpu/drm/i915/i915_guc_submission.c | 33
 drivers/gpu/drm/i915/i915_irq.c | 117
 drivers/gpu/drm/i915/i915_reg.h | 58
 drivers/gpu/drm/i915/intel_bios.c | 5
 drivers/gpu/drm/i915/intel_crt.c | 35
 drivers/gpu/drm/i915/intel_ddi.c | 4
 drivers/gpu/drm/i915/intel_display.c | 175
 drivers/gpu/drm/i915/intel_dp.c | 27
 drivers/gpu/drm/i915/intel_dp_mst.c | 2
 drivers/gpu/drm/i915/intel_drv.h | 14
 drivers/gpu/drm/i915/intel_dsi.c | 27
 drivers/gpu/drm/i915/intel_dvo.c | 2
 drivers/gpu/drm/i915/intel_fbc.c | 614
 drivers/gpu/drm/i915/intel_fifo_underrun.c | 16
 drivers/gpu/drm/i915/intel_guc.h | 4
 drivers/gpu/drm/i915/intel_hdmi.c | 49
 drivers/gpu/drm/i915/intel_i2c.c | 51
 drivers/gpu/drm/i915/intel_lvds.c | 2
 drivers/gpu/drm/i915/intel_panel.c | 41
 drivers/gpu/drm/i915/intel_pm.c | 10
 drivers/gpu/drm/i915/intel_psr.c | 38
 drivers/gpu/drm/i915/intel_runtime_pm.c | 76
 drivers/gpu/drm/i915/intel_sdvo.c | 3
 drivers/gpu/drm/i915/intel_sprite.c | 2
 drivers/gpu/drm/i915/intel_tv.c | 2
 drivers/gpu/drm/imx/dw_hdmi-imx.c | 6
 drivers/gpu/drm/imx/imx-drm-core.c | 32
 drivers/gpu/drm/imx/imx-drm.h | 5
 drivers/gpu/drm/imx/imx-ldb.c | 10
 drivers/gpu/drm/imx/imx-tve.c | 11
 drivers/gpu/drm/imx/ipuv3-crtc.c | 65
 drivers/gpu/drm/imx/ipuv3-plane.c | 10
 drivers/gpu/drm/imx/ipuv3-plane.h | 2
 drivers/gpu/drm/imx/parallel-display.c | 14
 drivers/gpu/drm/mgag200/mgag200_mode.c | 6
 drivers/gpu/drm/msm/Kconfig | 8
 drivers/gpu/drm/msm/Makefile | 3
 drivers/gpu/drm/msm/adreno/adreno_device.c | 52
 drivers/gpu/drm/msm/dsi/dsi.h | 6
 drivers/gpu/drm/msm/dsi/dsi_cfg.c | 35
 drivers/gpu/drm/msm/dsi/dsi_cfg.h | 4
 drivers/gpu/drm/msm/dsi/dsi_host.c | 508
 drivers/gpu/drm/msm/dsi/dsi_manager.c | 6
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 4
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 1
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 195
 drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | 3
 drivers/gpu/drm/msm/dsi/pll/dsi_pll.h | 10
 drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c | 533
 drivers/gpu/drm/msm/hdmi/hdmi.c | 87
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 3
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c | 198
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c | 2
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | 2
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 278
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 15
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c | 13
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c | 23
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 3
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 4
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 2
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 3
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 20
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 2
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 129
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 2
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 2
 drivers/gpu/drm/msm/msm_drv.c | 76
 drivers/gpu/drm/msm/msm_drv.h | 7
 drivers/gpu/drm/msm/msm_fbdev.c | 2
 drivers/gpu/drm/nouveau/dispnv04/crtc.c | 5
 drivers/gpu/drm/nouveau/dispnv04/dac.c | 10
 drivers/gpu/drm/nouveau/dispnv04/dfp.c | 9
 drivers/gpu/drm/nouveau/dispnv04/disp.c | 43
 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 8
 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | 14
 drivers/gpu/drm/nouveau/nouveau_connector.c | 6
 drivers/gpu/drm/nouveau/nouveau_crtc.h | 3
 drivers/gpu/drm/nouveau/nouveau_display.c | 19
 drivers/gpu/drm/nouveau/nouveau_drm.c | 4
 drivers/gpu/drm/nouveau/nouveau_encoder.h | 5
 drivers/gpu/drm/nouveau/nv50_display.c | 7
 drivers/gpu/drm/omapdrm/omap_crtc.c | 2
 drivers/gpu/drm/omapdrm/omap_encoder.c | 2
 drivers/gpu/drm/omapdrm/omap_plane.c | 2
 drivers/gpu/drm/omapdrm/tcm-sita.c | 7
 drivers/gpu/drm/panel/Kconfig | 19
 drivers/gpu/drm/panel/Makefile | 2
 drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c | 334
 drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c | 387
 drivers/gpu/drm/panel/panel-simple.c | 123
 drivers/gpu/drm/qxl/qxl_display.c | 16
 drivers/gpu/drm/qxl/qxl_object.c | 2
 drivers/gpu/drm/qxl/qxl_ttm.c | 4
 drivers/gpu/drm/radeon/atombios_crtc.c | 1
 drivers/gpu/drm/radeon/atombios_encoders.c | 30
 drivers/gpu/drm/radeon/cik.c | 2
 drivers/gpu/drm/radeon/evergreen.c | 2
 drivers/gpu/drm/radeon/r100.c | 2
 drivers/gpu/drm/radeon/r600.c | 2
 drivers/gpu/drm/radeon/radeon.h | 2
 drivers/gpu/drm/radeon/radeon_agp.c | 3
 drivers/gpu/drm/radeon/radeon_connectors.c | 21
 drivers/gpu/drm/radeon/radeon_dp_mst.c | 2
 drivers/gpu/drm/radeon/radeon_irq_kms.c | 8
 drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 1
 drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 15
 drivers/gpu/drm/radeon/radeon_mode.h | 1
 drivers/gpu/drm/radeon/rs600.c | 2
 drivers/gpu/drm/radeon/si.c | 2
 drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 2
 drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 2
 drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c | 6
 drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c | 10
 drivers/gpu/drm/rcar-du/rcar_du_plane.c | 3
 drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 6
 drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 4
 drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 2
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 1
 drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 49
 drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 2
 drivers/gpu/drm/sti/sti_crtc.c | 2
 drivers/gpu/drm/sti/sti_cursor.c | 2
 drivers/gpu/drm/sti/sti_gdp.c | 2
 drivers/gpu/drm/sti/sti_hda.c | 2
 drivers/gpu/drm/sti/sti_hqvdp.c | 2
 drivers/gpu/drm/sti/sti_tvout.c | 7
 drivers/gpu/drm/tegra/dc.c | 15
 drivers/gpu/drm/tegra/dpaux.c | 41
 drivers/gpu/drm/tegra/drm.c | 90
 drivers/gpu/drm/tegra/drm.h | 27
 drivers/gpu/drm/tegra/dsi.c | 2
 drivers/gpu/drm/tegra/fb.c | 26
 drivers/gpu/drm/tegra/gem.c | 15
 drivers/gpu/drm/tegra/hdmi.c | 2
 drivers/gpu/drm/tegra/rgb.c | 2
 drivers/gpu/drm/tegra/sor.c | 147
 drivers/gpu/drm/tilcdc/tilcdc_panel.c | 2
 drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 2
 drivers/gpu/drm/udl/udl_connector.c | 4
 drivers/gpu/drm/udl/udl_encoder.c | 3
 drivers/gpu/drm/udl/udl_modeset.c | 2
 drivers/gpu/drm/vc4/Makefile | 11
 drivers/gpu/drm/vc4/vc4_bo.c | 517
 drivers/gpu/drm/vc4/vc4_crtc.c | 101
 drivers/gpu/drm/vc4/vc4_debugfs.c | 3
 drivers/gpu/drm/vc4/vc4_drv.c | 36
 drivers/gpu/drm/vc4/vc4_drv.h | 318
 drivers/gpu/drm/vc4/vc4_gem.c | 866
 drivers/gpu/drm/vc4/vc4_hdmi.c | 2
 drivers/gpu/drm/vc4/vc4_irq.c | 210
 drivers/gpu/drm/vc4/vc4_kms.c | 149
 drivers/gpu/drm/vc4/vc4_packet.h | 399
 drivers/gpu/drm/vc4/vc4_plane.c | 42
 drivers/gpu/drm/vc4/vc4_qpu_defines.h | 264
 drivers/gpu/drm/vc4/vc4_regs.h | 2
 drivers/gpu/drm/vc4/vc4_render_cl.c | 634
 drivers/gpu/drm/vc4/vc4_trace.h | 63
 drivers/gpu/drm/vc4/vc4_trace_points.c | 14
 drivers/gpu/drm/vc4/vc4_v3d.c | 262
 drivers/gpu/drm/vc4/vc4_validate.c | 900
 drivers/gpu/drm/vc4/vc4_validate_shaders.c | 513
 drivers/gpu/drm/virtio/virtgpu_display.c | 16
 drivers/gpu/drm/virtio/virtgpu_plane.c | 2
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 22
 drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 12
 drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 14
 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 12
 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 9
 308 files changed, 18711 insertions(+), 3061 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b42c1ba8df9a..59babd5a5396 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -267,3 +267,5 @@ source "drivers/gpu/drm/amd/amdkfd/Kconfig"
source "drivers/gpu/drm/imx/Kconfig"
source "drivers/gpu/drm/vc4/Kconfig"
+
+source "drivers/gpu/drm/etnaviv/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1e9ff4c3e3db..f858aa25fbb2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -75,3 +75,4 @@ obj-y += i2c/
obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
+obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a3fc43e52483..fca4ef78589c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -541,6 +541,7 @@ struct amdgpu_bo {
/* Constant after initialization */
struct amdgpu_device *adev;
struct drm_gem_object gem_base;
+ struct amdgpu_bo *parent;
struct ttm_bo_kmap_obj dma_buf_vmap;
pid_t pid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6ce595ff1aff..fa0e3276e8da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -222,6 +222,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
p->uf.bo = gem_to_amdgpu_bo(gobj);
+ amdgpu_bo_ref(p->uf.bo);
+ drm_gem_object_unreference_unlocked(gobj);
p->uf.offset = fence_data->offset;
} else {
ret = -EINVAL;
@@ -487,7 +489,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
amdgpu_ib_free(parser->adev, &parser->ibs[i]);
kfree(parser->ibs);
if (parser->uf.bo)
- drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+ amdgpu_bo_unref(&parser->uf.bo);
}
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -776,7 +778,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job)
amdgpu_ib_free(job->adev, &job->ibs[i]);
kfree(job->ibs);
if (job->uf.bo)
- drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
+ amdgpu_bo_unref(&job->uf.bo);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 7d5e0583c95c..acd066d0a805 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -73,6 +73,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags;
unsigned i;
+ int vpos, hpos, stat, min_udelay;
+ struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
amdgpu_flip_wait_fence(adev, &work->excl);
for (i = 0; i < work->shared_count; ++i)
@@ -81,6 +83,41 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/* We borrow the event spin lock for protecting flip_status */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ /* If this happens to execute within the "virtually extended" vblank
+ * interval before the start of the real vblank interval then it needs
+ * to delay programming the mmio flip until the real vblank is entered.
+ * This prevents completing a flip too early due to the way we fudge
+ * our vblank counter and vblank timestamps in order to work around the
+ * problem that the hw fires vblank interrupts before actual start of
+ * vblank (when line buffer refilling is done for a frame). It
+ * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
+ * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
+ *
+ * In practice this won't execute very often unless on very fast
+ * machines because the time window for this to happen is very small.
+ */
+ for (;;) {
+ /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+ * start in hpos, and to the "fudged earlier" vblank start in
+ * vpos.
+ */
+ stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
+ GET_DISTANCE_TO_VBLANKSTART,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode);
+
+ if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
+ !(vpos >= 0 && hpos <= 0))
+ break;
+
+ /* Sleep at least until estimated real start of hw vblank */
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ usleep_range(min_udelay, 2 * min_udelay);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ };
+
/* do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
/* set the flip status */
@@ -109,7 +146,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
} else
DRM_ERROR("failed to reserve buffer after flip\n");
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ amdgpu_bo_unref(&work->old_rbo);
kfree(work->shared);
kfree(work);
}
@@ -148,8 +185,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
obj = old_amdgpu_fb->obj;
/* take a reference to the old object */
- drm_gem_object_reference(obj);
work->old_rbo = gem_to_amdgpu_bo(obj);
+ amdgpu_bo_ref(work->old_rbo);
new_amdgpu_fb = to_amdgpu_framebuffer(fb);
obj = new_amdgpu_fb->obj;
@@ -222,7 +259,7 @@ pflip_cleanup:
amdgpu_bo_unreserve(new_rbo);
cleanup:
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ amdgpu_bo_unref(&work->old_rbo);
fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
fence_put(work->shared[i]);
@@ -712,6 +749,15 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
* \param dev Device to query.
* \param pipe Crtc to query.
* \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * For driver internal use only also supports these flags:
+ *
+ * USE_REAL_VBLANKSTART to use the real start of vblank instead
+ * of a fudged earlier start of vblank.
+ *
+ * GET_DISTANCE_TO_VBLANKSTART to return distance to the
+ * fudged earlier start of vblank in *vpos and the distance
+ * to true start of vblank in *hpos.
+ *
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
* \param *stime Target location for timestamp taken immediately before
@@ -776,10 +822,40 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
vbl_end = 0;
}
+ /* Called from driver internal vblank counter query code? */
+ if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+ /* Caller wants distance from real vbl_start in *hpos */
+ *hpos = *vpos - vbl_start;
+ }
+
+ /* Fudge vblank to start a few scanlines earlier to handle the
+ * problem that vblank irqs fire a few scanlines before start
+ * of vblank. Some driver internal callers need the true vblank
+ * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
+ *
+ * The cause of the "early" vblank irq is that the irq is triggered
+ * by the line buffer logic when the line buffer read position enters
+ * the vblank, whereas our crtc scanout position naturally lags the
+ * line buffer read position.
+ */
+ if (!(flags & USE_REAL_VBLANKSTART))
+ vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
+
/* Test scanout position against vblank region. */
if ((*vpos < vbl_start) && (*vpos >= vbl_end))
in_vbl = false;
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+ /* Called from driver internal vblank counter query code? */
+ if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+ /* Caller wants distance from fudged earlier vbl_start */
+ *vpos -= vbl_start;
+ return ret;
+ }
+
/* Check if inside vblank area and apply corrective offsets:
* vpos will then be >=0 in video scanout area, but negative
* within vblank area, counting down the number of lines until
@@ -795,32 +871,6 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
/* Correct for shifted end of vbl at vbl_end. */
*vpos = *vpos - vbl_end;
- /* In vblank? */
- if (in_vbl)
- ret |= DRM_SCANOUTPOS_IN_VBLANK;
-
- /* Is vpos outside nominal vblank area, but less than
- * 1/100 of a frame height away from start of vblank?
- * If so, assume this isn't a massively delayed vblank
- * interrupt, but a vblank interrupt that fired a few
- * microseconds before true start of vblank. Compensate
- * by adding a full frame duration to the final timestamp.
- * Happens, e.g., on ATI R500, R600.
- *
- * We only do this if DRM_CALLED_FROM_VBLIRQ.
- */
- if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
- vbl_start = mode->crtc_vdisplay;
- vtotal = mode->crtc_vtotal;
-
- if (vbl_start - *vpos < vtotal / 100) {
- *vpos -= vtotal;
-
- /* Signal this correction as "applied". */
- ret |= 0x8;
- }
- }
-
return ret;
}
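
Note: the wait loop added to amdgpu_flip_work_func() above turns a scanline distance into a sleep interval. A minimal sketch of that arithmetic, assuming hpos holds the (non-positive) number of lines to the real vblank start and linedur_ns the per-line duration from the DRM vblank bookkeeping, as in the patch:

	/* Lines left until the real vblank starts (hpos <= 0 here). */
	int lines = -hpos + 1;

	/* Nanoseconds per line, converted to microseconds; the floor of
	 * 5 us per line guards against a bogus or zero timing estimate. */
	int min_udelay = lines * max(vblank->linedur_ns / 1000, 5);

	usleep_range(min_udelay, 2 * min_udelay);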
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 8c5687e4a6d1..6d136b260bb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -235,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
AMDGPU_GEM_USERPTR_REGISTER))
return -EINVAL;
- if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
- !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
+ if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
+ !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
+ !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
/* if we want to write to it we must require anonymous
memory and install a MMU notifier */
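
Note: the reworked predicate above exempts read-only userptr mappings from the anonymous-memory and MMU-notifier requirements; only writable mappings must set both flags. A condensed sketch of the same check, using the flag names from the patch (the boolean locals are illustrative only):

	bool readonly = args->flags & AMDGPU_GEM_USERPTR_READONLY;
	bool anon     = args->flags & AMDGPU_GEM_USERPTR_ANONONLY;
	bool notifier = args->flags & AMDGPU_GEM_USERPTR_REGISTER;

	/* A writable userptr BO must be anonymous memory and have an
	 * MMU notifier registered; read-only mappings are exempt. */
	if (!readonly && !(anon && notifier))
		return -EINVAL;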
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 1618e2294a16..e23843f4d877 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -611,13 +611,59 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
struct amdgpu_device *adev = dev->dev_private;
+ int vpos, hpos, stat;
+ u32 count;
if (pipe >= adev->mode_info.num_crtc) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
- return amdgpu_display_vblank_get_counter(adev, pipe);
+ /* The hw increments its frame counter at start of vsync, not at start
+ * of vblank, as is required by DRM core vblank counter handling.
+ * Cook the hw count here to make it appear to the caller as if it
+ * incremented at start of vblank. We measure distance to start of
+ * vblank in vpos. vpos therefore will be >= 0 between start of vblank
+ * and start of vsync, so vpos >= 0 means to bump the hw frame counter
+ * result by 1 to give the proper appearance to caller.
+ */
+ if (adev->mode_info.crtcs[pipe]) {
+ /* Repeat readout if needed to provide stable result if
+ * we cross start of vsync during the queries.
+ */
+ do {
+ count = amdgpu_display_vblank_get_counter(adev, pipe);
+ /* Ask amdgpu_get_crtc_scanoutpos to return vpos as
+ * distance to start of vblank, instead of regular
+ * vertical scanout pos.
+ */
+ stat = amdgpu_get_crtc_scanoutpos(
+ dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
+ &vpos, &hpos, NULL, NULL,
+ &adev->mode_info.crtcs[pipe]->base.hwmode);
+ } while (count != amdgpu_display_vblank_get_counter(adev, pipe));
+
+ if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
+ DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
+ } else {
+ DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
+ pipe, vpos);
+
+ /* Bump counter if we are at >= leading edge of vblank,
+ * but before vsync where vpos would turn negative and
+ * the hw counter really increments.
+ */
+ if (vpos >= 0)
+ count++;
+ }
+ } else {
+ /* Fallback to use value as is. */
+ count = amdgpu_display_vblank_get_counter(adev, pipe);
+ DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
+ }
+
+ return count;
}
/**
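
Note: the do/while readout in amdgpu_get_vblank_counter_kms() above is the usual retry pattern for combining two racy hardware reads into one consistent sample. Stripped to its skeleton (read_counter() and read_position() are hypothetical helpers, not amdgpu functions):

	u32 count;
	int dist;

	do {
		count = read_counter();    /* sample #1 */
		dist = read_position();    /* may straddle a counter bump */
	} while (count != read_counter()); /* retry if the counter moved */

	if (dist >= 0)  /* past vblank start but before the hw increment */
		count++;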
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index de4529969778..fdc1be8550da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -35,6 +35,7 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -407,6 +408,7 @@ struct amdgpu_crtc {
u32 line_time;
u32 wm_low;
u32 wm_high;
+ u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
};
@@ -528,6 +530,10 @@ struct amdgpu_framebuffer {
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST))
+/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
+#define USE_REAL_VBLANKSTART (1 << 30)
+#define GET_DISTANCE_TO_VBLANKSTART (1 << 31)
+
void amdgpu_link_encoder_connector(struct drm_device *dev);
struct drm_connector *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 0d524384ff79..c3ce103b6a33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -100,6 +100,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->adev->gem.mutex);
drm_gem_object_release(&bo->gem_base);
+ amdgpu_bo_unref(&bo->parent);
kfree(bo->metadata);
kfree(bo);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8051cb9b8c1e..8a1752ff3d8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -801,11 +801,12 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
if (mem && mem->mem_type != TTM_PL_SYSTEM)
flags |= AMDGPU_PTE_VALID;
- if (mem && mem->mem_type == TTM_PL_TT)
+ if (mem && mem->mem_type == TTM_PL_TT) {
flags |= AMDGPU_PTE_SYSTEM;
- if (!ttm || ttm->caching_state == tt_cached)
- flags |= AMDGPU_PTE_SNOOPED;
+ if (ttm->caching_state == tt_cached)
+ flags |= AMDGPU_PTE_SNOOPED;
+ }
if (adev->asic_type >= CHIP_TOPAZ)
flags |= AMDGPU_PTE_EXECUTABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d6ff5dad98f6..8f7688e598a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1085,6 +1085,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (r)
goto error_free;
+ /* Keep a reference to the page table to avoid freeing
+ * them up in the wrong order.
+ */
+ pt->parent = amdgpu_bo_ref(vm->page_directory);
+
r = amdgpu_vm_clear_bo(adev, pt);
if (r) {
amdgpu_bo_unref(&pt);
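
Note: together with the new bo->parent field in amdgpu.h and the amdgpu_bo_unref(&bo->parent) added to amdgpu_ttm_bo_destroy() above, this makes each page table pin the page directory, so the directory can no longer be freed before its tables regardless of teardown order. The pairing, in sketch form:

	/* map path: the page table takes a reference on the directory */
	pt->parent = amdgpu_bo_ref(vm->page_directory);

	/* destroy path (amdgpu_ttm_bo_destroy): the dying table drops it */
	amdgpu_bo_unref(&bo->parent);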
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index cb0f7747e3dc..093599aba64b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1250,7 +1250,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
- u32 tmp, wm_mask;
+ u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
@@ -1333,6 +1333,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
(adev->mode_info.disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
+ lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -1357,6 +1358,8 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a;
amdgpu_crtc->wm_low = latency_watermark_b;
+ /* Save number of lines the linebuffer leads before the scanout */
+ amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/**
@@ -3726,7 +3729,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
@@ -3737,15 +3740,15 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
amdgpu_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
} else {
drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
}
drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
@@ -3763,13 +3766,13 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
amdgpu_encoder->is_ext_encoder = true;
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
else
drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
break;
}
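
Note: the mechanical ", NULL" churn through this and the following display drivers tracks a DRM core change in this cycle that added an optional, printf-style name argument to the object-init helpers; passing NULL keeps the auto-generated name (e.g. "TMDS-n"). For reference, the prototypes as of this series (a sketch from the contemporary headers, hedged against later signature changes):

	int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder,
			     const struct drm_encoder_funcs *funcs,
			     int encoder_type, const char *name, ...);

	int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
				      struct drm_plane *primary,
				      struct drm_plane *cursor,
				      const struct drm_crtc_funcs *funcs,
				      const char *name, ...);

	int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
				     unsigned long possible_crtcs,
				     const struct drm_plane_funcs *funcs,
				     const uint32_t *formats,
				     unsigned int format_count,
				     enum drm_plane_type type,
				     const char *name, ...);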
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 5af3721851d6..8701661a8868 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1238,7 +1238,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
- u32 tmp, wm_mask;
+ u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
@@ -1321,6 +1321,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
(adev->mode_info.disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
+ lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -1345,6 +1346,8 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a;
amdgpu_crtc->wm_low = latency_watermark_b;
+ /* Save number of lines the linebuffer leads before the scanout */
+ amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/**
@@ -3719,7 +3722,7 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
@@ -3730,15 +3733,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
amdgpu_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
} else {
drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
}
drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
@@ -3756,13 +3759,13 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
amdgpu_encoder->is_ext_encoder = true;
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
else
drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 4f7b49a6dc50..d0e128c24813 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1193,7 +1193,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
- u32 tmp, wm_mask;
+ u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
@@ -1276,6 +1276,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
(adev->mode_info.disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
+ lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -1302,6 +1303,8 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a;
amdgpu_crtc->wm_low = latency_watermark_b;
+ /* Save number of lines the linebuffer leads before the scanout */
+ amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/**
@@ -3656,7 +3659,7 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
@@ -3667,15 +3670,15 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
amdgpu_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
} else {
drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
}
drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
@@ -3693,13 +3696,13 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
amdgpu_encoder->is_ext_encoder = true;
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
else
drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 868505753a9a..dababe40a685 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -517,7 +517,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_L2_CNTL3, tmp);
/* setup context0 */
WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 6e2331f70b39..adc25f87fc18 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -661,7 +661,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_L2_CNTL4, tmp);
/* setup context0 */
WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index d9b8d3f768ab..e61a3e67852e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -25,6 +25,7 @@
#include <linux/seq_file.h>
#include <linux/types.h>
+#include <linux/errno.h>
#include "amd_shared.h"
#include "cgs_common.h"
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index cebcab560626..0293eb74d777 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -928,11 +928,10 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
}
}
- mutex_lock(&dev->struct_mutex);
if (dcrtc->cursor_obj) {
dcrtc->cursor_obj->update = NULL;
dcrtc->cursor_obj->update_data = NULL;
- drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+ drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj);
}
dcrtc->cursor_obj = obj;
dcrtc->cursor_w = w;
@@ -942,14 +941,12 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
obj->update_data = dcrtc;
obj->update = cursor_update;
}
- mutex_unlock(&dev->struct_mutex);
return ret;
}
static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
- struct drm_device *dev = crtc->dev;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
int ret;
@@ -957,11 +954,9 @@ static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
if (!dcrtc->variant->has_spu_adv_reg)
return -EFAULT;
- mutex_lock(&dev->struct_mutex);
dcrtc->cursor_x = x;
dcrtc->cursor_y = y;
ret = armada_drm_crtc_cursor_update(dcrtc, false);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -972,7 +967,7 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
struct armada_private *priv = crtc->dev->dev_private;
if (dcrtc->cursor_obj)
- drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+ drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj);
priv->dcrtc[dcrtc->num] = NULL;
drm_crtc_cleanup(&dcrtc->crtc);
@@ -1074,7 +1069,7 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc,
return 0;
}
-static struct drm_crtc_funcs armada_crtc_funcs = {
+static const struct drm_crtc_funcs armada_crtc_funcs = {
.cursor_set = armada_drm_crtc_cursor_set,
.cursor_move = armada_drm_crtc_cursor_move,
.destroy = armada_drm_crtc_destroy,
@@ -1216,14 +1211,14 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
&armada_primary_plane_funcs,
armada_primary_formats,
ARRAY_SIZE(armada_primary_formats),
- DRM_PLANE_TYPE_PRIMARY);
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
return ret;
}
ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
- &armada_crtc_funcs);
+ &armada_crtc_funcs, NULL);
if (ret)
goto err_crtc_init;
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index 471e45627f1e..d4f7ab0a30d4 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -21,9 +21,9 @@ static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
struct armada_private *priv = dev->dev_private;
int ret;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&priv->linear_lock);
ret = drm_mm_dump_table(m, &priv->linear);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&priv->linear_lock);
return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index 4df6f2af2b21..3b2bb6128d40 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -57,7 +57,8 @@ struct armada_private {
DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
struct armada_crtc *dcrtc[2];
- struct drm_mm linear;
+ struct drm_mm linear; /* protected by linear_lock */
+ struct mutex linear_lock;
struct drm_property *csc_yuv_prop;
struct drm_property *csc_rgb_prop;
struct drm_property *colorkey_prop;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 77ab93d60125..3bd7e1cde99e 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -102,6 +102,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
dev->mode_config.preferred_depth = 24;
dev->mode_config.funcs = &armada_drm_mode_config_funcs;
drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+ mutex_init(&priv->linear_lock);
ret = component_bind_all(dev->dev, dev);
if (ret)
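
Note: the armada changes in this series replace uses of dev->struct_mutex around the linear drm_mm with the dedicated priv->linear_lock initialised here; every insert and remove on priv->linear is now bracketed by that mutex. A sketch of the resulting rule:

	mutex_lock(&priv->linear_lock);
	ret = drm_mm_insert_node(&priv->linear, node, size, align,
				 DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&priv->linear_lock);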
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 60a688ef81c7..6e731db31aa4 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -46,22 +46,26 @@ static size_t roundup_gem_size(size_t size)
return roundup(size, PAGE_SIZE);
}
-/* dev->struct_mutex is held here */
void armada_gem_free_object(struct drm_gem_object *obj)
{
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+ struct armada_private *priv = obj->dev->dev_private;
DRM_DEBUG_DRIVER("release obj %p\n", dobj);
drm_gem_free_mmap_offset(&dobj->obj);
+ might_lock(&priv->linear_lock);
+
if (dobj->page) {
/* page backed memory */
unsigned int order = get_order(dobj->obj.size);
__free_pages(dobj->page, order);
} else if (dobj->linear) {
/* linear backed memory */
+ mutex_lock(&priv->linear_lock);
drm_mm_remove_node(dobj->linear);
+ mutex_unlock(&priv->linear_lock);
kfree(dobj->linear);
if (dobj->addr)
iounmap(dobj->addr);
@@ -144,10 +148,10 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
if (!node)
return -ENOSPC;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&priv->linear_lock);
ret = drm_mm_insert_node(&priv->linear, node, size, align,
DRM_MM_SEARCH_DEFAULT);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&priv->linear_lock);
if (ret) {
kfree(node);
return ret;
@@ -158,9 +162,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
/* Ensure that the memory we're returning is cleared. */
ptr = ioremap_wc(obj->linear->start, size);
if (!ptr) {
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&priv->linear_lock);
drm_mm_remove_node(obj->linear);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&priv->linear_lock);
kfree(obj->linear);
obj->linear = NULL;
return -ENOMEM;
@@ -274,18 +278,16 @@ int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
struct armada_gem_object *obj;
int ret = 0;
- mutex_lock(&dev->struct_mutex);
obj = armada_gem_object_lookup(dev, file, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
- ret = -EINVAL;
- goto err_unlock;
+ return -EINVAL;
}
/* Don't allow imported objects to be mapped */
if (obj->obj.import_attach) {
ret = -EINVAL;
- goto err_unlock;
+ goto err_unref;
}
ret = drm_gem_create_mmap_offset(&obj->obj);
@@ -294,9 +296,8 @@ int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
}
- drm_gem_object_unreference(&obj->obj);
- err_unlock:
- mutex_unlock(&dev->struct_mutex);
+ err_unref:
+ drm_gem_object_unreference_unlocked(&obj->obj);
return ret;
}
@@ -352,13 +353,13 @@ int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
if (!dobj->obj.filp) {
- drm_gem_object_unreference(&dobj->obj);
+ drm_gem_object_unreference_unlocked(&dobj->obj);
return -EINVAL;
}
addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
MAP_SHARED, args->offset);
- drm_gem_object_unreference(&dobj->obj);
+ drm_gem_object_unreference_unlocked(&dobj->obj);
if (IS_ERR_VALUE(addr))
return addr;
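
Note: armada_gem_dumb_map_offset() and armada_gem_mmap_ioctl() above no longer take dev->struct_mutex around the object lookup: the lookup already returns its own reference, and dropping it with drm_gem_object_unreference_unlocked() needs no global lock. The resulting pattern, in sketch form:

	obj = armada_gem_object_lookup(dev, file, handle);  /* +1 reference */
	if (!obj)
		return -ENOENT;

	/* ... use obj; no struct_mutex required ... */

	drm_gem_object_unreference_unlocked(&obj->obj);     /* -1 reference */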
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 5c22b380f8f3..148e8a42b2c6 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -460,7 +460,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
&armada_ovl_plane_funcs,
armada_ovl_formats,
ARRAY_SIZE(armada_ovl_formats),
- DRM_PLANE_TYPE_OVERLAY);
+ DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
kfree(dplane);
return ret;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 69d19f3304a5..0123458cbd83 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -751,7 +751,7 @@ static int ast_encoder_init(struct drm_device *dev)
return -ENOMEM;
drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
ast_encoder->base.possible_crtcs = 1;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 9f6e234e7029..468a14f266a7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -344,7 +344,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
ret = drm_crtc_init_with_planes(dev, &crtc->base,
&planes->primary->base,
planes->cursor ? &planes->cursor->base : NULL,
- &atmel_hlcdc_crtc_funcs);
+ &atmel_hlcdc_crtc_funcs, NULL);
if (ret < 0)
goto fail;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 816895447155..a45b32ba029e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -333,6 +333,10 @@ static const struct of_device_id atmel_hlcdc_of_match[] = {
.data = &atmel_hlcdc_dc_at91sam9x5,
},
{
+ .compatible = "atmel,sama5d2-hlcdc",
+ .data = &atmel_hlcdc_dc_sama5d4,
+ },
+ {
.compatible = "atmel,sama5d3-hlcdc",
.data = &atmel_hlcdc_dc_sama5d3,
},
@@ -342,6 +346,7 @@ static const struct of_device_id atmel_hlcdc_of_match[] = {
},
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, atmel_hlcdc_of_match);
int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
struct drm_display_mode *mode)
@@ -733,10 +738,6 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
if (!ddev)
return -ENOMEM;
- ret = drm_dev_set_unique(ddev, dev_name(ddev->dev));
- if (ret)
- goto err_unref;
-
ret = atmel_hlcdc_dc_load(ddev);
if (ret)
goto err_unref;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 067e4c144bd6..0f7ec016e7a9 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -146,7 +146,7 @@ atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
cfg);
}
-static struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
.mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup,
.mode_set = atmel_hlcdc_rgb_encoder_mode_set,
.disable = atmel_hlcdc_panel_encoder_disable,
@@ -192,7 +192,7 @@ atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector)
return &rgb->encoder;
}
-static struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
.get_modes = atmel_hlcdc_panel_get_modes,
.mode_valid = atmel_hlcdc_rgb_mode_valid,
.best_encoder = atmel_hlcdc_rgb_best_encoder,
@@ -256,7 +256,7 @@ static int atmel_hlcdc_create_panel_output(struct drm_device *dev,
&atmel_hlcdc_panel_encoder_helper_funcs);
ret = drm_encoder_init(dev, &panel->base.encoder,
&atmel_hlcdc_panel_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index d0299aed517e..1ffe9c329c46 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -941,7 +941,7 @@ atmel_hlcdc_plane_create(struct drm_device *dev,
ret = drm_universal_plane_init(dev, &plane->base, 0,
&layer_plane_funcs,
desc->formats->formats,
- desc->formats->nformats, type);
+ desc->formats->nformats, type, NULL);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 26bcd03a8cb6..2849f1b95eec 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -119,7 +119,7 @@ static int bochs_crtc_page_flip(struct drm_crtc *crtc,
bochs_crtc_mode_set_base(crtc, 0, 0, old_fb);
if (event) {
spin_lock_irqsave(&bochs->dev->event_lock, irqflags);
- drm_send_vblank_event(bochs->dev, -1, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irqrestore(&bochs->dev->event_lock, irqflags);
}
return 0;
@@ -196,7 +196,7 @@ static void bochs_encoder_init(struct drm_device *dev)
encoder->possible_crtcs = 0x1;
drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs);
}
@@ -245,13 +245,13 @@ static enum drm_connector_status bochs_connector_detect(struct drm_connector
return connector_status_connected;
}
-struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
.get_modes = bochs_connector_get_modes,
.mode_valid = bochs_connector_mode_valid,
.best_encoder = bochs_connector_best_encoder,
};
-struct drm_connector_funcs bochs_connector_connector_funcs = {
+static const struct drm_connector_funcs bochs_connector_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = bochs_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -283,7 +283,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
- bochs->dev->mode_config.funcs = (void *)&bochs_mode_funcs;
+ bochs->dev->mode_config.funcs = &bochs_mode_funcs;
bochs_crtc_init(bochs->dev);
bochs_encoder_init(bochs->dev);
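
Note: the bochs page-flip change above swaps drm_send_vblank_event(dev, -1, event), with its magic pipe number, for the CRTC-aware helper; the event must still be sent under dev->event_lock. In sketch form:

	spin_lock_irqsave(&dev->event_lock, irqflags);
	drm_crtc_send_vblank_event(crtc, event);  /* pipe derived from crtc */
	spin_unlock_irqrestore(&dev->event_lock, irqflags);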
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 6dddd392aa42..27e2022de89d 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -22,7 +22,6 @@ config DRM_DW_HDMI_AHB_AUDIO
Designware HDMI block. This is used in conjunction with
the i.MX6 HDMI driver.
-
config DRM_NXP_PTN3460
tristate "NXP PTN3460 DP/LVDS bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index d4e28beec30e..f13c33d67c03 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,6 +1,6 @@
ccflags-y := -Iinclude/drm
-obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
-obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw_hdmi-ahb-audio.o
+obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
+obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
diff --git a/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
index 59f630f1c61a..122bb015f4a9 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
@@ -21,7 +21,7 @@
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_iec958.h>
-#include "dw_hdmi-audio.h"
+#include "dw-hdmi-audio.h"
#define DRIVER_NAME "dw-hdmi-ahb-audio"
diff --git a/drivers/gpu/drm/bridge/dw_hdmi-audio.h b/drivers/gpu/drm/bridge/dw-hdmi-audio.h
index 91f631beecc7..91f631beecc7 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi-audio.h
+++ b/drivers/gpu/drm/bridge/dw-hdmi-audio.h
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index 56de9f1c95fc..77cafa9aa41c 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -27,8 +27,8 @@
#include <drm/drm_encoder_slave.h>
#include <drm/bridge/dw_hdmi.h>
-#include "dw_hdmi.h"
-#include "dw_hdmi-audio.h"
+#include "dw-hdmi.h"
+#include "dw-hdmi-audio.h"
#define HDMI_EDID_LEN 512
@@ -1514,7 +1514,7 @@ static void dw_hdmi_connector_force(struct drm_connector *connector)
mutex_unlock(&hdmi->mutex);
}
-static struct drm_connector_funcs dw_hdmi_connector_funcs = {
+static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = dw_hdmi_connector_detect,
@@ -1522,13 +1522,13 @@ static struct drm_connector_funcs dw_hdmi_connector_funcs = {
.force = dw_hdmi_connector_force,
};
-static struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
.get_modes = dw_hdmi_connector_get_modes,
.mode_valid = dw_hdmi_connector_mode_valid,
.best_encoder = dw_hdmi_connector_best_encoder,
};
-static struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
+static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.enable = dw_hdmi_bridge_enable,
.disable = dw_hdmi_bridge_disable,
.pre_enable = dw_hdmi_bridge_nop,
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.h b/drivers/gpu/drm/bridge/dw-hdmi.h
index fc9a560429d6..fc9a560429d6 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.h
+++ b/drivers/gpu/drm/bridge/dw-hdmi.h
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 0ffa3a6a206a..7ecd59f70b8e 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -242,7 +242,7 @@ static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
return ptn_bridge->bridge.encoder;
}
-static struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
.get_modes = ptn3460_get_modes,
.best_encoder = ptn3460_best_encoder,
};
@@ -258,7 +258,7 @@ static void ptn3460_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static struct drm_connector_funcs ptn3460_connector_funcs = {
+static const struct drm_connector_funcs ptn3460_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = ptn3460_detect,
@@ -299,7 +299,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge)
return ret;
}
-static struct drm_bridge_funcs ptn3460_bridge_funcs = {
+static const struct drm_bridge_funcs ptn3460_bridge_funcs = {
.pre_enable = ptn3460_pre_enable,
.enable = ptn3460_enable,
.disable = ptn3460_disable,
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 61385f2298bf..4a02854a6963 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -489,7 +489,7 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
encoder->possible_crtcs = 0x1;
drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
return encoder;
@@ -533,12 +533,12 @@ static void cirrus_connector_destroy(struct drm_connector *connector)
kfree(connector);
}
-struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
.get_modes = cirrus_vga_get_modes,
.best_encoder = cirrus_connector_best_encoder,
};
-struct drm_connector_funcs cirrus_vga_connector_funcs = {
+static const struct drm_connector_funcs cirrus_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = cirrus_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 55b4debad79b..6a21e5c378c1 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -288,8 +288,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
state->crtcs[index] = crtc;
crtc_state->state = state;
- DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n",
- crtc->base.id, crtc_state, state);
+ DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
+ crtc->base.id, crtc->name, crtc_state, state);
return crtc_state;
}
@@ -429,11 +429,20 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);
-/*
+/**
+ * drm_atomic_crtc_get_property - get property value from CRTC state
+ * @crtc: the drm CRTC to get the property value for
+ * @state: the state object to get the property value from
+ * @property: the property to look up
+ * @val: return location for the property value
+ *
* This function handles generic/core properties and calls out to the
* driver's ->atomic_get_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
*/
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
@@ -477,8 +486,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
*/
if (state->active && !state->enable) {
- DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
@@ -487,15 +496,15 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
* be able to trigger. */
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
WARN_ON(state->enable && !state->mode_blob)) {
- DRM_DEBUG_ATOMIC("[CRTC:%d] enabled without mode blob\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
WARN_ON(!state->enable && state->mode_blob)) {
- DRM_DEBUG_ATOMIC("[CRTC:%d] disabled with mode blob\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
@@ -540,8 +549,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
state->planes[index] = plane;
plane_state->state = state;
- DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n",
- plane->base.id, plane_state, state);
+ DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
+ plane->base.id, plane->name, plane_state, state);
if (plane_state->crtc) {
struct drm_crtc_state *crtc_state;
@@ -616,11 +625,20 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
}
EXPORT_SYMBOL(drm_atomic_plane_set_property);
-/*
+/**
+ * drm_atomic_plane_get_property - get property value from plane state
+ * @plane: the drm plane to get the property value for
+ * @state: the state object to get the property value from
+ * @property: the property to look up
+ * @val: return location for the property value
+ *
* This function handles generic/core properties and calls out to the
* driver's ->atomic_get_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
*/
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
@@ -752,8 +770,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
}
if (plane_switching_crtc(state->state, plane, state)) {
- DRM_DEBUG_ATOMIC("[PLANE:%d] switching CRTC directly\n",
- plane->base.id);
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
+ plane->base.id, plane->name);
return -EINVAL;
}
@@ -872,11 +890,20 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);
-/*
+/**
+ * drm_atomic_connector_get_property - get property value from connector state
+ * @connector: the drm connector to get the property value for
+ * @state: the state object to get the property value from
+ * @property: the property to look up
+ * @val: return location for the property value
+ *
* This function handles generic/core properties and calls out to the
* driver's ->atomic_get_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
*/
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
@@ -977,8 +1004,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
}
if (crtc)
- DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n",
- plane_state, crtc->base.id);
+ DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
+ plane_state, crtc->base.id, crtc->name);
else
DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
plane_state);
@@ -1045,8 +1072,8 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
conn_state->crtc = crtc;
if (crtc)
- DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n",
- conn_state, crtc->base.id);
+ DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
+ conn_state, crtc->base.id, crtc->name);
else
DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
conn_state);
@@ -1085,8 +1112,8 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
if (ret)
return ret;
- DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n",
- crtc->base.id, state);
+ DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
+ crtc->base.id, crtc->name, state);
/*
* Changed connectors are already in @state, so only need to look at the
@@ -1166,8 +1193,9 @@ drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
num_connected_connectors++;
}
- DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n",
- state, num_connected_connectors, crtc->base.id);
+ DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d:%s]\n",
+ state, num_connected_connectors,
+ crtc->base.id, crtc->name);
return num_connected_connectors;
}
@@ -1188,12 +1216,7 @@ void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
retry:
drm_modeset_backoff(state->acquire_ctx);
- ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
- state->acquire_ctx);
- if (ret)
- goto retry;
- ret = drm_modeset_lock_all_crtcs(state->dev,
- state->acquire_ctx);
+ ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
if (ret)
goto retry;
}
@@ -1225,8 +1248,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
for_each_plane_in_state(state, plane, plane_state, i) {
ret = drm_atomic_plane_check(plane, plane_state);
if (ret) {
- DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n",
- plane->base.id);
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
+ plane->base.id, plane->name);
return ret;
}
}
@@ -1234,8 +1257,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
for_each_crtc_in_state(state, crtc, crtc_state, i) {
ret = drm_atomic_crtc_check(crtc, crtc_state);
if (ret) {
- DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
+ crtc->base.id, crtc->name);
return ret;
}
}
@@ -1246,8 +1269,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
if (!state->allow_modeset) {
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
- DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
}
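The drm_atomic_legacy_backoff() change above folds the connection-mutex and per-CRTC locking into a single drm_modeset_lock_all_ctx() call. A hedged sketch of the resulting acquire/backoff idiom, with example_do_work() as a hypothetical placeholder for whatever needs the locks:

/*
 * Deadlock-backoff idiom built on drm_modeset_lock_all_ctx(): on
 * -EDEADLK, drop all locks and retry. example_do_work() is a
 * hypothetical placeholder, not part of this patch.
 */
static int example_locked_op(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	if (!ret)
		ret = example_do_work(dev);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}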
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 3731a26979bc..268d37f26960 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -52,6 +52,12 @@
* drm_atomic_helper_disable_plane() and the
* various functions to implement set_property callbacks. New drivers must not
* implement these functions themselves but must use the provided helpers.
+ *
+ * The atomic helper uses the same function table structures as all other
+ * modesetting helpers. See the documentation for struct &drm_crtc_helper_funcs,
+ * struct &drm_encoder_helper_funcs and struct &drm_connector_helper_funcs. It
+ * also shares the struct &drm_plane_helper_funcs function table with the plane
+ * helpers.
*/
static void
drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
@@ -80,6 +86,26 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
}
+static bool
+check_pending_encoder_assignment(struct drm_atomic_state *state,
+ struct drm_encoder *new_encoder)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ int i;
+
+ for_each_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->best_encoder != new_encoder)
+ continue;
+
+ /* encoder already assigned and we're trying to re-steal it! */
+ if (connector->state->best_encoder != conn_state->best_encoder)
+ return false;
+ }
+
+ return true;
+}
+
static struct drm_crtc *
get_current_crtc_for_encoder(struct drm_device *dev,
struct drm_encoder *encoder)
@@ -116,9 +142,9 @@ steal_encoder(struct drm_atomic_state *state,
*/
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
- DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
encoder->base.id, encoder->name,
- encoder_crtc->base.id);
+ encoder_crtc->base.id, encoder_crtc->name);
crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
if (IS_ERR(crtc_state))
@@ -219,16 +245,24 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
if (new_encoder == connector_state->best_encoder) {
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
connector->base.id,
connector->name,
new_encoder->base.id,
new_encoder->name,
- connector_state->crtc->base.id);
+ connector_state->crtc->base.id,
+ connector_state->crtc->name);
return 0;
}
+ if (!check_pending_encoder_assignment(state, new_encoder)) {
+ DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
+ connector->base.id,
+ connector->name);
+ return -EINVAL;
+ }
+
encoder_crtc = get_current_crtc_for_encoder(state->dev,
new_encoder);
@@ -251,12 +285,13 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
crtc_state = state->crtc_states[idx];
crtc_state->connectors_changed = true;
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
connector->base.id,
connector->name,
new_encoder->base.id,
new_encoder->name,
- connector_state->crtc->base.id);
+ connector_state->crtc->base.id,
+ connector_state->crtc->name);
return 0;
}
@@ -340,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state)
ret = funcs->mode_fixup(crtc, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
- DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
}
@@ -388,14 +423,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
- DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
+ crtc->base.id, crtc->name);
crtc_state->mode_changed = true;
}
if (crtc->state->enable != crtc_state->enable) {
- DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
+ crtc->base.id, crtc->name);
/*
* For clarity this assignment is done here, but
@@ -436,18 +471,18 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
* a full modeset because update_connector_routing force that.
*/
if (crtc->state->active != crtc_state->active) {
- DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
+ crtc->base.id, crtc->name);
crtc_state->active_changed = true;
}
if (!drm_atomic_crtc_needs_modeset(crtc_state))
continue;
- DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
- crtc->base.id,
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
+ crtc->base.id, crtc->name,
crtc_state->enable ? 'y' : 'n',
- crtc_state->active ? 'y' : 'n');
+ crtc_state->active ? 'y' : 'n');
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret != 0)
@@ -461,8 +496,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
crtc);
if (crtc_state->enable != !!num_connectors) {
- DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
+ crtc->base.id, crtc->name);
return -EINVAL;
}
@@ -509,8 +544,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
ret = funcs->atomic_check(plane, plane_state);
if (ret) {
- DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n",
- plane->base.id);
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
+ plane->base.id, plane->name);
return ret;
}
}
@@ -525,8 +560,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
ret = funcs->atomic_check(crtc, state->crtc_states[i]);
if (ret) {
- DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
+ crtc->base.id, crtc->name);
return ret;
}
}
@@ -639,8 +674,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = crtc->helper_private;
- DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
+ crtc->base.id, crtc->name);
/* Right function depends upon target state. */
@@ -751,8 +786,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = crtc->helper_private;
if (crtc->state->enable && funcs->mode_set_nofb) {
- DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
+ crtc->base.id, crtc->name);
funcs->mode_set_nofb(crtc);
}
@@ -851,8 +886,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
funcs = crtc->helper_private;
if (crtc->state->enable) {
- DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
+ crtc->base.id, crtc->name);
if (funcs->enable)
funcs->enable(crtc);
@@ -1342,6 +1377,49 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
/**
+ * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
+ * @crtc: CRTC
+ * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
+ *
+ * Disables all planes associated with the given CRTC. This can be
+ * used for instance in the CRTC helper disable callback to disable
+ * all planes before shutting down the display pipeline.
+ *
+ * If the @atomic parameter is set, the function calls the CRTC's
+ * atomic_begin hook before and the atomic_flush hook after disabling
+ * the planes.
+ *
+ * It is a bug to call this function without having implemented the
+ * ->atomic_disable() plane hook.
+ */
+void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
+ bool atomic)
+{
+ const struct drm_crtc_helper_funcs *crtc_funcs =
+ crtc->helper_private;
+ struct drm_plane *plane;
+
+ if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
+ crtc_funcs->atomic_begin(crtc, NULL);
+
+ drm_for_each_plane(plane, crtc->dev) {
+ const struct drm_plane_helper_funcs *plane_funcs =
+ plane->helper_private;
+
+ if (plane->state->crtc != crtc || !plane_funcs)
+ continue;
+
+ WARN_ON(!plane_funcs->atomic_disable);
+ if (plane_funcs->atomic_disable)
+ plane_funcs->atomic_disable(plane, NULL);
+ }
+
+ if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
+ crtc_funcs->atomic_flush(crtc, NULL);
+}
+EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
+
+/**
* drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
* @dev: DRM device
* @old_state: atomic state object with old state structures
@@ -1818,6 +1896,161 @@ commit:
}
/**
+ * drm_atomic_helper_disable_all - disable all currently active outputs
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Loops through all connectors, finds those that aren't turned off, and then
+ * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
+ * they are connected to.
+ *
+ * This is used for example in suspend/resume to disable all currently
+ * active outputs when suspending.
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
+ */
+int drm_atomic_helper_disable_all(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector *conn;
+ int err;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ drm_for_each_connector(conn, dev) {
+ struct drm_crtc *crtc = conn->state->crtc;
+ struct drm_crtc_state *crtc_state;
+
+ if (!crtc || conn->dpms != DRM_MODE_DPMS_ON)
+ continue;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ err = PTR_ERR(crtc_state);
+ goto free;
+ }
+
+ crtc_state->active = false;
+ }
+
+ err = drm_atomic_commit(state);
+
+free:
+ if (err < 0)
+ drm_atomic_state_free(state);
+
+ return err;
+}
+EXPORT_SYMBOL(drm_atomic_helper_disable_all);
+
+/**
+ * drm_atomic_helper_suspend - subsystem-level suspend helper
+ * @dev: DRM device
+ *
+ * Duplicates the current atomic state, disables all active outputs and then
+ * returns a pointer to the original atomic state to the caller. Drivers can
+ * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
+ * restore the output configuration that was active at the time the system
+ * entered suspend.
+ *
+ * Note that it is potentially unsafe to use this. The atomic state object
+ * returned by this function is assumed to be persistent. Drivers must ensure
+ * that this holds true. Before calling this function, drivers must make sure
+ * to suspend fbdev emulation so that nothing can be using the device.
+ *
+ * Returns:
+ * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
+ * encoded error code on failure. Drivers should store the returned atomic
+ * state object and pass it to the drm_atomic_helper_resume() helper upon
+ * resume.
+ *
+ * See also:
+ * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
+ * drm_atomic_helper_resume()
+ */
+struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ int err;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ err = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (err < 0) {
+ state = ERR_PTR(err);
+ goto unlock;
+ }
+
+ state = drm_atomic_helper_duplicate_state(dev, &ctx);
+ if (IS_ERR(state))
+ goto unlock;
+
+ err = drm_atomic_helper_disable_all(dev, &ctx);
+ if (err < 0) {
+ drm_atomic_state_free(state);
+ state = ERR_PTR(err);
+ goto unlock;
+ }
+
+unlock:
+ if (PTR_ERR(state) == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_suspend);
+
+/**
+ * drm_atomic_helper_resume - subsystem-level resume helper
+ * @dev: DRM device
+ * @state: atomic state to resume to
+ *
+ * Calls drm_mode_config_reset() to synchronize hardware and software states,
+ * grabs all modeset locks and commits the atomic state object. This can be
+ * used in conjunction with the drm_atomic_helper_suspend() helper to
+ * implement suspend/resume for drivers that support atomic mode-setting.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend()
+ */
+int drm_atomic_helper_resume(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ int err;
+
+ drm_mode_config_reset(dev);
+ drm_modeset_lock_all(dev);
+ state->acquire_ctx = config->acquire_ctx;
+ err = drm_atomic_commit(state);
+ drm_modeset_unlock_all(dev);
+
+ return err;
+}
+EXPORT_SYMBOL(drm_atomic_helper_resume);
+
+/**
* drm_atomic_helper_crtc_set_property - helper for crtc properties
* @crtc: DRM crtc
* @property: DRM property
@@ -2173,6 +2406,12 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
* The simpler solution is to just reset the software state to everything off,
* which is easiest to do by calling drm_mode_config_reset(). To facilitate this
* the atomic helpers provide default reset implementations for all hooks.
+ *
+ * On the upside the precise state tracking of atomic simplifies system suspend
+ * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
+ * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
+ * For other drivers the building blocks are split out, see the documentation
+ * for these functions.
*/
/**
@@ -2429,7 +2668,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
* @ctx: lock acquisition context
*
* Makes a copy of the current atomic state by looping over all objects and
- * duplicating their respective states.
+ * duplicating their respective states. This is used for example by suspend/
+ * resume support code to save the state prior to suspend such that it can
+ * be restored upon resume.
*
* Note that this treats atomic state as persistent between save and restore.
* Drivers must make sure that this is possible and won't result in confusion
@@ -2441,6 +2682,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
* Returns:
* A pointer to the copy of the atomic state object on success or an
* ERR_PTR()-encoded error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
*/
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
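Together the two new helpers give atomic drivers a complete suspend/resume recipe. A minimal sketch of driver PM hooks wired to them; struct example_drm and its members are assumptions for illustration only:

/*
 * Suspend/resume sketch on top of the new helpers. The state
 * returned by drm_atomic_helper_suspend() must be kept across
 * suspend and handed back to drm_atomic_helper_resume().
 */
struct example_drm {
	struct drm_device *drm;
	struct drm_atomic_state *suspend_state;
};

static int example_pm_suspend(struct example_drm *priv)
{
	priv->suspend_state = drm_atomic_helper_suspend(priv->drm);
	if (IS_ERR(priv->suspend_state))
		return PTR_ERR(priv->suspend_state);
	return 0;
}

static int example_pm_resume(struct example_drm *priv)
{
	return drm_atomic_helper_resume(priv->drm, priv->suspend_state);
}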
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 6b8f7211e543..bd93453afa61 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -31,14 +31,14 @@
/**
* DOC: overview
*
- * drm_bridge represents a device that hangs on to an encoder. These are handy
- * when a regular drm_encoder entity isn't enough to represent the entire
+ * struct &drm_bridge represents a device that hangs on to an encoder. These are
+ * handy when a regular &drm_encoder entity isn't enough to represent the entire
* encoder chain.
*
- * A bridge is always associated to a single drm_encoder at a time, but can be
+ * A bridge is always attached to a single &drm_encoder at a time, but can be
* either connected to it directly, or through an intermediate bridge:
*
- * encoder ---> bridge B ---> bridge A
+ * encoder ---> bridge B ---> bridge A
*
* Here, the output of the encoder feeds to bridge B, and that in turn feeds to
* bridge A.
@@ -46,11 +46,16 @@
* The driver using the bridge is responsible for making the associations between
* the encoder and bridges. Once these links are made, the bridges will
* participate along with encoder functions to perform mode_set/enable/disable
- * through the ops provided in drm_bridge_funcs.
+ * through the ops provided in &drm_bridge_funcs.
*
* drm_bridge and drm_panel aren't drm_mode_object entities like planes,
- * crtcs, encoders or connectors. They just provide additional hooks to get the
- * desired output at the end of the encoder chain.
+ * CRTCs, encoders or connectors and hence are not visible to userspace. They
+ * just provide additional hooks to get the desired output at the end of the
+ * encoder chain.
+ *
+ * Bridges can also be chained up using the next pointer in struct &drm_bridge.
+ *
+ * Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
*/
static DEFINE_MUTEX(bridge_lock);
@@ -122,34 +127,12 @@ EXPORT_SYMBOL(drm_bridge_attach);
/**
* DOC: bridge callbacks
*
- * The drm_bridge_funcs ops are populated by the bridge driver. The drm
- * internals(atomic and crtc helpers) use the helpers defined in drm_bridge.c
- * These helpers call a specific drm_bridge_funcs op for all the bridges
+ * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
+ * internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c.
+ * These helpers call a specific &drm_bridge_funcs op for all the bridges
* during encoder configuration.
*
- * When creating a bridge driver, one can implement drm_bridge_funcs op with
- * the help of these rough rules:
- *
- * pre_enable: this contains things needed to be done for the bridge before
- * its clock and timings are enabled by its source. For a bridge, its source
- * is generally the encoder or bridge just before it in the encoder chain.
- *
- * enable: this contains things needed to be done for the bridge once its
- * source is enabled. In other words, enable is called once the source is
- * ready with clock and timing needed by the bridge.
- *
- * disable: this contains things needed to be done for the bridge assuming
- * that its source is still enabled, i.e. clock and timings are still on.
- *
- * post_disable: this contains things needed to be done for the bridge once
- * its source is disabled, i.e. once clocks and timings are off.
- *
- * mode_fixup: this should fixup the given mode for the bridge. It is called
- * after the encoder's mode fixup. mode_fixup can also reject a mode completely
- * if it's unsuitable for the hardware.
- *
- * mode_set: this sets up the mode for the bridge. It assumes that its source
- * (an encoder or a bridge) has set the mode too.
+ * For detailed specification of the bridge callbacks see &drm_bridge_funcs.
*/
/**
@@ -159,7 +142,7 @@ EXPORT_SYMBOL(drm_bridge_attach);
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
*
- * Calls 'mode_fixup' drm_bridge_funcs op for all the bridges in the
+ * Calls ->mode_fixup() &drm_bridge_funcs op for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
@@ -186,11 +169,11 @@ bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_bridge_mode_fixup);
/**
- * drm_bridge_disable - calls 'disable' drm_bridge_funcs op for all
+ * drm_bridge_disable - calls ->disable() &drm_bridge_funcs op for all
* bridges in the encoder chain.
* @bridge: bridge control structure
*
- * Calls 'disable' drm_bridge_funcs op for all the bridges in the encoder
+ * Calls ->disable() &drm_bridge_funcs op for all the bridges in the encoder
* chain, starting from the last bridge to the first. These are called before
* calling the encoder's prepare op.
*
@@ -208,11 +191,11 @@ void drm_bridge_disable(struct drm_bridge *bridge)
EXPORT_SYMBOL(drm_bridge_disable);
/**
- * drm_bridge_post_disable - calls 'post_disable' drm_bridge_funcs op for
+ * drm_bridge_post_disable - calls ->post_disable() &drm_bridge_funcs op for
* all bridges in the encoder chain.
* @bridge: bridge control structure
*
- * Calls 'post_disable' drm_bridge_funcs op for all the bridges in the
+ * Calls ->post_disable() &drm_bridge_funcs op for all the bridges in the
* encoder chain, starting from the first bridge to the last. These are called
* after completing the encoder's prepare op.
*
@@ -236,7 +219,7 @@ EXPORT_SYMBOL(drm_bridge_post_disable);
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
*
- * Calls 'mode_set' drm_bridge_funcs op for all the bridges in the
+ * Calls ->mode_set() &drm_bridge_funcs op for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
@@ -256,11 +239,11 @@ void drm_bridge_mode_set(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_bridge_mode_set);
/**
- * drm_bridge_pre_enable - calls 'pre_enable' drm_bridge_funcs op for all
+ * drm_bridge_pre_enable - calls ->pre_enable() &drm_bridge_funcs op for all
* bridges in the encoder chain.
* @bridge: bridge control structure
*
- * Calls 'pre_enable' drm_bridge_funcs op for all the bridges in the encoder
+ * Calls ->pre_enable() &drm_bridge_funcs op for all the bridges in the encoder
* chain, starting from the last bridge to the first. These are called
* before calling the encoder's commit op.
*
@@ -278,11 +261,11 @@ void drm_bridge_pre_enable(struct drm_bridge *bridge)
EXPORT_SYMBOL(drm_bridge_pre_enable);
/**
- * drm_bridge_enable - calls 'enable' drm_bridge_funcs op for all bridges
+ * drm_bridge_enable - calls ->enable() &drm_bridge_funcs op for all bridges
* in the encoder chain.
* @bridge: bridge control structure
*
- * Calls 'enable' drm_bridge_funcs op for all the bridges in the encoder
+ * Calls ->enable() &drm_bridge_funcs op for all the bridges in the encoder
* chain, starting from the first bridge to the last. These are called
* after completing the encoder's commit op.
*
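As the reworked overview notes, a bridge attaches to a single encoder and further bridges chain through the next pointer. A hedged sketch of wiring the encoder ---> bridge B ---> bridge A chain described above, with error handling trimmed:

/*
 * Chain setup sketch: drm_bridge_attach() registers each bridge,
 * while encoder->bridge and bridge->next define the order the
 * drm_bridge_* helpers walk.
 */
static int example_wire_bridges(struct drm_device *dev,
				struct drm_encoder *encoder,
				struct drm_bridge *bridge_b,
				struct drm_bridge *bridge_a)
{
	int ret;

	ret = drm_bridge_attach(dev, bridge_b);
	if (ret)
		return ret;
	ret = drm_bridge_attach(dev, bridge_a);
	if (ret)
		return ret;

	encoder->bridge = bridge_b;	/* first bridge after the encoder */
	bridge_b->next = bridge_a;	/* bridge B feeds bridge A */
	return 0;
}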
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 32dd134700bd..62fa95fa5471 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -649,6 +649,18 @@ EXPORT_SYMBOL(drm_framebuffer_remove);
DEFINE_WW_CLASS(crtc_ww_class);
+static unsigned int drm_num_crtcs(struct drm_device *dev)
+{
+ unsigned int num = 0;
+ struct drm_crtc *tmp;
+
+ drm_for_each_crtc(tmp, dev) {
+ num++;
+ }
+
+ return num;
+}
+
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
@@ -657,6 +669,7 @@ DEFINE_WW_CLASS(crtc_ww_class);
* @primary: Primary plane for CRTC
* @cursor: Cursor plane for CRTC
* @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
*
* Initialises a new CRTC object created as the base part of a driver CRTC object.
*
@@ -666,7 +679,8 @@ DEFINE_WW_CLASS(crtc_ww_class);
int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary,
struct drm_plane *cursor,
- const struct drm_crtc_funcs *funcs)
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
@@ -682,6 +696,21 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (ret)
return ret;
+ if (name) {
+ va_list ap;
+
+ va_start(ap, name);
+ crtc->name = kvasprintf(GFP_KERNEL, name, ap);
+ va_end(ap);
+ } else {
+ crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
+ drm_num_crtcs(dev));
+ }
+ if (!crtc->name) {
+ drm_mode_object_put(dev, &crtc->base);
+ return -ENOMEM;
+ }
+
crtc->base.properties = &crtc->properties;
list_add_tail(&crtc->head, &config->crtc_list);
@@ -728,6 +757,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
if (crtc->state && crtc->funcs->atomic_destroy_state)
crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+ kfree(crtc->name);
+
memset(crtc, 0, sizeof(*crtc));
}
EXPORT_SYMBOL(drm_crtc_cleanup);
@@ -1075,6 +1106,7 @@ EXPORT_SYMBOL(drm_connector_unplug_all);
* @encoder: the encoder to init
* @funcs: callbacks for this encoder
* @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
*
* Initialises a preallocated encoder. Encoder should be
* subclassed as part of driver encoder objects.
@@ -1085,7 +1117,7 @@ EXPORT_SYMBOL(drm_connector_unplug_all);
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
- int encoder_type)
+ int encoder_type, const char *name, ...)
{
int ret;
@@ -1098,9 +1130,17 @@ int drm_encoder_init(struct drm_device *dev,
encoder->dev = dev;
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
- encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
- drm_encoder_enum_list[encoder_type].name,
- encoder->base.id);
+ if (name) {
+ va_list ap;
+
+ va_start(ap, name);
+ encoder->name = kvasprintf(GFP_KERNEL, name, ap);
+ va_end(ap);
+ } else {
+ encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
+ drm_encoder_enum_list[encoder_type].name,
+ encoder->base.id);
+ }
if (!encoder->name) {
ret = -ENOMEM;
goto out_put;
@@ -1141,6 +1181,18 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
}
EXPORT_SYMBOL(drm_encoder_cleanup);
+static unsigned int drm_num_planes(struct drm_device *dev)
+{
+ unsigned int num = 0;
+ struct drm_plane *tmp;
+
+ drm_for_each_plane(tmp, dev) {
+ num++;
+ }
+
+ return num;
+}
+
/**
* drm_universal_plane_init - Initialize a new universal plane object
* @dev: DRM device
@@ -1150,6 +1202,7 @@ EXPORT_SYMBOL(drm_encoder_cleanup);
* @formats: array of supported formats (%DRM_FORMAT_*)
* @format_count: number of elements in @formats
* @type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
*
* Initializes a plane object of type @type.
*
@@ -1160,7 +1213,8 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
- enum drm_plane_type type)
+ enum drm_plane_type type,
+ const char *name, ...)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
@@ -1182,6 +1236,22 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
return -ENOMEM;
}
+ if (name) {
+ va_list ap;
+
+ va_start(ap, name);
+ plane->name = kvasprintf(GFP_KERNEL, name, ap);
+ va_end(ap);
+ } else {
+ plane->name = kasprintf(GFP_KERNEL, "plane-%d",
+ drm_num_planes(dev));
+ }
+ if (!plane->name) {
+ kfree(plane->format_types);
+ drm_mode_object_put(dev, &plane->base);
+ return -ENOMEM;
+ }
+
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
plane->format_count = format_count;
plane->possible_crtcs = possible_crtcs;
@@ -1240,7 +1310,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
- formats, format_count, type);
+ formats, format_count, type, NULL);
}
EXPORT_SYMBOL(drm_plane_init);
@@ -1272,6 +1342,8 @@ void drm_plane_cleanup(struct drm_plane *plane)
if (plane->state && plane->funcs->atomic_destroy_state)
plane->funcs->atomic_destroy_state(plane, plane->state);
+ kfree(plane->name);
+
memset(plane, 0, sizeof(*plane));
}
EXPORT_SYMBOL(drm_plane_cleanup);
@@ -1801,7 +1873,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
copied = 0;
crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
drm_for_each_crtc(crtc, dev) {
- DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n",
+ crtc->base.id, crtc->name);
if (put_user(crtc->base.id, crtc_id + copied)) {
ret = -EFAULT;
goto out;
@@ -2646,7 +2719,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
ret = -ENOENT;
goto out;
}
- DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
if (crtc_req->mode_valid) {
/* If we have a mode we need a framebuffer. */
@@ -4785,9 +4858,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
- ret = 0;
- if (connector->funcs->dpms)
- ret = (*connector->funcs->dpms)(connector, (int)value);
+ ret = (*connector->funcs->dpms)(connector, (int)value);
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, value);
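The CRTC, plane and encoder init paths now all accept an optional printf-style name, falling back to the "crtc-%d"/"plane-%d" style defaults generated above. A sketch of a driver passing its own label; the "pipe-%c" format is illustrative, not mandated by this patch:

/*
 * Naming sketch for drm_crtc_init_with_planes(): the final
 * arguments are a printf-style name, or NULL for the default.
 */
static int example_crtc_init(struct drm_device *dev,
			     struct drm_crtc *crtc,
			     struct drm_plane *primary,
			     const struct drm_crtc_funcs *funcs,
			     int pipe)
{
	return drm_crtc_init_with_planes(dev, crtc, primary, NULL,
					 funcs, "pipe-%c", 'A' + pipe);
}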
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 6b4cf25fed12..a02a7f9a6a9d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -51,6 +51,11 @@
* the same callbacks which drivers can use to e.g. restore the modeset
* configuration on resume with drm_helper_resume_force_mode().
*
+ * Note that this helper library doesn't track the current power state of CRTCs
+ * and encoders. It can call callbacks like ->dpms() even though the hardware is
+ * already in the desired state. This deficiency has been fixed in the atomic
+ * helpers.
+ *
* The driver callbacks are mostly compatible with the atomic modeset helpers,
* except for the handling of the primary plane: Atomic helpers require that the
* primary plane is implemented as a real standalone plane and not directly tied
@@ -62,6 +67,11 @@
* converting to the plane helpers). New drivers must not use these functions
* but need to implement the atomic interface instead, potentially using the
* atomic helpers for that.
+ *
+ * These legacy modeset helpers use the same function table structures as
+ * all other modesetting helpers. See the documentation for struct
+ * &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct
+ * &drm_connector_helper_funcs.
*/
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
@@ -206,8 +216,8 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
* @dev: DRM device
*
* This function walks through the entire mode setting configuration of @dev. It
- * will remove any crtc links of unused encoders and encoder links of
- * disconnected connectors. Then it will disable all unused encoders and crtcs
+ * will remove any CRTC links of unused encoders and encoder links of
+ * disconnected connectors. Then it will disable all unused encoders and CRTCs
* either by calling their disable callback if available or by calling their
* dpms callback with DRM_MODE_DPMS_OFF.
*/
@@ -329,7 +339,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
DRM_DEBUG_KMS("CRTC fixup failed\n");
goto done;
}
- DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
crtc->hwmode = *adjusted_mode;
@@ -445,11 +455,36 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
* drm_crtc_helper_set_config - set a new config from userspace
* @set: mode set configuration
*
- * Setup a new configuration, provided by the upper layers (either an ioctl call
- * from userspace or internally e.g. from the fbdev support code) in @set, and
- * enable it. This is the main helper functions for drivers that implement
- * kernel mode setting with the crtc helper functions and the assorted
- * ->prepare(), ->modeset() and ->commit() helper callbacks.
+ * The drm_crtc_helper_set_config() helper function implements the set_config
+ * callback of struct &drm_crtc_funcs for drivers using the legacy CRTC helpers.
+ *
+ * It first tries to locate the best encoder for each connector by calling the
+ * connector ->best_encoder() (struct &drm_connector_helper_funcs) helper
+ * operation.
+ *
+ * After locating the appropriate encoders, the helper function will call the
+ * mode_fixup encoder and CRTC helper operations to adjust the requested mode,
+ * or reject it completely, in which case an error will be returned to the
+ * application. If the new configuration after mode adjustment is identical to
+ * the current configuration the helper function will return without performing
+ * any other operation.
+ *
+ * If the adjusted mode is identical to the current mode but changes to the
+ * frame buffer need to be applied, the drm_crtc_helper_set_config() function
+ * will call the CRTC ->mode_set_base() (struct &drm_crtc_helper_funcs) helper
+ * operation.
+ *
+ * If the adjusted mode differs from the current mode, or if the
+ * ->mode_set_base() helper operation is not provided, the helper function
+ * performs a full mode set sequence by calling the ->prepare(), ->mode_set()
+ * and ->commit() CRTC and encoder helper operations, in that order.
+ * Alternatively it can also use the dpms and disable helper operations. For
+ * details see struct &drm_crtc_helper_funcs and struct
+ * &drm_encoder_helper_funcs.
+ *
+ * This function is deprecated. New drivers must implement atomic modeset
+ * support, for which this function is unsuitable. Instead drivers should use
+ * drm_atomic_helper_set_config().
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
@@ -484,11 +519,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
set->fb = NULL;
if (set->fb) {
- DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
- set->crtc->base.id, set->fb->base.id,
- (int)set->num_connectors, set->x, set->y);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->crtc->name,
+ set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
} else {
- DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n",
+ set->crtc->base.id, set->crtc->name);
drm_crtc_helper_disable(set->crtc);
return 0;
}
@@ -628,12 +665,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
connector->encoder->crtc = new_crtc;
}
if (new_crtc) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
- connector->base.id, connector->name,
- new_crtc->base.id);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n",
+ connector->base.id, connector->name,
+ new_crtc->base.id, new_crtc->name);
} else {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
- connector->base.id, connector->name);
+ connector->base.id, connector->name);
}
}
@@ -650,8 +687,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
save_set.fb)) {
- DRM_ERROR("failed to set mode on [CRTC:%d]\n",
- set->crtc->base.id);
+ DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n",
+ set->crtc->base.id, set->crtc->name);
set->crtc->primary->fb = save_set.fb;
ret = -EINVAL;
goto fail;
@@ -758,10 +795,18 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
* @connector: affected connector
* @mode: DPMS mode
*
- * This is the main helper function provided by the crtc helper framework for
+ * The drm_helper_connector_dpms() helper function implements the ->dpms()
+ * callback of struct &drm_connector_funcs for drivers using the legacy CRTC helpers.
+ *
+ * This is the main helper function provided by the CRTC helper framework for
* implementing the DPMS connector attribute. It computes the new desired DPMS
- * state for all encoders and crtcs in the output mesh and calls the ->dpms()
- * callback provided by the driver appropriately.
+ * state for all encoders and CRTCs in the output mesh and calls the ->dpms()
+ * callbacks provided by the driver in struct &drm_crtc_helper_funcs and struct
+ * &drm_encoder_helper_funcs appropriately.
+ *
+ * This function is deprecated. New drivers must implement atomic modeset
+ * support, for which this function is unsuitable. Instead drivers should use
+ * drm_atomic_helper_connector_dpms().
*
* Returns:
* Always returns 0.
@@ -855,6 +900,12 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
* due to slight differences in allocating shared resources when the
* configuration is restored in a different order than when userspace set it up)
* need to use their own restore logic.
+ *
+ * This function is deprecated. New drivers should implement atomic
+ * modesetting and use the atomic suspend/resume helpers.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
*/
void drm_helper_resume_force_mode(struct drm_device *dev)
{
@@ -913,9 +964,9 @@ EXPORT_SYMBOL(drm_helper_resume_force_mode);
* @old_fb: previous framebuffer
*
* This function implements a callback usable as the ->mode_set callback
- * required by the crtc helpers. Besides the atomic plane helper functions for
+ * required by the CRTC helpers. Besides the atomic plane helper functions for
* the primary plane the driver must also provide the ->mode_set_nofb callback
- * to set up the crtc.
+ * to set up the CRTC.
*
* This is a transitional helper useful for converting drivers to the atomic
* interfaces.
@@ -979,7 +1030,7 @@ EXPORT_SYMBOL(drm_helper_crtc_mode_set);
* @old_fb: previous framebuffer
*
* This function implements a callback usable as the ->mode_set_base callback
- * required by the crtc helpers. The driver must provide the atomic plane helper
+ * required by the CRTC helpers. The driver must provide the atomic plane helper
* functions for the primary plane.
*
* This is a transitional helper useful for converting drivers to the atomic
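The expanded kerneldoc above spells out which entry points the legacy helpers implement. A minimal sketch of the corresponding function tables in a (deprecated) non-atomic driver, using only helpers named in this patch; real drivers typically wrap ->destroy to also free the containing object:

/*
 * Legacy helper wiring sketch; new drivers should use the atomic
 * equivalents instead, as the documentation above stresses.
 */
static const struct drm_crtc_funcs example_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.destroy = drm_crtc_cleanup,
};

static const struct drm_connector_funcs example_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
};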
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 809959d56d78..3b6627dde9ff 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1673,6 +1673,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->proposed_vcpis[i]) {
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
} else {
port = NULL;
req_payload.num_slots = 0;
@@ -1688,6 +1689,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
if (req_payload.num_slots) {
drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
mgr->payloads[i].num_slots = req_payload.num_slots;
+ mgr->payloads[i].vcpi = req_payload.vcpi;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
@@ -1823,7 +1825,7 @@ static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req
{
struct drm_dp_sideband_msg_reply_body reply;
- reply.reply_type = 1;
+ reply.reply_type = 0;
reply.req_type = req_type;
drm_dp_encode_sideband_reply(&reply, msg);
return 0;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9362609df38a..bf934cdea21c 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -160,6 +160,11 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
+ if (!file_priv->allowed_master) {
+ ret = drm_new_set_master(dev, file_priv);
+ goto out_unlock;
+ }
+
file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1;
if (dev->driver->master_set) {
@@ -628,8 +633,17 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
}
}
+ if (parent) {
+ ret = drm_dev_set_unique(dev, dev_name(parent));
+ if (ret)
+ goto err_setunique;
+ }
+
return dev;
+err_setunique:
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_destroy(dev);
err_ctxbitmap:
drm_legacy_ctxbitmap_cleanup(dev);
drm_ht_remove(&dev->map_hash);
@@ -792,23 +806,18 @@ EXPORT_SYMBOL(drm_dev_unregister);
/**
* drm_dev_set_unique - Set the unique name of a DRM device
* @dev: device of which to set the unique name
- * @fmt: format string for unique name
+ * @name: unique name
*
- * Sets the unique name of a DRM device using the specified format string and
- * a variable list of arguments. Drivers can use this at driver probe time if
- * the unique name of the devices they drive is static.
+ * Sets the unique name of a DRM device using the specified string. Drivers
+ * can use this at driver probe time if the unique name of the devices they
+ * drive is static.
*
* Return: 0 on success or a negative error code on failure.
*/
-int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
+int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
- va_list ap;
-
kfree(dev->unique);
-
- va_start(ap, fmt);
- dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
- va_end(ap);
+ dev->unique = kstrdup(name, GFP_KERNEL);
return dev->unique ? 0 : -ENOMEM;
}
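With the format-string variant gone, drm_dev_set_unique() takes a plain string, and drm_dev_alloc() now derives it from the parent device automatically. A sketch of what a driver with its own probe path would call; the wrapper name is illustrative:

/*
 * Sketch of the simplified API: dev_name(parent) mirrors what
 * drm_dev_alloc() itself now does for devices with a parent.
 */
static int example_set_unique(struct drm_device *ddev,
			      struct device *parent)
{
	return drm_dev_set_unique(ddev, dev_name(parent));
}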
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d5d2c03fd136..c214f1246cb4 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2545,6 +2545,33 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
return clock;
}
+static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
+ unsigned int clock_tolerance)
+{
+ u8 mode;
+
+ if (!to_match->clock)
+ return 0;
+
+ for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
+ const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
+ unsigned int clock1, clock2;
+
+ /* Check both 60Hz and 59.94Hz */
+ clock1 = cea_mode->clock;
+ clock2 = cea_mode_alternate_clock(cea_mode);
+
+ if (abs(to_match->clock - clock1) > clock_tolerance &&
+ abs(to_match->clock - clock2) > clock_tolerance)
+ continue;
+
+ if (drm_mode_equal_no_clocks(to_match, cea_mode))
+ return mode + 1;
+ }
+
+ return 0;
+}
+
/**
* drm_match_cea_mode - look for a CEA mode matching given mode
* @to_match: display mode
@@ -2609,6 +2636,33 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
return cea_mode_alternate_clock(hdmi_mode);
}
+static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match,
+ unsigned int clock_tolerance)
+{
+ u8 mode;
+
+ if (!to_match->clock)
+ return 0;
+
+ for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
+ const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
+ unsigned int clock1, clock2;
+
+ /* Make sure to also match alternate clocks */
+ clock1 = hdmi_mode->clock;
+ clock2 = hdmi_mode_alternate_clock(hdmi_mode);
+
+ if (abs(to_match->clock - clock1) > clock_tolerance &&
+ abs(to_match->clock - clock2) > clock_tolerance)
+ continue;
+
+ if (drm_mode_equal_no_clocks(to_match, hdmi_mode))
+ return mode + 1;
+ }
+
+ return 0;
+}
+
/*
* drm_match_hdmi_mode - look for a HDMI mode matching given mode
* @to_match: display mode
@@ -3119,14 +3173,18 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
u8 mode_idx;
const char *type;
- mode_idx = drm_match_cea_mode(mode) - 1;
+ /*
+ * allow 5kHz clock difference either way to account for
+ * the 10kHz clock resolution limit of detailed timings.
+ */
+ mode_idx = drm_match_cea_mode_clock_tolerance(mode, 5) - 1;
if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
type = "CEA";
cea_mode = &edid_cea_modes[mode_idx];
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
- mode_idx = drm_match_hdmi_mode(mode) - 1;
+ mode_idx = drm_match_hdmi_mode_clock_tolerance(mode, 5) - 1;
if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
type = "HDMI";
cea_mode = &edid_4k_modes[mode_idx];
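A worked example of why the 5 kHz tolerance matters: the 59.94 Hz alternate of a 74.25 MHz CEA mode runs at 74250 * 1000 / 1001 ~= 74176 kHz, but detailed timings store pixel clocks in 10 kHz units, so the EDID reports 74180 kHz. A sketch of the tolerance check, mirroring the comparison in the matchers above:

/*
 * Tolerance check sketch: the 4 kHz delta between 74176 kHz and the
 * 10 kHz-rounded 74180 kHz is within the 5 kHz tolerance, so the
 * mode is still recognised as the CEA mode.
 */
static bool example_clock_matches(unsigned int detailed_khz,
				  unsigned int cea_khz)
{
	unsigned int delta = detailed_khz > cea_khz ?
			     detailed_khz - cea_khz :
			     cea_khz - detailed_khz;

	return delta <= 5; /* kHz, the tolerance passed above */
}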
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index d18b88b755c3..e8629076de32 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(drm_i2c_encoder_destroy);
* Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
*/
-static inline struct drm_encoder_slave_funcs *
+static inline const struct drm_encoder_slave_funcs *
get_slave_funcs(struct drm_encoder *enc)
{
return to_encoder_slave(enc)->slave_funcs;
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index b7d5b848d2f8..5543fa806aec 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -266,7 +266,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
ret = PTR_ERR(fbi);
- goto err_drm_gem_cma_free_object;
+ goto err_gem_free_object;
}
fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
@@ -299,8 +299,8 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
err_fb_info_destroy:
drm_fb_helper_release_fbi(helper);
-err_drm_gem_cma_free_object:
- drm_gem_cma_free_object(&obj->base);
+err_gem_free_object:
+ dev->driver->gem_free_object(&obj->base);
return ret;
}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index c59ce4d0ef75..1ea8790e5090 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -126,6 +126,60 @@ static int drm_cpu_valid(void)
}
/**
+ * drm_new_set_master - allocate a new master object and become master for
+ * the associated master realm
+ * @dev: The associated device.
+ * @fpriv: File private identifying the client.
+ *
+ * This function must be called with dev->master_mutex held.
+ * Returns negative error code on failure. Zero on success.
+ */
+int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
+{
+ struct drm_master *old_master;
+ int ret;
+
+ lockdep_assert_held_once(&dev->master_mutex);
+
+ /* create a new master */
+ fpriv->minor->master = drm_master_create(fpriv->minor);
+ if (!fpriv->minor->master)
+ return -ENOMEM;
+
+ /* take another reference for the copy in the local file priv */
+ old_master = fpriv->master;
+ fpriv->master = drm_master_get(fpriv->minor->master);
+
+ if (dev->driver->master_create) {
+ ret = dev->driver->master_create(dev, fpriv->master);
+ if (ret)
+ goto out_err;
+ }
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, fpriv, true);
+ if (ret)
+ goto out_err;
+ }
+
+ fpriv->is_master = 1;
+ fpriv->allowed_master = 1;
+ fpriv->authenticated = 1;
+ if (old_master)
+ drm_master_put(&old_master);
+
+ return 0;
+
+out_err:
+ /* drop both references and restore old master on failure */
+ drm_master_put(&fpriv->minor->master);
+ drm_master_put(&fpriv->master);
+ fpriv->master = old_master;
+
+ return ret;
+}
+
+/**
* Called whenever a process opens /dev/drm.
*
* \param filp file pointer.
@@ -172,6 +226,8 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
+ mutex_init(&priv->event_read_lock);
+
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_open(dev, priv);
@@ -189,35 +245,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
mutex_lock(&dev->master_mutex);
if (drm_is_primary_client(priv) && !priv->minor->master) {
/* create a new master */
- priv->minor->master = drm_master_create(priv->minor);
- if (!priv->minor->master) {
- ret = -ENOMEM;
+ ret = drm_new_set_master(dev, priv);
+ if (ret)
goto out_close;
- }
-
- priv->is_master = 1;
- /* take another reference for the copy in the local file priv */
- priv->master = drm_master_get(priv->minor->master);
- priv->authenticated = 1;
-
- if (dev->driver->master_create) {
- ret = dev->driver->master_create(dev, priv->master);
- if (ret) {
- /* drop both references if this fails */
- drm_master_put(&priv->minor->master);
- drm_master_put(&priv->master);
- goto out_close;
- }
- }
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, priv, true);
- if (ret) {
- /* drop both references if this fails */
- drm_master_put(&priv->minor->master);
- drm_master_put(&priv->master);
- goto out_close;
- }
- }
} else if (drm_is_primary_client(priv)) {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
@@ -483,14 +513,28 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
- ssize_t ret = 0;
+ ssize_t ret;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
- spin_lock_irq(&dev->event_lock);
+ ret = mutex_lock_interruptible(&file_priv->event_read_lock);
+ if (ret)
+ return ret;
+
for (;;) {
- if (list_empty(&file_priv->event_list)) {
+ struct drm_pending_event *e = NULL;
+
+ spin_lock_irq(&dev->event_lock);
+ if (!list_empty(&file_priv->event_list)) {
+ e = list_first_entry(&file_priv->event_list,
+ struct drm_pending_event, link);
+ file_priv->event_space += e->event->length;
+ list_del(&e->link);
+ }
+ spin_unlock_irq(&dev->event_lock);
+
+ if (e == NULL) {
if (ret)
break;
@@ -499,36 +543,36 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
break;
}
- spin_unlock_irq(&dev->event_lock);
+ mutex_unlock(&file_priv->event_read_lock);
ret = wait_event_interruptible(file_priv->event_wait,
!list_empty(&file_priv->event_list));
- spin_lock_irq(&dev->event_lock);
- if (ret < 0)
- break;
-
- ret = 0;
+ if (ret >= 0)
+ ret = mutex_lock_interruptible(&file_priv->event_read_lock);
+ if (ret)
+ return ret;
} else {
- struct drm_pending_event *e;
-
- e = list_first_entry(&file_priv->event_list,
- struct drm_pending_event, link);
- if (e->event->length + ret > count)
+ unsigned length = e->event->length;
+
+ if (length > count - ret) {
+put_back_event:
+ spin_lock_irq(&dev->event_lock);
+ file_priv->event_space -= length;
+ list_add(&e->link, &file_priv->event_list);
+ spin_unlock_irq(&dev->event_lock);
break;
+ }
- if (__copy_to_user_inatomic(buffer + ret,
- e->event, e->event->length)) {
+ if (copy_to_user(buffer + ret, e->event, length)) {
if (ret == 0)
ret = -EFAULT;
- break;
+ goto put_back_event;
}
- file_priv->event_space += e->event->length;
- ret += e->event->length;
- list_del(&e->link);
+ ret += length;
e->destroy(e);
}
}
- spin_unlock_irq(&dev->event_lock);
+ mutex_unlock(&file_priv->event_read_lock);
return ret;
}
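With drm_read() now dequeuing under the spinlock and copying with plain copy_to_user() under a per-file event_read_lock, userspace always receives whole events, even with concurrent readers. A minimal userspace sketch of draining pending events, assuming libdrm's <drm.h> for struct drm_event and DRM_EVENT_VBLANK; drain_drm_events() is a hypothetical helper:

    #include <stdio.h>
    #include <unistd.h>
    #include <drm.h>

    static void drain_drm_events(int fd)
    {
            char buf[4096]; /* matches the kernel's 4k per-file event budget */
            ssize_t len = read(fd, buf, sizeof(buf));
            ssize_t off = 0;

            while (len > 0 && off < len) {
                    struct drm_event *e = (struct drm_event *)(buf + off);

                    if (e->type == DRM_EVENT_VBLANK)
                            printf("vblank event, %u bytes\n", e->length);
                    off += e->length;       /* length includes the header */
            }
    }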
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index e109b49cd25d..e5df53b6e229 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -59,11 +59,13 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size)
struct drm_gem_object *gem_obj;
int ret;
- cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
- if (!cma_obj)
+ if (drm->driver->gem_create_object)
+ gem_obj = drm->driver->gem_create_object(drm, size);
+ else
+ gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+ if (!gem_obj)
return ERR_PTR(-ENOMEM);
-
- gem_obj = &cma_obj->base;
+ cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
ret = drm_gem_object_init(drm, gem_obj, size);
if (ret)
@@ -119,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
return cma_obj;
error:
- drm_gem_cma_free_object(&cma_obj->base);
+ drm->driver->gem_free_object(&cma_obj->base);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -169,7 +171,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
return cma_obj;
err_handle_create:
- drm_gem_cma_free_object(gem_obj);
+ drm->driver->gem_free_object(gem_obj);
return ERR_PTR(ret);
}
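The new ->gem_create_object hook lets a driver embed struct drm_gem_cma_object in a larger private structure while still reusing the CMA helpers; the kzalloc fallback keeps existing drivers working unchanged. A hedged sketch; my_gem_object and its tiling_mode field are hypothetical:

    #include <linux/slab.h>
    #include <drm/drm_gem_cma_helper.h>

    struct my_gem_object {
            struct drm_gem_cma_object base;
            u32 tiling_mode;        /* hypothetical driver-private state */
    };

    static struct drm_gem_object *
    my_gem_create_object(struct drm_device *dev, size_t size)
    {
            struct my_gem_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            /* a NULL return makes __drm_gem_cma_create() fail with -ENOMEM */
            return obj ? &obj->base.base : NULL;
    }

The hook is wired up through the drm_driver's .gem_create_object pointer, with .gem_free_object responsible for undoing it, which is why the helpers above now free through dev->driver->gem_free_object() instead of calling drm_gem_cma_free_object() directly.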
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2151ea551d3b..607f493ae801 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -980,7 +980,8 @@ static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
unsigned long seq, struct timeval *now)
{
- WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+ assert_spin_locked(&dev->event_lock);
+
e->event.sequence = seq;
e->event.tv_sec = now->tv_sec;
e->event.tv_usec = now->tv_usec;
@@ -993,6 +994,57 @@ static void send_vblank_event(struct drm_device *dev,
}
/**
+ * drm_arm_vblank_event - arm vblank event after pageflip
+ * @dev: DRM device
+ * @pipe: CRTC index
+ * @e: the event to prepare to send
+ *
+ * A lot of drivers need to generate vblank events for the very next vblank
+ * interrupt. For example, the page flip interrupt may fire when the page flip
+ * gets armed, rather than when it actually executes within the next vblank
+ * period. This helper function implements exactly the required vblank arming
+ * behaviour.
+ *
+ * Caller must hold event lock. Caller must also hold a vblank reference for
+ * the event @e, which will be dropped when the next vblank arrives.
+ *
+ * This is the legacy version of drm_crtc_arm_vblank_event().
+ */
+void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
+ struct drm_pending_vblank_event *e)
+{
+ assert_spin_locked(&dev->event_lock);
+
+ e->pipe = pipe;
+ e->event.sequence = drm_vblank_count(dev, pipe);
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
+}
+EXPORT_SYMBOL(drm_arm_vblank_event);
+
+/**
+ * drm_crtc_arm_vblank_event - arm vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to prepare to send
+ *
+ * A lot of drivers need to generate vblank events for the very next vblank
+ * interrupt. For example, the page flip interrupt may fire when the page flip
+ * gets armed, rather than when it actually executes within the next vblank
+ * period. This helper function implements exactly the required vblank arming
+ * behaviour.
+ *
+ * Caller must hold event lock. Caller must also hold a vblank reference for
+ * the event @e, which will be dropped when the next vblank arrives.
+ *
+ * This is the native KMS version of drm_arm_vblank_event().
+ */
+void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e)
+{
+ drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
+
+/**
* drm_send_vblank_event - helper to send vblank event after pageflip
* @dev: DRM device
* @pipe: CRTC index
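A hedged sketch of the intended call site for the new helper: the "flip armed" interrupt handler of a hypothetical driver (struct my_crtc and its event field are illustrative):

    #include <drm/drmP.h>

    struct my_crtc {
            struct drm_crtc base;
            struct drm_pending_vblank_event *event;
    };

    static void my_flip_armed_irq(struct my_crtc *mc)
    {
            struct drm_crtc *crtc = &mc->base;
            unsigned long flags;

            spin_lock_irqsave(&crtc->dev->event_lock, flags);
            if (mc->event) {
                    /* the reference is dropped when the next vblank fires */
                    WARN_ON(drm_crtc_vblank_get(crtc) != 0);
                    drm_crtc_arm_vblank_event(crtc, mc->event);
                    mc->event = NULL;
            }
            spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
    }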
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 2d5ca8eec13a..6e6a9c58d404 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -365,6 +365,44 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
}
EXPORT_SYMBOL(mipi_dsi_create_packet);
+/**
+ * mipi_dsi_shutdown_peripheral() - sends a Shutdown Peripheral command
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi)
+{
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_SHUTDOWN_PERIPHERAL,
+ .tx_buf = (u8 [2]) { 0, 0 },
+ .tx_len = 2,
+ };
+
+ return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_shutdown_peripheral);
+
+/**
+ * mipi_dsi_turn_on_peripheral() - sends a Turn On Peripheral command
+ * @dsi: DSI peripheral device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi)
+{
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_TURN_ON_PERIPHERAL,
+ .tx_buf = (u8 [2]) { 0, 0 },
+ .tx_len = 2,
+ };
+
+ return mipi_dsi_device_transfer(dsi, &msg);
+}
+EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral);
+
/*
* mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of
* the payload in a long packet transmitted from the peripheral back to the
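A hedged sketch of how a panel driver might use the two new helpers in its power paths; my_panel_enable() and my_panel_disable() are hypothetical:

    #include <drm/drm_mipi_dsi.h>

    static int my_panel_enable(struct mipi_dsi_device *dsi)
    {
            int ret;

            ret = mipi_dsi_turn_on_peripheral(dsi);
            if (ret < 0)
                    return ret;

            /* panel-specific setup commands would follow here */
            return 0;
    }

    static int my_panel_disable(struct mipi_dsi_device *dsi)
    {
            return mipi_dsi_shutdown_peripheral(dsi);
    }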
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index bde9b2911dc2..20775c05235a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -553,10 +553,10 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
* drivers/video/fbmon.c
*
* Standard GTF parameters:
- * M = 600
- * C = 40
- * K = 128
- * J = 20
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
*
* Returns:
* The modeline based on the GTF algorithm stored in a drm_display_mode object.
@@ -708,7 +708,8 @@ void drm_mode_set_name(struct drm_display_mode *mode)
}
EXPORT_SYMBOL(drm_mode_set_name);
-/** drm_mode_hsync - get the hsync of a mode
+/**
+ * drm_mode_hsync - get the hsync of a mode
* @mode: mode
*
* Returns:
@@ -917,13 +918,30 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
} else if (mode1->clock != mode2->clock)
return false;
+ return drm_mode_equal_no_clocks(mode1, mode2);
+}
+EXPORT_SYMBOL(drm_mode_equal);
+
+/**
+ * drm_mode_equal_no_clocks - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * Check to see if @mode1 and @mode2 are equivalent, but
+ * don't check the pixel clocks.
+ *
+ * Returns:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+{
if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
(mode2->flags & DRM_MODE_FLAG_3D_MASK))
return false;
return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
}
-EXPORT_SYMBOL(drm_mode_equal);
+EXPORT_SYMBOL(drm_mode_equal_no_clocks);
/**
* drm_mode_equal_no_clocks_no_stereo - test modes for equality
@@ -1056,7 +1074,7 @@ static const char * const drm_mode_status_names[] = {
MODE_STATUS(ONE_SIZE),
MODE_STATUS(NO_REDUCED),
MODE_STATUS(NO_STEREO),
- MODE_STATUS(UNVERIFIED),
+ MODE_STATUS(STALE),
MODE_STATUS(BAD),
MODE_STATUS(ERROR),
};
@@ -1154,7 +1172,6 @@ EXPORT_SYMBOL(drm_mode_sort);
/**
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
- * @merge_type_bits: whether to merge or overwrite type bits
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
@@ -1163,33 +1180,48 @@ EXPORT_SYMBOL(drm_mode_sort);
* This is just a helper function; it doesn't validate any modes itself and
* also doesn't prune any invalid modes. Callers need to do that themselves.
*/
-void drm_mode_connector_list_update(struct drm_connector *connector,
- bool merge_type_bits)
+void drm_mode_connector_list_update(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
struct drm_display_mode *pmode, *pt;
- int found_it;
WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
- list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
- head) {
- found_it = 0;
+ list_for_each_entry_safe(pmode, pt, &connector->probed_modes, head) {
+ struct drm_display_mode *mode;
+ bool found_it = false;
+
/* go through current modes checking for the new probed mode */
list_for_each_entry(mode, &connector->modes, head) {
- if (drm_mode_equal(pmode, mode)) {
- found_it = 1;
- /* if equal delete the probed mode */
- mode->status = pmode->status;
- /* Merge type bits together */
- if (merge_type_bits)
- mode->type |= pmode->type;
- else
- mode->type = pmode->type;
- list_del(&pmode->head);
- drm_mode_destroy(connector->dev, pmode);
- break;
+ if (!drm_mode_equal(pmode, mode))
+ continue;
+
+ found_it = true;
+
+ /*
+ * If the old matching mode is stale (ie. left over
+ * from a previous probe) just replace it outright.
+ * Otherwise just merge the type bits between all
+ * equal probed modes.
+ *
+ * If two probed modes are considered equal, pick the
+ * actual timings from the one that's marked as
+ * preferred (in case the match isn't 100%). If
+ * multiple or zero preferred modes are present, favor
+ * the mode added to the probed_modes list first.
+ */
+ if (mode->status == MODE_STALE) {
+ drm_mode_copy(mode, pmode);
+ } else if ((mode->type & DRM_MODE_TYPE_PREFERRED) == 0 &&
+ (pmode->type & DRM_MODE_TYPE_PREFERRED) != 0) {
+ pmode->type |= mode->type;
+ drm_mode_copy(mode, pmode);
+ } else {
+ mode->type |= pmode->type;
}
+
+ list_del(&pmode->head);
+ drm_mode_destroy(connector->dev, pmode);
+ break;
}
if (!found_it) {
@@ -1212,7 +1244,7 @@ EXPORT_SYMBOL(drm_mode_connector_list_update);
* This uses the same parameters as the fb modedb.c, except for an extra
* force-enable, force-enable-digital and force-disable bit at the end:
*
- * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
*
* The intermediate drm_cmdline_mode structure is required to store additional
* options from the command line modline like the force-enable/disable flag.
@@ -1230,7 +1262,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
bool yres_specified = false, cvt = false, rb = false;
bool interlace = false, margins = false, was_digit = false;
- int i, err;
+ int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
#ifdef CONFIG_FB
@@ -1250,9 +1282,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
case '@':
if (!refresh_specified && !bpp_specified &&
!yres_specified && !cvt && !rb && was_digit) {
- err = kstrtouint(&name[i + 1], 10, &refresh);
- if (err)
- return false;
+ refresh = simple_strtol(&name[i+1], NULL, 10);
refresh_specified = true;
was_digit = false;
} else
@@ -1261,9 +1291,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
case '-':
if (!bpp_specified && !yres_specified && !cvt &&
!rb && was_digit) {
- err = kstrtouint(&name[i + 1], 10, &bpp);
- if (err)
- return false;
+ bpp = simple_strtol(&name[i+1], NULL, 10);
bpp_specified = true;
was_digit = false;
} else
@@ -1271,9 +1299,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
break;
case 'x':
if (!yres_specified && was_digit) {
- err = kstrtouint(&name[i + 1], 10, &yres);
- if (err)
- return false;
+ yres = simple_strtol(&name[i+1], NULL, 10);
yres_specified = true;
was_digit = false;
} else
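With drm_mode_equal_no_clocks() exported, there are now three equality levels, each stricter than the last. A sketch showing the relationship (the my_* wrappers are hypothetical):

    #include <drm/drm_modes.h>

    /* timings only */
    static bool my_same_timings(const struct drm_display_mode *a,
                                const struct drm_display_mode *b)
    {
            return drm_mode_equal_no_clocks_no_stereo(a, b);
    }

    /* timings plus stereo 3D flags */
    static bool my_same_timings_and_3d(const struct drm_display_mode *a,
                                       const struct drm_display_mode *b)
    {
            return drm_mode_equal_no_clocks(a, b);
    }

    /* timings, 3D flags and pixel clock */
    static bool my_same_mode(const struct drm_display_mode *a,
                             const struct drm_display_mode *b)
    {
            return drm_mode_equal(a, b);
    }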
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 6675b1428410..e3a4adf03e7b 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -40,28 +40,33 @@
* The basic usage pattern is to:
*
* drm_modeset_acquire_init(&ctx)
- * retry:
+ * retry:
* foreach (lock in random_ordered_set_of_locks) {
- * ret = drm_modeset_lock(lock, &ctx)
- * if (ret == -EDEADLK) {
- * drm_modeset_backoff(&ctx);
- * goto retry;
- * }
+ * ret = drm_modeset_lock(lock, &ctx)
+ * if (ret == -EDEADLK) {
+ * drm_modeset_backoff(&ctx);
+ * goto retry;
+ * }
* }
- *
* ... do stuff ...
- *
* drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx);
*/
/**
* drm_modeset_lock_all - take all modeset locks
- * @dev: drm device
+ * @dev: DRM device
*
* This function takes all modeset locks, suitable where a more fine-grained
- * scheme isn't (yet) implemented. Locks must be dropped with
- * drm_modeset_unlock_all.
+ * scheme isn't (yet) implemented. Locks must be dropped by calling the
+ * drm_modeset_unlock_all() function.
+ *
+ * This function is deprecated. It allocates a lock acquisition context and
+ * stores it in the DRM device's ->mode_config. This facilitates conversion of
+ * existing code because it removes the need to manually deal with the
+ * acquisition context, but it is also brittle because the context is global
+ * and care must be taken not to nest calls. New code should use the
+ * drm_modeset_lock_all_ctx() function and pass in the context explicitly.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
@@ -78,39 +83,43 @@ void drm_modeset_lock_all(struct drm_device *dev)
drm_modeset_acquire_init(ctx, 0);
retry:
- ret = drm_modeset_lock(&config->connection_mutex, ctx);
- if (ret)
- goto fail;
- ret = drm_modeset_lock_all_crtcs(dev, ctx);
- if (ret)
- goto fail;
+ ret = drm_modeset_lock_all_ctx(dev, ctx);
+ if (ret < 0) {
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(ctx);
+ goto retry;
+ }
+
+ drm_modeset_acquire_fini(ctx);
+ kfree(ctx);
+ return;
+ }
WARN_ON(config->acquire_ctx);
- /* now we hold the locks, so now that it is safe, stash the
- * ctx for drm_modeset_unlock_all():
+ /*
+ * We hold the locks now, so it is safe to stash the acquisition
+ * context for drm_modeset_unlock_all().
*/
config->acquire_ctx = ctx;
drm_warn_on_modeset_not_all_locked(dev);
-
- return;
-
-fail:
- if (ret == -EDEADLK) {
- drm_modeset_backoff(ctx);
- goto retry;
- }
-
- kfree(ctx);
}
EXPORT_SYMBOL(drm_modeset_lock_all);
/**
* drm_modeset_unlock_all - drop all modeset locks
- * @dev: device
+ * @dev: DRM device
*
- * This function drop all modeset locks taken by drm_modeset_lock_all.
+ * This function drops all modeset locks taken by a previous call to the
+ * drm_modeset_lock_all() function.
+ *
+ * This function is deprecated. It uses the lock acquisition context stored
+ * in the DRM device's ->mode_config. This facilitates conversion of existing
+ * code because it removes the need to manually deal with the acquisition
+ * context, but it is also brittle because the context is global and care must
+ * be taken not to nest calls. New code should pass the acquisition context
+ * directly to the drm_modeset_drop_locks() function.
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
@@ -431,14 +440,34 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock)
}
EXPORT_SYMBOL(drm_modeset_unlock);
-/* In some legacy codepaths it's convenient to just grab all the crtc and plane
- * related locks. */
-int drm_modeset_lock_all_crtcs(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx)
+/**
+ * drm_modeset_lock_all_ctx - take all modeset locks
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented.
+ *
+ * Unlike drm_modeset_lock_all(), it doesn't take the dev->mode_config.mutex
+ * since that lock isn't required for modeset state changes. Callers that
+ * need to grab that lock as well must do so outside of the acquire context
+ * @ctx.
+ *
+ * Locks acquired with this function should be released by calling the
+ * drm_modeset_drop_locks() function on @ctx.
+ *
+ * Returns: 0 on success or a negative error-code on failure.
+ */
+int drm_modeset_lock_all_ctx(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_crtc *crtc;
struct drm_plane *plane;
- int ret = 0;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ret;
drm_for_each_crtc(crtc, dev) {
ret = drm_modeset_lock(&crtc->mutex, ctx);
@@ -454,4 +483,4 @@ int drm_modeset_lock_all_crtcs(struct drm_device *dev,
return 0;
}
-EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
+EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
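Putting the new export together with the retry pattern documented at the top of this file, the explicit-context replacement for drm_modeset_lock_all() looks roughly like this (a sketch, not a drop-in; my_take_all_locks() is hypothetical):

    #include <drm/drmP.h>
    #include <drm/drm_modeset_lock.h>

    static void my_take_all_locks(struct drm_device *dev)
    {
            struct drm_modeset_acquire_ctx ctx;
            int ret;

            drm_modeset_acquire_init(&ctx, 0);
    retry:
            ret = drm_modeset_lock_all_ctx(dev, &ctx);
            if (ret == -EDEADLK) {
                    drm_modeset_backoff(&ctx);
                    goto retry;
            } else if (ret) {
                    goto out;       /* real error; just drop the locks */
            }

            /* ... modeset state changes go here ... */
    out:
            drm_modeset_drop_locks(&ctx);
            drm_modeset_acquire_fini(&ctx);
    }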
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index a6983d41920d..369d2898ff9e 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -57,6 +57,10 @@
* by the atomic helpers.
*
* Again drivers are strongly urged to switch to the new interfaces.
+ *
+ * The plane helpers share the function table structures with other helpers,
+ * specifically also the atomic helpers. See struct &drm_plane_helper_funcs for
+ * the details.
*/
/*
@@ -371,7 +375,7 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev)
&drm_primary_helper_funcs,
safe_modeset_formats,
ARRAY_SIZE(safe_modeset_formats),
- DRM_PLANE_TYPE_PRIMARY);
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
primary = NULL;
@@ -398,7 +402,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary;
primary = create_primary_plane(dev);
- return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
+ return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
+ NULL);
}
EXPORT_SYMBOL(drm_crtc_init);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 9f935f55d74c..27aa7183b20b 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -313,19 +313,15 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
*
* Export callbacks:
*
- * - @gem_prime_pin (optional): prepare a GEM object for exporting
- *
- * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
- *
- * - @gem_prime_vmap: vmap a buffer exported by your driver
- *
- * - @gem_prime_vunmap: vunmap a buffer exported by your driver
- *
- * - @gem_prime_mmap (optional): mmap a buffer exported by your driver
+ * * @gem_prime_pin (optional): prepare a GEM object for exporting
+ * * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
+ * * @gem_prime_vmap: vmap a buffer exported by your driver
+ * * @gem_prime_vunmap: vunmap a buffer exported by your driver
+ * * @gem_prime_mmap (optional): mmap a buffer exported by your driver
*
* Import callback:
*
- * - @gem_prime_import_sg_table (import): produce a GEM object from another
+ * * @gem_prime_import_sg_table (import): produce a GEM object from another
* driver's scatter/gather table
*/
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 94ba39e34299..e714b5a7955f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -53,6 +53,9 @@
* This helper library can be used independently of the modeset helper library.
* Drivers can also overwrite different parts e.g. use their own hotplug
* handling code to avoid probing unrelated outputs.
+ *
+ * The probe helpers share the function table structures with other display
+ * helper libraries. See struct &drm_connector_helper_funcs for the details.
*/
static bool drm_kms_helper_poll = true;
@@ -126,9 +129,64 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
-
-static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY, bool merge_type_bits)
+/**
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * Based on the helper callbacks implemented by @connector in struct
+ * &drm_connector_helper_funcs try to detect all valid modes. Modes will first
+ * be added to the connector's probed_modes list, then culled (based on validity
+ * and the @maxX, @maxY parameters) and put into the normal modes list.
+ *
+ * Intended to be used as a generic implementation of the ->fill_modes()
+ * @connector vfunc for drivers that use the CRTC helpers for output mode
+ * filtering and detection.
+ *
+ * The basic procedure is as follows:
+ *
+ * 1. All modes currently on the connector's modes list are marked as stale
+ *
+ * 2. New modes are added to the connector's probed_modes list with
+ * drm_mode_probed_add(). New modes start their life with a status of OK.
+ * Modes are added from a single source using the following priority order.
+ *
+ * - debugfs 'override_edid' (used for testing only)
+ * - firmware EDID (drm_load_edid_firmware())
+ * - connector helper ->get_modes() vfunc
+ * - if the connector status is connector_status_connected, standard
+ * VESA DMT modes up to 1024x768 are automatically added
+ * (drm_add_modes_noedid())
+ *
+ * Finally, modes specified via the kernel command line (video=...) are
+ * added in addition to what the earlier probes produced
+ * (drm_helper_probe_add_cmdline_mode()). These modes are generated
+ * using the VESA GTF/CVT formulas.
+ *
+ * 3. Modes are moved from the probed_modes list to the modes list. Potential
+ * duplicates are merged together (see drm_mode_connector_list_update()).
+ * After this step the probed_modes list will be empty again.
+ *
+ * 4. Any non-stale mode on the modes list then undergoes validation
+ *
+ * - drm_mode_validate_basic() performs basic sanity checks
+ * - drm_mode_validate_size() filters out modes larger than @maxX and @maxY
+ * (if specified)
+ * - drm_mode_validate_flag() checks the modes against basic connector
+ * capabilities (interlace_allowed, doublescan_allowed, stereo_allowed)
+ * - the optional connector ->mode_valid() helper can perform driver and/or
+ * hardware specific checks
+ *
+ * 5. Any mode whose status is not OK is pruned from the connector's modes list,
+ * accompanied by a debug message indicating the reason for the mode's
+ * rejection (see drm_mode_prune_invalid()).
+ *
+ * Returns:
+ * The number of modes found on @connector.
+ */
+int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
@@ -143,9 +201,9 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
connector->name);
- /* set all modes to the unverified state */
+ /* set all old modes to the stale state */
list_for_each_entry(mode, &connector->modes, head)
- mode->status = MODE_UNVERIFIED;
+ mode->status = MODE_STALE;
old_status = connector->status;
@@ -168,10 +226,11 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
* check here, and if anything changed start the hotplug code.
*/
if (old_status != connector->status) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
connector->name,
- old_status, connector->status);
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
/*
* The hotplug event code might call into the fb
@@ -199,17 +258,16 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
goto prune;
}
+ if (connector->override_edid) {
+ struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
+
+ count = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+ } else {
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
- count = drm_load_edid_firmware(connector);
- if (count == 0)
+ count = drm_load_edid_firmware(connector);
+ if (count == 0)
#endif
- {
- if (connector->override_edid) {
- struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
-
- count = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
- } else
count = (*connector_funcs->get_modes)(connector);
}
@@ -219,7 +277,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
if (count == 0)
goto prune;
- drm_mode_connector_list_update(connector, merge_type_bits);
+ drm_mode_connector_list_update(connector);
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
@@ -229,7 +287,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
mode_flags |= DRM_MODE_FLAG_3D_MASK;
list_for_each_entry(mode, &connector->modes, head) {
- mode->status = drm_mode_validate_basic(mode);
+ if (mode->status == MODE_OK)
+ mode->status = drm_mode_validate_basic(mode);
if (mode->status == MODE_OK)
mode->status = drm_mode_validate_size(mode, maxX, maxY);
@@ -262,49 +321,9 @@ prune:
return count;
}
-
-/**
- * drm_helper_probe_single_connector_modes - get complete set of display modes
- * @connector: connector to probe
- * @maxX: max width for modes
- * @maxY: max height for modes
- *
- * Based on the helper callbacks implemented by @connector try to detect all
- * valid modes. Modes will first be added to the connector's probed_modes list,
- * then culled (based on validity and the @maxX, @maxY parameters) and put into
- * the normal modes list.
- *
- * Intended to be use as a generic implementation of the ->fill_modes()
- * @connector vfunc for drivers that use the crtc helpers for output mode
- * filtering and detection.
- *
- * Returns:
- * The number of modes found on @connector.
- */
-int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true);
-}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
/**
- * drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes
- * @connector: connector to probe
- * @maxX: max width for modes
- * @maxY: max height for modes
- *
- * This operates like drm_hehlper_probe_single_connector_modes except it
- * replaces the mode bits instead of merging them for preferred modes.
- */
-int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false);
-}
-EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge);
-
-/**
* drm_kms_helper_hotplug_event - fire off KMS hotplug events
* @dev: drm_device whose connector state changed
*
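A hedged sketch of how a driver plugs the probe helper into its connector as the ->fill_modes() vfunc; my_connector_detect() and the always-connected status are illustrative:

    #include <drm/drm_crtc.h>
    #include <drm/drm_crtc_helper.h>

    static enum drm_connector_status
    my_connector_detect(struct drm_connector *connector, bool force)
    {
            return connector_status_connected;      /* e.g. a fixed panel */
    }

    static const struct drm_connector_funcs my_connector_funcs = {
            .dpms           = drm_helper_connector_dpms,
            .detect         = my_connector_detect,
            .fill_modes     = drm_helper_probe_single_connector_modes,
            .destroy        = drm_connector_cleanup,
    };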
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
new file mode 100644
index 000000000000..2cde7a5442fb
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -0,0 +1,20 @@
+
+config DRM_ETNAVIV
+ tristate "ETNAVIV (DRM support for Vivante GPU IP cores)"
+ depends on DRM
+ depends on ARCH_MXC || ARCH_DOVE
+ select SHMEM
+ select TMPFS
+ select IOMMU_API
+ select IOMMU_SUPPORT
+ select WANT_DEV_COREDUMP
+ help
+ DRM driver for Vivante GPUs.
+
+config DRM_ETNAVIV_REGISTER_LOGGING
+ bool "enable ETNAVIV register logging"
+ depends on DRM_ETNAVIV
+ help
+ Compile in support for logging register reads/writes in a format
+ that can be parsed by the envytools demsm tool. If enabled, register
+ logging can be switched on via the etnaviv.reglog=y module parameter.
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
new file mode 100644
index 000000000000..1086e9876f91
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -0,0 +1,14 @@
+etnaviv-y := \
+ etnaviv_buffer.o \
+ etnaviv_cmd_parser.o \
+ etnaviv_drv.o \
+ etnaviv_dump.o \
+ etnaviv_gem_prime.o \
+ etnaviv_gem_submit.o \
+ etnaviv_gem.o \
+ etnaviv_gpu.o \
+ etnaviv_iommu_v2.o \
+ etnaviv_iommu.o \
+ etnaviv_mmu.o
+
+obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
diff --git a/drivers/gpu/drm/etnaviv/cmdstream.xml.h b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
new file mode 100644
index 000000000000..8c44ba9a694e
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
@@ -0,0 +1,218 @@
+#ifndef CMDSTREAM_XML
+#define CMDSTREAM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- cmdstream.xml ( 12589 bytes, from 2014-02-17 14:57:56)
+- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
+
+Copyright (C) 2014
+*/
+
+
+#define FE_OPCODE_LOAD_STATE 0x00000001
+#define FE_OPCODE_END 0x00000002
+#define FE_OPCODE_NOP 0x00000003
+#define FE_OPCODE_DRAW_2D 0x00000004
+#define FE_OPCODE_DRAW_PRIMITIVES 0x00000005
+#define FE_OPCODE_DRAW_INDEXED_PRIMITIVES 0x00000006
+#define FE_OPCODE_WAIT 0x00000007
+#define FE_OPCODE_LINK 0x00000008
+#define FE_OPCODE_STALL 0x00000009
+#define FE_OPCODE_CALL 0x0000000a
+#define FE_OPCODE_RETURN 0x0000000b
+#define FE_OPCODE_CHIP_SELECT 0x0000000d
+#define PRIMITIVE_TYPE_POINTS 0x00000001
+#define PRIMITIVE_TYPE_LINES 0x00000002
+#define PRIMITIVE_TYPE_LINE_STRIP 0x00000003
+#define PRIMITIVE_TYPE_TRIANGLES 0x00000004
+#define PRIMITIVE_TYPE_TRIANGLE_STRIP 0x00000005
+#define PRIMITIVE_TYPE_TRIANGLE_FAN 0x00000006
+#define PRIMITIVE_TYPE_LINE_LOOP 0x00000007
+#define PRIMITIVE_TYPE_QUADS 0x00000008
+#define VIV_FE_LOAD_STATE 0x00000000
+
+#define VIV_FE_LOAD_STATE_HEADER 0x00000000
+#define VIV_FE_LOAD_STATE_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_LOAD_STATE_HEADER_OP__SHIFT 27
+#define VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE 0x08000000
+#define VIV_FE_LOAD_STATE_HEADER_FIXP 0x04000000
+#define VIV_FE_LOAD_STATE_HEADER_COUNT__MASK 0x03ff0000
+#define VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT 16
+#define VIV_FE_LOAD_STATE_HEADER_COUNT(x) (((x) << VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT) & VIV_FE_LOAD_STATE_HEADER_COUNT__MASK)
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK 0x0000ffff
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT 0
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET(x) (((x) << VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT) & VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK)
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR 2
+
+#define VIV_FE_END 0x00000000
+
+#define VIV_FE_END_HEADER 0x00000000
+#define VIV_FE_END_HEADER_EVENT_ID__MASK 0x0000001f
+#define VIV_FE_END_HEADER_EVENT_ID__SHIFT 0
+#define VIV_FE_END_HEADER_EVENT_ID(x) (((x) << VIV_FE_END_HEADER_EVENT_ID__SHIFT) & VIV_FE_END_HEADER_EVENT_ID__MASK)
+#define VIV_FE_END_HEADER_EVENT_ENABLE 0x00000100
+#define VIV_FE_END_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_END_HEADER_OP__SHIFT 27
+#define VIV_FE_END_HEADER_OP_END 0x10000000
+
+#define VIV_FE_NOP 0x00000000
+
+#define VIV_FE_NOP_HEADER 0x00000000
+#define VIV_FE_NOP_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_NOP_HEADER_OP__SHIFT 27
+#define VIV_FE_NOP_HEADER_OP_NOP 0x18000000
+
+#define VIV_FE_DRAW_2D 0x00000000
+
+#define VIV_FE_DRAW_2D_HEADER 0x00000000
+#define VIV_FE_DRAW_2D_HEADER_COUNT__MASK 0x0000ff00
+#define VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT 8
+#define VIV_FE_DRAW_2D_HEADER_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_COUNT__MASK)
+#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK 0x07ff0000
+#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT 16
+#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK)
+#define VIV_FE_DRAW_2D_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_DRAW_2D_HEADER_OP__SHIFT 27
+#define VIV_FE_DRAW_2D_HEADER_OP_DRAW_2D 0x20000000
+
+#define VIV_FE_DRAW_2D_TOP_LEFT 0x00000008
+#define VIV_FE_DRAW_2D_TOP_LEFT_X__MASK 0x0000ffff
+#define VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT 0
+#define VIV_FE_DRAW_2D_TOP_LEFT_X(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_X__MASK)
+#define VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK 0xffff0000
+#define VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT 16
+#define VIV_FE_DRAW_2D_TOP_LEFT_Y(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK)
+
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT 0x0000000c
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK 0x0000ffff
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT 0
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK)
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK 0xffff0000
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT 16
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK)
+
+#define VIV_FE_DRAW_PRIMITIVES 0x00000000
+
+#define VIV_FE_DRAW_PRIMITIVES_HEADER 0x00000000
+#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__SHIFT 27
+#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP_DRAW_PRIMITIVES 0x28000000
+
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND 0x00000004
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT 0
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK)
+
+#define VIV_FE_DRAW_PRIMITIVES_START 0x00000008
+
+#define VIV_FE_DRAW_PRIMITIVES_COUNT 0x0000000c
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES 0x00000000
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER 0x00000000
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__SHIFT 27
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP_DRAW_INDEXED_PRIMITIVES 0x30000000
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND 0x00000004
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT 0
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK)
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_START 0x00000008
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COUNT 0x0000000c
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_OFFSET 0x00000010
+
+#define VIV_FE_WAIT 0x00000000
+
+#define VIV_FE_WAIT_HEADER 0x00000000
+#define VIV_FE_WAIT_HEADER_DELAY__MASK 0x0000ffff
+#define VIV_FE_WAIT_HEADER_DELAY__SHIFT 0
+#define VIV_FE_WAIT_HEADER_DELAY(x) (((x) << VIV_FE_WAIT_HEADER_DELAY__SHIFT) & VIV_FE_WAIT_HEADER_DELAY__MASK)
+#define VIV_FE_WAIT_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_WAIT_HEADER_OP__SHIFT 27
+#define VIV_FE_WAIT_HEADER_OP_WAIT 0x38000000
+
+#define VIV_FE_LINK 0x00000000
+
+#define VIV_FE_LINK_HEADER 0x00000000
+#define VIV_FE_LINK_HEADER_PREFETCH__MASK 0x0000ffff
+#define VIV_FE_LINK_HEADER_PREFETCH__SHIFT 0
+#define VIV_FE_LINK_HEADER_PREFETCH(x) (((x) << VIV_FE_LINK_HEADER_PREFETCH__SHIFT) & VIV_FE_LINK_HEADER_PREFETCH__MASK)
+#define VIV_FE_LINK_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_LINK_HEADER_OP__SHIFT 27
+#define VIV_FE_LINK_HEADER_OP_LINK 0x40000000
+
+#define VIV_FE_LINK_ADDRESS 0x00000004
+
+#define VIV_FE_STALL 0x00000000
+
+#define VIV_FE_STALL_HEADER 0x00000000
+#define VIV_FE_STALL_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_STALL_HEADER_OP__SHIFT 27
+#define VIV_FE_STALL_HEADER_OP_STALL 0x48000000
+
+#define VIV_FE_STALL_TOKEN 0x00000004
+#define VIV_FE_STALL_TOKEN_FROM__MASK 0x0000001f
+#define VIV_FE_STALL_TOKEN_FROM__SHIFT 0
+#define VIV_FE_STALL_TOKEN_FROM(x) (((x) << VIV_FE_STALL_TOKEN_FROM__SHIFT) & VIV_FE_STALL_TOKEN_FROM__MASK)
+#define VIV_FE_STALL_TOKEN_TO__MASK 0x00001f00
+#define VIV_FE_STALL_TOKEN_TO__SHIFT 8
+#define VIV_FE_STALL_TOKEN_TO(x) (((x) << VIV_FE_STALL_TOKEN_TO__SHIFT) & VIV_FE_STALL_TOKEN_TO__MASK)
+
+#define VIV_FE_CALL 0x00000000
+
+#define VIV_FE_CALL_HEADER 0x00000000
+#define VIV_FE_CALL_HEADER_PREFETCH__MASK 0x0000ffff
+#define VIV_FE_CALL_HEADER_PREFETCH__SHIFT 0
+#define VIV_FE_CALL_HEADER_PREFETCH(x) (((x) << VIV_FE_CALL_HEADER_PREFETCH__SHIFT) & VIV_FE_CALL_HEADER_PREFETCH__MASK)
+#define VIV_FE_CALL_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_CALL_HEADER_OP__SHIFT 27
+#define VIV_FE_CALL_HEADER_OP_CALL 0x50000000
+
+#define VIV_FE_CALL_ADDRESS 0x00000004
+
+#define VIV_FE_CALL_RETURN_PREFETCH 0x00000008
+
+#define VIV_FE_CALL_RETURN_ADDRESS 0x0000000c
+
+#define VIV_FE_RETURN 0x00000000
+
+#define VIV_FE_RETURN_HEADER 0x00000000
+#define VIV_FE_RETURN_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_RETURN_HEADER_OP__SHIFT 27
+#define VIV_FE_RETURN_HEADER_OP_RETURN 0x58000000
+
+#define VIV_FE_CHIP_SELECT 0x00000000
+
+#define VIV_FE_CHIP_SELECT_HEADER 0x00000000
+#define VIV_FE_CHIP_SELECT_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_CHIP_SELECT_HEADER_OP__SHIFT 27
+#define VIV_FE_CHIP_SELECT_HEADER_OP_CHIP_SELECT 0x68000000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP15 0x00008000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP14 0x00004000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP13 0x00002000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP12 0x00001000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP11 0x00000800
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP10 0x00000400
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP9 0x00000200
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP8 0x00000100
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP7 0x00000080
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP6 0x00000040
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP5 0x00000020
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP4 0x00000010
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP3 0x00000008
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP2 0x00000004
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP1 0x00000002
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP0 0x00000001
+
+
+#endif /* CMDSTREAM_XML */
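All of these generated headers follow the same __MASK/__SHIFT/constructor pattern. For instance, a sketch of building the one-word header for "load one register at byte offset reg", assuming cmdstream.xml.h is included; it mirrors what CMD_LOAD_STATE in etnaviv_buffer.c below does:

    #include <linux/types.h>

    static inline u32 my_load_state_header(u32 reg)
    {
            /* the register offset is stored right-shifted by __SHR */
            return VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
                   VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
                   VIV_FE_LOAD_STATE_HEADER_OFFSET(
                            reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR);
    }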
diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h
new file mode 100644
index 000000000000..9e585d51fb78
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/common.xml.h
@@ -0,0 +1,249 @@
+#ifndef COMMON_XML
+#define COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01)
+- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
+
+Copyright (C) 2015
+*/
+
+
+#define PIPE_ID_PIPE_3D 0x00000000
+#define PIPE_ID_PIPE_2D 0x00000001
+#define SYNC_RECIPIENT_FE 0x00000001
+#define SYNC_RECIPIENT_RA 0x00000005
+#define SYNC_RECIPIENT_PE 0x00000007
+#define SYNC_RECIPIENT_DE 0x0000000b
+#define SYNC_RECIPIENT_VG 0x0000000f
+#define SYNC_RECIPIENT_TESSELATOR 0x00000010
+#define SYNC_RECIPIENT_VG2 0x00000011
+#define SYNC_RECIPIENT_TESSELATOR2 0x00000012
+#define SYNC_RECIPIENT_VG3 0x00000013
+#define SYNC_RECIPIENT_TESSELATOR3 0x00000014
+#define ENDIAN_MODE_NO_SWAP 0x00000000
+#define ENDIAN_MODE_SWAP_16 0x00000001
+#define ENDIAN_MODE_SWAP_32 0x00000002
+#define chipModel_GC300 0x00000300
+#define chipModel_GC320 0x00000320
+#define chipModel_GC350 0x00000350
+#define chipModel_GC355 0x00000355
+#define chipModel_GC400 0x00000400
+#define chipModel_GC410 0x00000410
+#define chipModel_GC420 0x00000420
+#define chipModel_GC450 0x00000450
+#define chipModel_GC500 0x00000500
+#define chipModel_GC530 0x00000530
+#define chipModel_GC600 0x00000600
+#define chipModel_GC700 0x00000700
+#define chipModel_GC800 0x00000800
+#define chipModel_GC860 0x00000860
+#define chipModel_GC880 0x00000880
+#define chipModel_GC1000 0x00001000
+#define chipModel_GC2000 0x00002000
+#define chipModel_GC2100 0x00002100
+#define chipModel_GC4000 0x00004000
+#define RGBA_BITS_R 0x00000001
+#define RGBA_BITS_G 0x00000002
+#define RGBA_BITS_B 0x00000004
+#define RGBA_BITS_A 0x00000008
+#define chipFeatures_FAST_CLEAR 0x00000001
+#define chipFeatures_SPECIAL_ANTI_ALIASING 0x00000002
+#define chipFeatures_PIPE_3D 0x00000004
+#define chipFeatures_DXT_TEXTURE_COMPRESSION 0x00000008
+#define chipFeatures_DEBUG_MODE 0x00000010
+#define chipFeatures_Z_COMPRESSION 0x00000020
+#define chipFeatures_YUV420_SCALER 0x00000040
+#define chipFeatures_MSAA 0x00000080
+#define chipFeatures_DC 0x00000100
+#define chipFeatures_PIPE_2D 0x00000200
+#define chipFeatures_ETC1_TEXTURE_COMPRESSION 0x00000400
+#define chipFeatures_FAST_SCALER 0x00000800
+#define chipFeatures_HIGH_DYNAMIC_RANGE 0x00001000
+#define chipFeatures_YUV420_TILER 0x00002000
+#define chipFeatures_MODULE_CG 0x00004000
+#define chipFeatures_MIN_AREA 0x00008000
+#define chipFeatures_NO_EARLY_Z 0x00010000
+#define chipFeatures_NO_422_TEXTURE 0x00020000
+#define chipFeatures_BUFFER_INTERLEAVING 0x00040000
+#define chipFeatures_BYTE_WRITE_2D 0x00080000
+#define chipFeatures_NO_SCALER 0x00100000
+#define chipFeatures_YUY2_AVERAGING 0x00200000
+#define chipFeatures_HALF_PE_CACHE 0x00400000
+#define chipFeatures_HALF_TX_CACHE 0x00800000
+#define chipFeatures_YUY2_RENDER_TARGET 0x01000000
+#define chipFeatures_MEM32 0x02000000
+#define chipFeatures_PIPE_VG 0x04000000
+#define chipFeatures_VGTS 0x08000000
+#define chipFeatures_FE20 0x10000000
+#define chipFeatures_BYTE_WRITE_3D 0x20000000
+#define chipFeatures_RS_YUV_TARGET 0x40000000
+#define chipFeatures_32_BIT_INDICES 0x80000000
+#define chipMinorFeatures0_FLIP_Y 0x00000001
+#define chipMinorFeatures0_DUAL_RETURN_BUS 0x00000002
+#define chipMinorFeatures0_ENDIANNESS_CONFIG 0x00000004
+#define chipMinorFeatures0_TEXTURE_8K 0x00000008
+#define chipMinorFeatures0_CORRECT_TEXTURE_CONVERTER 0x00000010
+#define chipMinorFeatures0_SPECIAL_MSAA_LOD 0x00000020
+#define chipMinorFeatures0_FAST_CLEAR_FLUSH 0x00000040
+#define chipMinorFeatures0_2DPE20 0x00000080
+#define chipMinorFeatures0_CORRECT_AUTO_DISABLE 0x00000100
+#define chipMinorFeatures0_RENDERTARGET_8K 0x00000200
+#define chipMinorFeatures0_2BITPERTILE 0x00000400
+#define chipMinorFeatures0_SEPARATE_TILE_STATUS_WHEN_INTERLEAVED 0x00000800
+#define chipMinorFeatures0_SUPER_TILED 0x00001000
+#define chipMinorFeatures0_VG_20 0x00002000
+#define chipMinorFeatures0_TS_EXTENDED_COMMANDS 0x00004000
+#define chipMinorFeatures0_COMPRESSION_FIFO_FIXED 0x00008000
+#define chipMinorFeatures0_HAS_SIGN_FLOOR_CEIL 0x00010000
+#define chipMinorFeatures0_VG_FILTER 0x00020000
+#define chipMinorFeatures0_VG_21 0x00040000
+#define chipMinorFeatures0_SHADER_HAS_W 0x00080000
+#define chipMinorFeatures0_HAS_SQRT_TRIG 0x00100000
+#define chipMinorFeatures0_MORE_MINOR_FEATURES 0x00200000
+#define chipMinorFeatures0_MC20 0x00400000
+#define chipMinorFeatures0_MSAA_SIDEBAND 0x00800000
+#define chipMinorFeatures0_BUG_FIXES0 0x01000000
+#define chipMinorFeatures0_VAA 0x02000000
+#define chipMinorFeatures0_BYPASS_IN_MSAA 0x04000000
+#define chipMinorFeatures0_HZ 0x08000000
+#define chipMinorFeatures0_NEW_TEXTURE 0x10000000
+#define chipMinorFeatures0_2D_A8_TARGET 0x20000000
+#define chipMinorFeatures0_CORRECT_STENCIL 0x40000000
+#define chipMinorFeatures0_ENHANCE_VR 0x80000000
+#define chipMinorFeatures1_RSUV_SWIZZLE 0x00000001
+#define chipMinorFeatures1_V2_COMPRESSION 0x00000002
+#define chipMinorFeatures1_VG_DOUBLE_BUFFER 0x00000004
+#define chipMinorFeatures1_EXTRA_EVENT_STATES 0x00000008
+#define chipMinorFeatures1_NO_STRIPING_NEEDED 0x00000010
+#define chipMinorFeatures1_TEXTURE_STRIDE 0x00000020
+#define chipMinorFeatures1_BUG_FIXES3 0x00000040
+#define chipMinorFeatures1_AUTO_DISABLE 0x00000080
+#define chipMinorFeatures1_AUTO_RESTART_TS 0x00000100
+#define chipMinorFeatures1_DISABLE_PE_GATING 0x00000200
+#define chipMinorFeatures1_L2_WINDOWING 0x00000400
+#define chipMinorFeatures1_HALF_FLOAT 0x00000800
+#define chipMinorFeatures1_PIXEL_DITHER 0x00001000
+#define chipMinorFeatures1_TWO_STENCIL_REFERENCE 0x00002000
+#define chipMinorFeatures1_EXTENDED_PIXEL_FORMAT 0x00004000
+#define chipMinorFeatures1_CORRECT_MIN_MAX_DEPTH 0x00008000
+#define chipMinorFeatures1_2D_DITHER 0x00010000
+#define chipMinorFeatures1_BUG_FIXES5 0x00020000
+#define chipMinorFeatures1_NEW_2D 0x00040000
+#define chipMinorFeatures1_NEW_FP 0x00080000
+#define chipMinorFeatures1_TEXTURE_HALIGN 0x00100000
+#define chipMinorFeatures1_NON_POWER_OF_TWO 0x00200000
+#define chipMinorFeatures1_LINEAR_TEXTURE_SUPPORT 0x00400000
+#define chipMinorFeatures1_HALTI0 0x00800000
+#define chipMinorFeatures1_CORRECT_OVERFLOW_VG 0x01000000
+#define chipMinorFeatures1_NEGATIVE_LOG_FIX 0x02000000
+#define chipMinorFeatures1_RESOLVE_OFFSET 0x04000000
+#define chipMinorFeatures1_OK_TO_GATE_AXI_CLOCK 0x08000000
+#define chipMinorFeatures1_MMU_VERSION 0x10000000
+#define chipMinorFeatures1_WIDE_LINE 0x20000000
+#define chipMinorFeatures1_BUG_FIXES6 0x40000000
+#define chipMinorFeatures1_FC_FLUSH_STALL 0x80000000
+#define chipMinorFeatures2_LINE_LOOP 0x00000001
+#define chipMinorFeatures2_LOGIC_OP 0x00000002
+#define chipMinorFeatures2_UNK2 0x00000004
+#define chipMinorFeatures2_SUPERTILED_TEXTURE 0x00000008
+#define chipMinorFeatures2_UNK4 0x00000010
+#define chipMinorFeatures2_RECT_PRIMITIVE 0x00000020
+#define chipMinorFeatures2_COMPOSITION 0x00000040
+#define chipMinorFeatures2_CORRECT_AUTO_DISABLE_COUNT 0x00000080
+#define chipMinorFeatures2_UNK8 0x00000100
+#define chipMinorFeatures2_UNK9 0x00000200
+#define chipMinorFeatures2_UNK10 0x00000400
+#define chipMinorFeatures2_SAMPLERBASE_16 0x00000800
+#define chipMinorFeatures2_UNK12 0x00001000
+#define chipMinorFeatures2_UNK13 0x00002000
+#define chipMinorFeatures2_UNK14 0x00004000
+#define chipMinorFeatures2_EXTRA_TEXTURE_STATE 0x00008000
+#define chipMinorFeatures2_FULL_DIRECTFB 0x00010000
+#define chipMinorFeatures2_2D_TILING 0x00020000
+#define chipMinorFeatures2_THREAD_WALKER_IN_PS 0x00040000
+#define chipMinorFeatures2_TILE_FILLER 0x00080000
+#define chipMinorFeatures2_UNK20 0x00100000
+#define chipMinorFeatures2_2D_MULTI_SOURCE_BLIT 0x00200000
+#define chipMinorFeatures2_UNK22 0x00400000
+#define chipMinorFeatures2_UNK23 0x00800000
+#define chipMinorFeatures2_UNK24 0x01000000
+#define chipMinorFeatures2_MIXED_STREAMS 0x02000000
+#define chipMinorFeatures2_2D_420_L2CACHE 0x04000000
+#define chipMinorFeatures2_UNK27 0x08000000
+#define chipMinorFeatures2_2D_NO_INDEX8_BRUSH 0x10000000
+#define chipMinorFeatures2_TEXTURE_TILED_READ 0x20000000
+#define chipMinorFeatures2_UNK30 0x40000000
+#define chipMinorFeatures2_UNK31 0x80000000
+#define chipMinorFeatures3_ROTATION_STALL_FIX 0x00000001
+#define chipMinorFeatures3_UNK1 0x00000002
+#define chipMinorFeatures3_2D_MULTI_SOURCE_BLT_EX 0x00000004
+#define chipMinorFeatures3_UNK3 0x00000008
+#define chipMinorFeatures3_UNK4 0x00000010
+#define chipMinorFeatures3_UNK5 0x00000020
+#define chipMinorFeatures3_UNK6 0x00000040
+#define chipMinorFeatures3_UNK7 0x00000080
+#define chipMinorFeatures3_UNK8 0x00000100
+#define chipMinorFeatures3_UNK9 0x00000200
+#define chipMinorFeatures3_BUG_FIXES10 0x00000400
+#define chipMinorFeatures3_UNK11 0x00000800
+#define chipMinorFeatures3_BUG_FIXES11 0x00001000
+#define chipMinorFeatures3_UNK13 0x00002000
+#define chipMinorFeatures3_UNK14 0x00004000
+#define chipMinorFeatures3_UNK15 0x00008000
+#define chipMinorFeatures3_UNK16 0x00010000
+#define chipMinorFeatures3_UNK17 0x00020000
+#define chipMinorFeatures3_UNK18 0x00040000
+#define chipMinorFeatures3_UNK19 0x00080000
+#define chipMinorFeatures3_UNK20 0x00100000
+#define chipMinorFeatures3_UNK21 0x00200000
+#define chipMinorFeatures3_UNK22 0x00400000
+#define chipMinorFeatures3_UNK23 0x00800000
+#define chipMinorFeatures3_UNK24 0x01000000
+#define chipMinorFeatures3_UNK25 0x02000000
+#define chipMinorFeatures3_UNK26 0x04000000
+#define chipMinorFeatures3_UNK27 0x08000000
+#define chipMinorFeatures3_UNK28 0x10000000
+#define chipMinorFeatures3_UNK29 0x20000000
+#define chipMinorFeatures3_UNK30 0x40000000
+#define chipMinorFeatures3_UNK31 0x80000000
+#define chipMinorFeatures4_UNK0 0x00000001
+#define chipMinorFeatures4_UNK1 0x00000002
+#define chipMinorFeatures4_UNK2 0x00000004
+#define chipMinorFeatures4_UNK3 0x00000008
+#define chipMinorFeatures4_UNK4 0x00000010
+#define chipMinorFeatures4_UNK5 0x00000020
+#define chipMinorFeatures4_UNK6 0x00000040
+#define chipMinorFeatures4_UNK7 0x00000080
+#define chipMinorFeatures4_UNK8 0x00000100
+#define chipMinorFeatures4_UNK9 0x00000200
+#define chipMinorFeatures4_UNK10 0x00000400
+#define chipMinorFeatures4_UNK11 0x00000800
+#define chipMinorFeatures4_UNK12 0x00001000
+#define chipMinorFeatures4_UNK13 0x00002000
+#define chipMinorFeatures4_UNK14 0x00004000
+#define chipMinorFeatures4_UNK15 0x00008000
+#define chipMinorFeatures4_UNK16 0x00010000
+#define chipMinorFeatures4_UNK17 0x00020000
+#define chipMinorFeatures4_UNK18 0x00040000
+#define chipMinorFeatures4_UNK19 0x00080000
+#define chipMinorFeatures4_UNK20 0x00100000
+#define chipMinorFeatures4_UNK21 0x00200000
+#define chipMinorFeatures4_UNK22 0x00400000
+#define chipMinorFeatures4_UNK23 0x00800000
+#define chipMinorFeatures4_UNK24 0x01000000
+#define chipMinorFeatures4_UNK25 0x02000000
+#define chipMinorFeatures4_UNK26 0x04000000
+#define chipMinorFeatures4_UNK27 0x08000000
+#define chipMinorFeatures4_UNK28 0x10000000
+#define chipMinorFeatures4_UNK29 0x20000000
+#define chipMinorFeatures4_UNK30 0x40000000
+#define chipMinorFeatures4_UNK31 0x80000000
+
+#endif /* COMMON_XML */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
new file mode 100644
index 000000000000..332c55ebba6d
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2014 Etnaviv Project
+ * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
+
+#include "common.xml.h"
+#include "state.xml.h"
+#include "cmdstream.xml.h"
+
+/*
+ * Command Buffer helper:
+ */
+
+
+static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
+{
+ u32 *vaddr = (u32 *)buffer->vaddr;
+
+ BUG_ON(buffer->user_size >= buffer->size);
+
+ vaddr[buffer->user_size / 4] = data;
+ buffer->user_size += 4;
+}
+
+static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
+ u32 reg, u32 value)
+{
+ u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;
+
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ /* write a register via cmd stream */
+ OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
+ VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
+ VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
+ OUT(buffer, value);
+}
+
+static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
+{
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ OUT(buffer, VIV_FE_END_HEADER_OP_END);
+}
+
+static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
+{
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
+}
+
+static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
+ u16 prefetch, u32 address)
+{
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
+ VIV_FE_LINK_HEADER_PREFETCH(prefetch));
+ OUT(buffer, address);
+}
+
+static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
+ u32 from, u32 to)
+{
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
+ OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
+}
+
+static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
+{
+ u32 flush;
+ u32 stall;
+
+ /*
+ * This assumes that if we're switching to 2D, we're switching
+ * away from 3D, and vice versa. Hence, if we're switching to
+ * the 2D core, we need to flush the 3D depth and color caches,
+ * otherwise we need to flush the 2D pixel engine cache.
+ */
+ if (pipe == ETNA_PIPE_2D)
+ flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
+ else
+ flush = VIVS_GL_FLUSH_CACHE_PE2D;
+
+ stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) |
+ VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);
+
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
+ CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall);
+
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+
+ CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+ VIVS_GL_PIPE_SELECT_PIPE(pipe));
+}
+
+static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
+{
+ return buf->paddr - gpu->memory_base;
+}
+
+static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
+ struct etnaviv_cmdbuf *buf, u32 off, u32 len)
+{
+ u32 size = buf->size;
+ u32 *ptr = buf->vaddr + off;
+
+ dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
+ ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);
+
+ print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
+ ptr, len * 4, 0);
+}
+
+u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_cmdbuf *buffer = gpu->buffer;
+
+ /* initialize buffer */
+ buffer->user_size = 0;
+
+ CMD_WAIT(buffer);
+ CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);
+
+ return buffer->user_size / 8;
+}
+
+void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_cmdbuf *buffer = gpu->buffer;
+
+ /* Replace the last WAIT with an END */
+ buffer->user_size -= 16;
+
+ CMD_END(buffer);
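+ /* make sure the rewritten END opcode is visible before the FE fetches it */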
+ mb();
+}
+
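+/*
+ * Append a submitted command buffer to the ring: emit any required MMU
+ * flush and pipe switch, the completion event, and a fresh WAIT/LINK
+ * loop, then patch the previous WAIT into a LINK so the FE flows from
+ * the old loop through the new commands and parks on the new loop.
+ */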
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
+ struct etnaviv_cmdbuf *cmdbuf)
+{
+ struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ u32 *lw = buffer->vaddr + buffer->user_size - 16;
+ u32 back, link_target, link_size, reserve_size, extra_size = 0;
+
+ if (drm_debug & DRM_UT_DRIVER)
+ etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+
+ /*
+ * If we need to flush the MMU prior to submitting this buffer, we
+ * will need to append a mmu flush load state, followed by a new
+ * link to this buffer - a total of four additional words.
+ */
+ if (gpu->mmu->need_flush || gpu->switch_context) {
+ /* link command */
+ extra_size += 2;
+ /* flush command */
+ if (gpu->mmu->need_flush)
+ extra_size += 2;
+ /* pipe switch commands */
+ if (gpu->switch_context)
+ extra_size += 8;
+ }
+
+ reserve_size = (6 + extra_size) * 4;
+
+ /*
+ * if we are going to completely overflow the buffer, we need to wrap.
+ */
+ if (buffer->user_size + reserve_size > buffer->size)
+ buffer->user_size = 0;
+
+ /* save offset back into main buffer */
+ back = buffer->user_size + reserve_size - 6 * 4;
+ link_target = gpu_va(gpu, buffer) + buffer->user_size;
+ link_size = 6;
+
+ /* Skip over any extra instructions */
+ link_target += extra_size * sizeof(u32);
+
+ if (drm_debug & DRM_UT_DRIVER)
+ pr_info("stream link to 0x%08x @ 0x%08x %p\n",
+ link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
+
+ /* jump back from cmd to main buffer */
+ CMD_LINK(cmdbuf, link_size, link_target);
+
+ link_target = gpu_va(gpu, cmdbuf);
+ link_size = cmdbuf->size / 8;
+
+ if (drm_debug & DRM_UT_DRIVER) {
+ print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
+ cmdbuf->vaddr, cmdbuf->size, 0);
+
+ pr_info("link op: %p\n", lw);
+ pr_info("link addr: %p\n", lw + 1);
+ pr_info("addr: 0x%08x\n", link_target);
+ pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back);
+ pr_info("event: %d\n", event);
+ }
+
+ if (gpu->mmu->need_flush || gpu->switch_context) {
+ u32 new_target = gpu_va(gpu, buffer) + buffer->user_size;
+
+ if (gpu->mmu->need_flush) {
+ /* Add the MMU flush */
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
+ VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
+ VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
+ VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
+ VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
+ VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
+
+ gpu->mmu->need_flush = false;
+ }
+
+ if (gpu->switch_context) {
+ etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state);
+ gpu->switch_context = false;
+ }
+
+ /* And the link to the first buffer */
+ CMD_LINK(buffer, link_size, link_target);
+
+ /* Update the link target to point to above instructions */
+ link_target = new_target;
+ link_size = extra_size;
+ }
+
+ /* trigger event */
+ CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
+ VIVS_GL_EVENT_FROM_PE);
+
+ /* append WAIT/LINK to main buffer */
+ CMD_WAIT(buffer);
+ CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4));
+
+ /* Change WAIT into a LINK command; write the address first. */
+ *(lw + 1) = link_target;
+ mb();
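+ /*
+ * The FE may fetch the WAIT at any moment, so the new target
+ * address must be visible in memory before the opcode is
+ * rewritten into a LINK.
+ */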
+ *(lw) = VIV_FE_LINK_HEADER_OP_LINK |
+ VIV_FE_LINK_HEADER_PREFETCH(link_size);
+ mb();
+
+ if (drm_debug & DRM_UT_DRIVER)
+ etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
new file mode 100644
index 000000000000..dcfd565c88d1
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+
+#include "cmdstream.xml.h"
+
+#define EXTRACT(val, field) (((val) & field##__MASK) >> field##__SHIFT)
+
+struct etna_validation_state {
+ struct etnaviv_gpu *gpu;
+ const struct drm_etnaviv_gem_submit_reloc *relocs;
+ unsigned int num_relocs;
+ u32 *start;
+};
+
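+/*
+ * Ranges of GPU states which may only be written through a relocation
+ * entry. ST() stores register addresses as state indices (address >> 2)
+ * so they can be looked up in the etnaviv_states bitmap below.
+ */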
+static const struct {
+ u16 offset;
+ u16 size;
+} etnaviv_sensitive_states[] __initconst = {
+#define ST(start, num) { (start) >> 2, (num) }
+ /* 2D */
+ ST(0x1200, 1),
+ ST(0x1228, 1),
+ ST(0x1238, 1),
+ ST(0x1284, 1),
+ ST(0x128c, 1),
+ ST(0x1304, 1),
+ ST(0x1310, 1),
+ ST(0x1318, 1),
+ ST(0x12800, 4),
+ ST(0x128a0, 4),
+ ST(0x128c0, 4),
+ ST(0x12970, 4),
+ ST(0x12a00, 8),
+ ST(0x12b40, 8),
+ ST(0x12b80, 8),
+ ST(0x12ce0, 8),
+ /* 3D */
+ ST(0x0644, 1),
+ ST(0x064c, 1),
+ ST(0x0680, 8),
+ ST(0x1410, 1),
+ ST(0x1430, 1),
+ ST(0x1458, 1),
+ ST(0x1460, 8),
+ ST(0x1480, 8),
+ ST(0x1500, 8),
+ ST(0x1520, 8),
+ ST(0x1608, 1),
+ ST(0x1610, 1),
+ ST(0x1658, 1),
+ ST(0x165c, 1),
+ ST(0x1664, 1),
+ ST(0x1668, 1),
+ ST(0x16a4, 1),
+ ST(0x16c0, 8),
+ ST(0x16e0, 8),
+ ST(0x1740, 8),
+ ST(0x2400, 14 * 16),
+ ST(0x10800, 32 * 16),
+#undef ST
+};
+
+#define ETNAVIV_STATES_SIZE (VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK + 1u)
+static DECLARE_BITMAP(etnaviv_states, ETNAVIV_STATES_SIZE);
+
+void __init etnaviv_validate_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(etnaviv_sensitive_states); i++)
+ bitmap_set(etnaviv_states, etnaviv_sensitive_states[i].offset,
+ etnaviv_sensitive_states[i].size);
+}
+
+static void etnaviv_warn_if_non_sensitive(struct etna_validation_state *state,
+ unsigned int buf_offset, unsigned int state_addr)
+{
+ if (state->num_relocs && state->relocs->submit_offset < buf_offset) {
+ dev_warn_once(state->gpu->dev,
+ "%s: relocation for non-sensitive state 0x%x at offset %u\n",
+ __func__, state_addr,
+ state->relocs->submit_offset);
+ while (state->num_relocs &&
+ state->relocs->submit_offset < buf_offset) {
+ state->relocs++;
+ state->num_relocs--;
+ }
+ }
+}
+
+static bool etnaviv_validate_load_state(struct etna_validation_state *state,
+ u32 *ptr, unsigned int state_offset, unsigned int num)
+{
+ unsigned int size = min(ETNAVIV_STATES_SIZE, state_offset + num);
+ unsigned int st_offset = state_offset, buf_offset;
+
+ for_each_set_bit_from(st_offset, etnaviv_states, size) {
+ buf_offset = (ptr - state->start +
+ st_offset - state_offset) * 4;
+
+ etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4);
+ if (state->num_relocs &&
+ state->relocs->submit_offset == buf_offset) {
+ state->relocs++;
+ state->num_relocs--;
+ continue;
+ }
+
+ dev_warn_ratelimited(state->gpu->dev,
+ "%s: load state touches restricted state 0x%x at offset %u\n",
+ __func__, st_offset * 4, buf_offset);
+ return false;
+ }
+
+ if (state->num_relocs) {
+ buf_offset = (ptr - state->start + num) * 4;
+ etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4 +
+ state->relocs->submit_offset -
+ buf_offset);
+ }
+
+ return true;
+}
+
+static const uint8_t cmd_length[32] = {
+ [FE_OPCODE_DRAW_PRIMITIVES] = 4,
+ [FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6,
+ [FE_OPCODE_NOP] = 2,
+ [FE_OPCODE_STALL] = 2,
+};
+
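+/*
+ * Validate a user command stream opcode by opcode: reject any opcode
+ * without a known length, and any LOAD_STATE which touches a sensitive
+ * state address without a matching relocation entry. Returns true if
+ * the stream is safe to submit.
+ */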
+bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, u32 *stream,
+ unsigned int size,
+ struct drm_etnaviv_gem_submit_reloc *relocs,
+ unsigned int reloc_size)
+{
+ struct etna_validation_state state;
+ u32 *buf = stream;
+ u32 *end = buf + size;
+
+ state.gpu = gpu;
+ state.relocs = relocs;
+ state.num_relocs = reloc_size;
+ state.start = stream;
+
+ while (buf < end) {
+ u32 cmd = *buf;
+ unsigned int len, n, off;
+ unsigned int op = cmd >> 27;
+
+ switch (op) {
+ case FE_OPCODE_LOAD_STATE:
+ n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT);
+ len = ALIGN(1 + n, 2);
+ if (buf + len > end)
+ break;
+
+ off = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_OFFSET);
+ if (!etnaviv_validate_load_state(&state, buf + 1,
+ off, n))
+ return false;
+ break;
+
+ case FE_OPCODE_DRAW_2D:
+ n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT);
+ if (n == 0)
+ n = 256;
+ len = 2 + n * 2;
+ break;
+
+ default:
+ len = cmd_length[op];
+ if (len == 0) {
+ dev_err(gpu->dev, "%s: op %u not permitted at offset %tu\n",
+ __func__, op, buf - state.start);
+ return false;
+ }
+ break;
+ }
+
+ buf += len;
+ }
+
+ if (buf > end) {
+ dev_err(gpu->dev, "%s: commands overflow end of buffer: %tu > %u\n",
+ __func__, buf - state.start, size);
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
new file mode 100644
index 000000000000..5c89ebb52fd2
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -0,0 +1,707 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/component.h>
+#include <linux/of_platform.h>
+
+#include "etnaviv_drv.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
+#include "etnaviv_gem.h"
+
+#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
+static bool reglog;
+MODULE_PARM_DESC(reglog, "Enable register read/write logging");
+module_param(reglog, bool, 0600);
+#else
+#define reglog 0
+#endif
+
+void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
+ const char *dbgname)
+{
+ struct resource *res;
+ void __iomem *ptr;
+
+ if (name)
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ else
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ ptr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ptr)) {
+ dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
+ PTR_ERR(ptr));
+ return ptr;
+ }
+
+ if (reglog)
+ dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
+ dbgname, ptr, (size_t)resource_size(res));
+
+ return ptr;
+}
+
+void etnaviv_writel(u32 data, void __iomem *addr)
+{
+ if (reglog)
+ printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
+
+ writel(data, addr);
+}
+
+u32 etnaviv_readl(const void __iomem *addr)
+{
+ u32 val = readl(addr);
+
+ if (reglog)
+ printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
+
+ return val;
+}
+
+/*
+ * DRM operations:
+ */
+
+static void load_gpu(struct drm_device *dev)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < ETNA_MAX_PIPES; i++) {
+ struct etnaviv_gpu *g = priv->gpu[i];
+
+ if (g) {
+ int ret;
+
+ ret = etnaviv_gpu_init(g);
+ if (ret) {
+ dev_err(g->dev, "hw init failed: %d\n", ret);
+ priv->gpu[i] = NULL;
+ }
+ }
+ }
+}
+
+static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct etnaviv_file_private *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->driver_priv = ctx;
+
+ return 0;
+}
+
+static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct etnaviv_file_private *ctx = file->driver_priv;
+ unsigned int i;
+
+ for (i = 0; i < ETNA_MAX_PIPES; i++) {
+ struct etnaviv_gpu *gpu = priv->gpu[i];
+
+ if (gpu) {
+ mutex_lock(&gpu->lock);
+ if (gpu->lastctx == ctx)
+ gpu->lastctx = NULL;
+ mutex_unlock(&gpu->lock);
+ }
+ }
+
+ kfree(ctx);
+}
+
+/*
+ * DRM debugfs:
+ */
+
+#ifdef CONFIG_DEBUG_FS
+static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+
+ etnaviv_gem_describe_objects(priv, m);
+
+ return 0;
+}
+
+static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
+{
+ int ret;
+
+ read_lock(&dev->vma_offset_manager->vm_lock);
+ ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
+ read_unlock(&dev->vma_offset_manager->vm_lock);
+
+ return ret;
+}
+
+static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
+{
+ seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
+
+ mutex_lock(&gpu->mmu->lock);
+ drm_mm_dump_table(m, &gpu->mmu->mm);
+ mutex_unlock(&gpu->mmu->lock);
+
+ return 0;
+}
+
+static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
+{
+ struct etnaviv_cmdbuf *buf = gpu->buffer;
+ u32 size = buf->size;
+ u32 *ptr = buf->vaddr;
+ u32 i;
+
+ seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
+ buf->vaddr, (u64)buf->paddr, size - buf->user_size);
+
+ for (i = 0; i < size / 4; i++) {
+ if (i && !(i % 4))
+ seq_puts(m, "\n");
+ if (i % 4 == 0)
+ seq_printf(m, "\t0x%p: ", ptr + i);
+ seq_printf(m, "%08x ", *(ptr + i));
+ }
+ seq_puts(m, "\n");
+}
+
+static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
+{
+ seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));
+
+ mutex_lock(&gpu->lock);
+ etnaviv_buffer_dump(gpu, m);
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+}
+
+static int show_unlocked(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ int (*show)(struct drm_device *dev, struct seq_file *m) =
+ node->info_ent->data;
+
+ return show(dev, m);
+}
+
+static int show_each_gpu(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct etnaviv_gpu *gpu;
+ int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
+ node->info_ent->data;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < ETNA_MAX_PIPES; i++) {
+ gpu = priv->gpu[i];
+ if (!gpu)
+ continue;
+
+ ret = show(gpu, m);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static struct drm_info_list etnaviv_debugfs_list[] = {
+ {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
+ {"gem", show_unlocked, 0, etnaviv_gem_show},
+ { "mm", show_unlocked, 0, etnaviv_mm_show },
+ {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
+ {"ring", show_each_gpu, 0, etnaviv_ring_show},
+};
+
+static int etnaviv_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ ret = drm_debugfs_create_files(etnaviv_debugfs_list,
+ ARRAY_SIZE(etnaviv_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret)
+ dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
+
+ return ret;
+}
+
+static void etnaviv_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(etnaviv_debugfs_list,
+ ARRAY_SIZE(etnaviv_debugfs_list), minor);
+}
+#endif
+
+/*
+ * DRM ioctls:
+ */
+
+static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_param *args = data;
+ struct etnaviv_gpu *gpu;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ return etnaviv_gpu_get_param(gpu, args->param, &args->value);
+}
+
+static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_etnaviv_gem_new *args = data;
+
+ if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
+ ETNA_BO_FORCE_MMU))
+ return -EINVAL;
+
+ return etnaviv_gem_new_handle(dev, file, args->size,
+ args->flags, &args->handle);
+}
+
+#define TS(t) ((struct timespec){ \
+ .tv_sec = (t).tv_sec, \
+ .tv_nsec = (t).tv_nsec \
+})
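+
+/*
+ * TS() builds a struct timespec compound literal from the UAPI timeout
+ * (a seconds/nanoseconds pair), so it can be passed by pointer, e.g.
+ * etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout)).
+ */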
+
+static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_etnaviv_gem_cpu_prep *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_etnaviv_gem_cpu_fini *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (args->flags)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = etnaviv_gem_cpu_fini(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_etnaviv_gem_info *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = etnaviv_gem_mmap_offset(obj, &args->offset);
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_etnaviv_wait_fence *args = data;
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct timespec *timeout = &TS(args->timeout);
+ struct etnaviv_gpu *gpu;
+
+ if (args->flags & ~(ETNA_WAIT_NONBLOCK))
+ return -EINVAL;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ if (args->flags & ETNA_WAIT_NONBLOCK)
+ timeout = NULL;
+
+ return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
+ timeout);
+}
+
+static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_etnaviv_gem_userptr *args = data;
+ int access;
+
+ if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
+ args->flags == 0)
+ return -EINVAL;
+
+ if (offset_in_page(args->user_ptr | args->user_size) ||
+ (uintptr_t)args->user_ptr != args->user_ptr ||
+ (u32)args->user_size != args->user_size)
+ return -EINVAL;
+
+ if (args->flags & ETNA_USERPTR_WRITE)
+ access = VERIFY_WRITE;
+ else
+ access = VERIFY_READ;
+
+ if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
+ args->user_size))
+ return -EFAULT;
+
+ return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
+ args->user_size, args->flags,
+ &args->handle);
+}
+
+static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_gem_wait *args = data;
+ struct timespec *timeout = &TS(args->timeout);
+ struct drm_gem_object *obj;
+ struct etnaviv_gpu *gpu;
+ int ret;
+
+ if (args->flags & ~(ETNA_WAIT_NONBLOCK))
+ return -EINVAL;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->flags & ETNA_WAIT_NONBLOCK)
+ timeout = NULL;
+
+ ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static const struct drm_ioctl_desc etnaviv_ioctls[] = {
+#define ETNA_IOCTL(n, func, flags) \
+ DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
+ ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
+};
+
+static const struct vm_operations_struct vm_ops = {
+ .fault = etnaviv_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = etnaviv_gem_mmap,
+};
+
+static struct drm_driver etnaviv_drm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ |
+ DRIVER_GEM |
+ DRIVER_PRIME |
+ DRIVER_RENDER,
+ .open = etnaviv_open,
+ .preclose = etnaviv_preclose,
+ .set_busid = drm_platform_set_busid,
+ .gem_free_object = etnaviv_gem_free_object,
+ .gem_vm_ops = &vm_ops,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = etnaviv_gem_prime_pin,
+ .gem_prime_unpin = etnaviv_gem_prime_unpin,
+ .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
+ .gem_prime_vmap = etnaviv_gem_prime_vmap,
+ .gem_prime_vunmap = etnaviv_gem_prime_vunmap,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = etnaviv_debugfs_init,
+ .debugfs_cleanup = etnaviv_debugfs_cleanup,
+#endif
+ .ioctls = etnaviv_ioctls,
+ .num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
+ .fops = &fops,
+ .name = "etnaviv",
+ .desc = "etnaviv DRM",
+ .date = "20151214",
+ .major = 1,
+ .minor = 0,
+};
+
+/*
+ * Platform driver:
+ */
+static int etnaviv_bind(struct device *dev)
+{
+ struct etnaviv_drm_private *priv;
+ struct drm_device *drm;
+ int ret;
+
+ drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
+ if (!drm)
+ return -ENOMEM;
+
+ drm->platformdev = to_platform_device(dev);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev, "failed to allocate private data\n");
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+ drm->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("etnaviv", 0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto out_wq;
+ }
+
+ mutex_init(&priv->gem_lock);
+ INIT_LIST_HEAD(&priv->gem_list);
+ priv->num_gpus = 0;
+
+ dev_set_drvdata(dev, drm);
+
+ ret = component_bind_all(dev, drm);
+ if (ret < 0)
+ goto out_bind;
+
+ load_gpu(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto out_register;
+
+ return 0;
+
+out_register:
+ component_unbind_all(dev, drm);
+out_bind:
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+out_wq:
+ kfree(priv);
+out_unref:
+ drm_dev_unref(drm);
+
+ return ret;
+}
+
+static void etnaviv_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct etnaviv_drm_private *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
+
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
+ component_unbind_all(dev, drm);
+
+ drm->dev_private = NULL;
+ kfree(priv);
+
+ drm_put_dev(drm);
+}
+
+static const struct component_master_ops etnaviv_master_ops = {
+ .bind = etnaviv_bind,
+ .unbind = etnaviv_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ struct device_node *np = data;
+
+ return dev->of_node == np;
+}
+
+static int compare_str(struct device *dev, void *data)
+{
+ return !strcmp(dev_name(dev), data);
+}
+
+static int etnaviv_pdev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct component_match *match = NULL;
+
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+ if (node) {
+ struct device_node *core_node;
+ int i;
+
+ for (i = 0; ; i++) {
+ core_node = of_parse_phandle(node, "cores", i);
+ if (!core_node)
+ break;
+
+ component_match_add(&pdev->dev, &match, compare_of,
+ core_node);
+ of_node_put(core_node);
+ }
+ } else if (dev->platform_data) {
+ char **names = dev->platform_data;
+ unsigned i;
+
+ for (i = 0; names[i]; i++)
+ component_match_add(dev, &match, compare_str, names[i]);
+ }
+
+ return component_master_add_with_match(dev, &etnaviv_master_ops, match);
+}
+
+static int etnaviv_pdev_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &etnaviv_master_ops);
+
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "fsl,imx-gpu-subsystem" },
+ { .compatible = "marvell,dove-gpu-subsystem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver etnaviv_platform_driver = {
+ .probe = etnaviv_pdev_probe,
+ .remove = etnaviv_pdev_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "etnaviv",
+ .of_match_table = dt_match,
+ },
+};
+
+static int __init etnaviv_init(void)
+{
+ int ret;
+
+ etnaviv_validate_init();
+
+ ret = platform_driver_register(&etnaviv_gpu_driver);
+ if (ret != 0)
+ return ret;
+
+ ret = platform_driver_register(&etnaviv_platform_driver);
+ if (ret != 0)
+ platform_driver_unregister(&etnaviv_gpu_driver);
+
+ return ret;
+}
+module_init(etnaviv_init);
+
+static void __exit etnaviv_exit(void)
+{
+ platform_driver_unregister(&etnaviv_gpu_driver);
+ platform_driver_unregister(&etnaviv_platform_driver);
+}
+module_exit(etnaviv_exit);
+
+MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
+MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
+MODULE_DESCRIPTION("etnaviv DRM Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:etnaviv");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
new file mode 100644
index 000000000000..d6bd438bd5be
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_DRV_H__
+#define __ETNAVIV_DRV_H__
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/iommu.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/etnaviv_drm.h>
+
+struct etnaviv_cmdbuf;
+struct etnaviv_gpu;
+struct etnaviv_mmu;
+struct etnaviv_gem_object;
+struct etnaviv_gem_submit;
+
+struct etnaviv_file_private {
+ /*
+ * Currently we don't do anything useful with this, but when
+ * per-context address spaces are supported we'd keep track of
+ * the context's page tables here.
+ */
+ int dummy;
+};
+
+struct etnaviv_drm_private {
+ int num_gpus;
+ struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
+
+ /* list of GEM objects: */
+ struct mutex gem_lock;
+ struct list_head gem_list;
+
+ struct workqueue_struct *wq;
+};
+
+static inline void etnaviv_queue_work(struct drm_device *dev,
+ struct work_struct *w)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+
+ queue_work(priv->wq, w);
+}
+
+int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
+int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
+ struct drm_gem_object *obj, u32 *iova);
+void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj);
+struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
+void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
+void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach, struct sg_table *sg);
+int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
+void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
+void *etnaviv_gem_vaddr(struct drm_gem_object *obj);
+int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
+ struct timespec *timeout);
+int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
+void etnaviv_gem_free_object(struct drm_gem_object *obj);
+int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ u32 size, u32 flags, u32 *handle);
+struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev,
+ u32 size, u32 flags);
+struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
+ u32 size, u32 flags);
+int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
+ uintptr_t ptr, u32 size, u32 flags, u32 *handle);
+u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
+void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
+ struct etnaviv_cmdbuf *cmdbuf);
+void etnaviv_validate_init(void);
+bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
+ u32 *stream, unsigned int size,
+ struct drm_etnaviv_gem_submit_reloc *relocs, unsigned int reloc_size);
+
+#ifdef CONFIG_DEBUG_FS
+void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
+ struct seq_file *m);
+#endif
+
+void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
+ const char *dbgname);
+void etnaviv_writel(u32 data, void __iomem *addr);
+u32 etnaviv_readl(const void __iomem *addr);
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+/*
+ * Return the storage size of a structure with a variable length array.
+ * The array is nelem elements of elem_size, where the base structure
+ * is defined by base. If the size overflows size_t, return zero.
+ */
+static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
+{
+ if (elem_size && nelem > (SIZE_MAX - base) / elem_size)
+ return 0;
+ return base + nelem * elem_size;
+}
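+
+/*
+ * For example, a submit structure ending in a flexible array of n
+ * relocation entries would be sized as size_vstruct(n, sizeof(reloc),
+ * sizeof(struct submit)); a return value of zero signals overflow.
+ * (The names here are illustrative only.)
+ */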
+
+/* returns true if fence a comes after fence b */
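+/*
+ * The unsigned subtraction wraps modulo 2^32, so the comparison stays
+ * correct across seqno wraparound: fence_after(1, 0xfffffffe) is true,
+ * since (s32)(1 - 0xfffffffe) == 3 > 0.
+ */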
+static inline bool fence_after(u32 a, u32 b)
+{
+ return (s32)(a - b) > 0;
+}
+
+static inline bool fence_after_eq(u32 a, u32 b)
+{
+ return (s32)(a - b) >= 0;
+}
+
+static inline unsigned long etnaviv_timeout_to_jiffies(
+ const struct timespec *timeout)
+{
+ unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
+ unsigned long start_jiffies = jiffies;
+ unsigned long remaining_jiffies;
+
+ if (time_after(start_jiffies, timeout_jiffies))
+ remaining_jiffies = 0;
+ else
+ remaining_jiffies = timeout_jiffies - start_jiffies;
+
+ return remaining_jiffies;
+}
+
+#endif /* __ETNAVIV_DRV_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
new file mode 100644
index 000000000000..bf8fa859e8be
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/devcoredump.h>
+#include "etnaviv_dump.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+#include "state.xml.h"
+#include "state_hi.xml.h"
+
+struct core_dump_iterator {
+ void *start;
+ struct etnaviv_dump_object_header *hdr;
+ void *data;
+};
+
+static const unsigned short etnaviv_dump_registers[] = {
+ VIVS_HI_AXI_STATUS,
+ VIVS_HI_CLOCK_CONTROL,
+ VIVS_HI_IDLE_STATE,
+ VIVS_HI_AXI_CONFIG,
+ VIVS_HI_INTR_ENBL,
+ VIVS_HI_CHIP_IDENTITY,
+ VIVS_HI_CHIP_FEATURE,
+ VIVS_HI_CHIP_MODEL,
+ VIVS_HI_CHIP_REV,
+ VIVS_HI_CHIP_DATE,
+ VIVS_HI_CHIP_TIME,
+ VIVS_HI_CHIP_MINOR_FEATURE_0,
+ VIVS_HI_CACHE_CONTROL,
+ VIVS_HI_AXI_CONTROL,
+ VIVS_PM_POWER_CONTROLS,
+ VIVS_PM_MODULE_CONTROLS,
+ VIVS_PM_MODULE_STATUS,
+ VIVS_PM_PULSE_EATER,
+ VIVS_MC_MMU_FE_PAGE_TABLE,
+ VIVS_MC_MMU_TX_PAGE_TABLE,
+ VIVS_MC_MMU_PE_PAGE_TABLE,
+ VIVS_MC_MMU_PEZ_PAGE_TABLE,
+ VIVS_MC_MMU_RA_PAGE_TABLE,
+ VIVS_MC_DEBUG_MEMORY,
+ VIVS_MC_MEMORY_BASE_ADDR_RA,
+ VIVS_MC_MEMORY_BASE_ADDR_FE,
+ VIVS_MC_MEMORY_BASE_ADDR_TX,
+ VIVS_MC_MEMORY_BASE_ADDR_PEZ,
+ VIVS_MC_MEMORY_BASE_ADDR_PE,
+ VIVS_MC_MEMORY_TIMING_CONTROL,
+ VIVS_MC_BUS_CONFIG,
+ VIVS_FE_DMA_STATUS,
+ VIVS_FE_DMA_DEBUG_STATE,
+ VIVS_FE_DMA_ADDRESS,
+ VIVS_FE_DMA_LOW,
+ VIVS_FE_DMA_HIGH,
+ VIVS_FE_AUTO_FLUSH,
+};
+
+static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
+ u32 type, void *data_end)
+{
+ struct etnaviv_dump_object_header *hdr = iter->hdr;
+
+ hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
+ hdr->type = cpu_to_le32(type);
+ hdr->file_offset = cpu_to_le32(iter->data - iter->start);
+ hdr->file_size = cpu_to_le32(data_end - iter->data);
+
+ iter->hdr++;
+ iter->data += hdr->file_size;
+}
+
+static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
+ struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_dump_registers *reg = iter->data;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
+ reg->reg = etnaviv_dump_registers[i];
+ reg->value = gpu_read(gpu, etnaviv_dump_registers[i]);
+ }
+
+ etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
+}
+
+static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
+ struct etnaviv_gpu *gpu, size_t mmu_size)
+{
+ etnaviv_iommu_dump(gpu->mmu, iter->data);
+
+ etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
+}
+
+static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
+ void *ptr, size_t size, u64 iova)
+{
+ memcpy(iter->data, ptr, size);
+
+ iter->hdr->iova = cpu_to_le64(iova);
+
+ etnaviv_core_dump_header(iter, type, iter->data + size);
+}
+
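+/*
+ * Write a devcoredump of the GPU state: registers, MMU tables, the
+ * kernel ring buffer, all active command buffers and all mapped buffer
+ * objects, plus a bomap of their physical pages. The total file size
+ * is computed up front so the dump can be laid out in a single pass.
+ */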
+void etnaviv_core_dump(struct etnaviv_gpu *gpu)
+{
+ struct core_dump_iterator iter;
+ struct etnaviv_vram_mapping *vram;
+ struct etnaviv_gem_object *obj;
+ struct etnaviv_cmdbuf *cmd;
+ unsigned int n_obj, n_bomap_pages;
+ size_t file_size, mmu_size;
+ __le64 *bomap, *bomap_start;
+
+ mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
+
+ /* We always dump registers, mmu, ring and end marker */
+ n_obj = 4;
+ n_bomap_pages = 0;
+ file_size = ARRAY_SIZE(etnaviv_dump_registers) *
+ sizeof(struct etnaviv_dump_registers) +
+ mmu_size + gpu->buffer->size;
+
+ /* Add in the active command buffers */
+ list_for_each_entry(cmd, &gpu->active_cmd_list, node) {
+ file_size += cmd->size;
+ n_obj++;
+ }
+
+ /* Add in the active buffer objects */
+ list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
+ if (!vram->use)
+ continue;
+
+ obj = vram->object;
+ file_size += obj->base.size;
+ n_bomap_pages += obj->base.size >> PAGE_SHIFT;
+ n_obj++;
+ }
+
+ /* If we have any buffer objects, add a bomap object */
+ if (n_bomap_pages) {
+ file_size += n_bomap_pages * sizeof(__le64);
+ n_obj++;
+ }
+
+ /* Add the size of the headers */
+ file_size += sizeof(*iter.hdr) * n_obj;
+
+ /* Allocate the file in vmalloc memory, it's likely to be big */
+ iter.start = vmalloc(file_size);
+ if (!iter.start) {
+ dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
+ return;
+ }
+
+ /* Point the data member after the headers */
+ iter.hdr = iter.start;
+ iter.data = &iter.hdr[n_obj];
+
+ memset(iter.hdr, 0, iter.data - iter.start);
+
+ etnaviv_core_dump_registers(&iter, gpu);
+ etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
+ gpu->buffer->size, gpu->buffer->paddr);
+
+ list_for_each_entry(cmd, &gpu->active_cmd_list, node)
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
+ cmd->size, cmd->paddr);
+
+ /* Reserve space for the bomap */
+ if (n_bomap_pages) {
+ bomap_start = bomap = iter.data;
+ memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
+ etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
+ bomap + n_bomap_pages);
+ } else {
+ /* Silence warning */
+ bomap_start = bomap = NULL;
+ }
+
+ list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
+ struct page **pages;
+ void *vaddr;
+
+ if (vram->use == 0)
+ continue;
+
+ obj = vram->object;
+
+ pages = etnaviv_gem_get_pages(obj);
+ if (pages) {
+ int j;
+
+ iter.hdr->data[0] = cpu_to_le32(bomap - bomap_start);
+
+ for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
+ *bomap++ = cpu_to_le64(page_to_phys(*pages++));
+ }
+
+ iter.hdr->iova = cpu_to_le64(vram->iova);
+
+ vaddr = etnaviv_gem_vaddr(&obj->base);
+ if (vaddr && !IS_ERR(vaddr))
+ memcpy(iter.data, vaddr, obj->base.size);
+
+ etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
+ obj->base.size);
+ }
+
+ etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
+
+ dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.h b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
new file mode 100644
index 000000000000..97f2f8db9133
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Etnaviv devcoredump file definitions
+ */
+#ifndef ETNAVIV_DUMP_H
+#define ETNAVIV_DUMP_H
+
+#include <linux/types.h>
+
+enum {
+ ETDUMP_MAGIC = 0x414e5445,
+ ETDUMP_BUF_REG = 0,
+ ETDUMP_BUF_MMU,
+ ETDUMP_BUF_RING,
+ ETDUMP_BUF_CMD,
+ ETDUMP_BUF_BOMAP,
+ ETDUMP_BUF_BO,
+ ETDUMP_BUF_END,
+};
+
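+/*
+ * A dump file is a sequence of object headers followed by the raw data
+ * blobs they describe; file_offset and file_size locate each blob
+ * relative to the start of the file. All fields are little-endian.
+ */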
+struct etnaviv_dump_object_header {
+ __le32 magic;
+ __le32 type;
+ __le32 file_offset;
+ __le32 file_size;
+ __le64 iova;
+ __le32 data[2];
+};
+
+/* Registers object, an array of these */
+struct etnaviv_dump_registers {
+ __le32 reg;
+ __le32 value;
+};
+
+#ifdef __KERNEL__
+struct etnaviv_gpu;
+void etnaviv_core_dump(struct etnaviv_gpu *gpu);
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
new file mode 100644
index 000000000000..8d6f859f8200
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -0,0 +1,897 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+
+#include "etnaviv_drv.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+
+static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
+{
+ struct drm_device *dev = etnaviv_obj->base.dev;
+ struct sg_table *sgt = etnaviv_obj->sgt;
+
+ /*
+ * For non-cached buffers, ensure the new pages are clean
+ * because display controller, GPU, etc. are not coherent.
+ */
+ if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
+ dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+}
+
+static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
+{
+ struct drm_device *dev = etnaviv_obj->base.dev;
+ struct sg_table *sgt = etnaviv_obj->sgt;
+
+ /*
+ * For non-cached buffers, ensure the new pages are clean
+ * because display controller, GPU, etc. are not coherent:
+ *
+ * WARNING: The DMA API does not support concurrent CPU
+ * and device access to the memory area. With BIDIRECTIONAL,
+ * we will clean the cache lines which overlap the region,
+ * and invalidate all cache lines (partially) contained in
+ * the region.
+ *
+ * If you have dirty data in the overlapping cache lines,
+ * that will corrupt the GPU-written data. If you have
+ * written into the remainder of the region, this can
+ * discard those writes.
+ */
+ if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
+ dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+}
+
+/* called with etnaviv_obj->lock held */
+static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+ struct drm_device *dev = etnaviv_obj->base.dev;
+ struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
+
+ if (IS_ERR(p)) {
+ dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
+ return PTR_ERR(p);
+ }
+
+ etnaviv_obj->pages = p;
+
+ return 0;
+}
+
+static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+ if (etnaviv_obj->sgt) {
+ etnaviv_gem_scatterlist_unmap(etnaviv_obj);
+ sg_free_table(etnaviv_obj->sgt);
+ kfree(etnaviv_obj->sgt);
+ etnaviv_obj->sgt = NULL;
+ }
+ if (etnaviv_obj->pages) {
+ drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
+ true, false);
+
+ etnaviv_obj->pages = NULL;
+ }
+}
+
+struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+ int ret;
+
+ lockdep_assert_held(&etnaviv_obj->lock);
+
+ if (!etnaviv_obj->pages) {
+ ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+
+ if (!etnaviv_obj->sgt) {
+ struct drm_device *dev = etnaviv_obj->base.dev;
+ int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+ struct sg_table *sgt;
+
+ sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+ if (IS_ERR(sgt)) {
+ dev_err(dev->dev, "failed to allocate sgt: %ld\n",
+ PTR_ERR(sgt));
+ return ERR_CAST(sgt);
+ }
+
+ etnaviv_obj->sgt = sgt;
+
+ etnaviv_gem_scatter_map(etnaviv_obj);
+ }
+
+ return etnaviv_obj->pages;
+}
+
+void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+ lockdep_assert_held(&etnaviv_obj->lock);
+ /* when we start tracking the pin count, then do something here */
+}
+
+static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ pgprot_t vm_page_prot;
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+ if (etnaviv_obj->flags & ETNA_BO_WC) {
+ vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
+ } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
+ vma->vm_page_prot = pgprot_noncached(vm_page_prot);
+ } else {
+ /*
+ * Shunt off cached objs to shmem file so they have their own
+ * address_space (so unmap_mapping_range does what we want,
+ * in particular in the case of mmap'd dmabufs)
+ */
+ fput(vma->vm_file);
+ get_file(obj->filp);
+ vma->vm_pgoff = 0;
+ vma->vm_file = obj->filp;
+
+ vma->vm_page_prot = vm_page_prot;
+ }
+
+ return 0;
+}
+
+int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret) {
+ DBG("mmap failed: %d", ret);
+ return ret;
+ }
+
+ return etnaviv_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
+int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct page **pages, *page;
+ pgoff_t pgoff;
+ int ret;
+
+ /*
+ * Make sure we don't parallel update on a fault, nor move or remove
+ * something from beneath our feet. Note that vm_insert_page() is
+ * specifically coded to take care of this, so we don't have to.
+ */
+ ret = mutex_lock_interruptible(&etnaviv_obj->lock);
+ if (ret)
+ goto out;
+
+ /* make sure we have pages attached now */
+ pages = etnaviv_gem_get_pages(etnaviv_obj);
+ mutex_unlock(&etnaviv_obj->lock);
+
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto out;
+ }
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ page = pages[pgoff];
+
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
+
+ ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+
+out:
+ switch (ret) {
+ case -EAGAIN:
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ case -EBUSY:
+ /*
+ * EBUSY is ok: this just means that another thread
+ * already did the job.
+ */
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
+{
+ int ret;
+
+ /* Make it mmapable */
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ dev_err(obj->dev->dev, "could not allocate mmap offset\n");
+ else
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+ return ret;
+}
+
+static struct etnaviv_vram_mapping *
+etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
+ struct etnaviv_iommu *mmu)
+{
+ struct etnaviv_vram_mapping *mapping;
+
+ list_for_each_entry(mapping, &obj->vram_list, obj_node) {
+ if (mapping->mmu == mmu)
+ return mapping;
+ }
+
+ return NULL;
+}
+
+int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
+ struct drm_gem_object *obj, u32 *iova)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct etnaviv_vram_mapping *mapping;
+ struct page **pages;
+ int ret = 0;
+
+ mutex_lock(&etnaviv_obj->lock);
+ mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
+ if (mapping) {
+ /*
+ * Holding the object lock prevents the use count changing
+ * beneath us. If the use count is zero, the MMU might be
+ * reaping this object, so take the lock and re-check that
+ * the MMU owns this mapping to close this race.
+ */
+ if (mapping->use == 0) {
+ mutex_lock(&gpu->mmu->lock);
+ if (mapping->mmu == gpu->mmu)
+ mapping->use += 1;
+ else
+ mapping = NULL;
+ mutex_unlock(&gpu->mmu->lock);
+ if (mapping)
+ goto out;
+ } else {
+ mapping->use += 1;
+ goto out;
+ }
+ }
+
+ pages = etnaviv_gem_get_pages(etnaviv_obj);
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto out;
+ }
+
+ /*
+ * See if we have a reaped vram mapping we can re-use before
+ * allocating a fresh mapping.
+ */
+ mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
+ if (!mapping) {
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&mapping->scan_node);
+ mapping->object = etnaviv_obj;
+ } else {
+ list_del(&mapping->obj_node);
+ }
+
+ mapping->mmu = gpu->mmu;
+ mapping->use = 1;
+
+ ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
+ mapping);
+ if (ret < 0)
+ kfree(mapping);
+ else
+ list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
+
+out:
+ mutex_unlock(&etnaviv_obj->lock);
+
+ if (!ret) {
+ /* Take a reference on the object */
+ drm_gem_object_reference(obj);
+ *iova = mapping->iova;
+ }
+
+ return ret;
+}
+
+void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct etnaviv_vram_mapping *mapping;
+
+ mutex_lock(&etnaviv_obj->lock);
+ mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
+
+ WARN_ON(mapping->use == 0);
+ mapping->use -= 1;
+ mutex_unlock(&etnaviv_obj->lock);
+
+ drm_gem_object_unreference_unlocked(obj);
+}
+
+void *etnaviv_gem_vaddr(struct drm_gem_object *obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ mutex_lock(&etnaviv_obj->lock);
+ if (!etnaviv_obj->vaddr) {
+ struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
+
+ if (IS_ERR(pages)) {
+ mutex_unlock(&etnaviv_obj->lock);
+ return ERR_CAST(pages);
+ }
+
+ etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ }
+ mutex_unlock(&etnaviv_obj->lock);
+
+ return etnaviv_obj->vaddr;
+}
+
+static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
+{
+ if (op & ETNA_PREP_READ)
+ return DMA_FROM_DEVICE;
+ else if (op & ETNA_PREP_WRITE)
+ return DMA_TO_DEVICE;
+ else
+ return DMA_BIDIRECTIONAL;
+}
+
+int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
+ struct timespec *timeout)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct drm_device *dev = obj->dev;
+ bool write = !!(op & ETNA_PREP_WRITE);
+ int ret;
+
+ if (op & ETNA_PREP_NOSYNC) {
+ if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
+ write))
+ return -EBUSY;
+ } else {
+ unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
+
+ ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+ write, true, remain);
+ if (ret <= 0)
+ return ret == 0 ? -ETIMEDOUT : ret;
+ }
+
+ if (etnaviv_obj->flags & ETNA_BO_CACHED) {
+ if (!etnaviv_obj->sgt) {
+ struct page **pages;
+
+ mutex_lock(&etnaviv_obj->lock);
+ pages = etnaviv_gem_get_pages(etnaviv_obj);
+ mutex_unlock(&etnaviv_obj->lock);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+ }
+
+ dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
+ etnaviv_obj->sgt->nents,
+ etnaviv_op_to_dma_dir(op));
+ etnaviv_obj->last_cpu_prep_op = op;
+ }
+
+ return 0;
+}
+
+int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ if (etnaviv_obj->flags & ETNA_BO_CACHED) {
+ /* fini without a prep is almost certainly a userspace error */
+ WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
+ dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
+ etnaviv_obj->sgt->nents,
+ etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
+ etnaviv_obj->last_cpu_prep_op = 0;
+ }
+
+ return 0;
+}
+
+int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
+ struct timespec *timeout)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void etnaviv_gem_describe_fence(struct fence *fence,
+ const char *type, struct seq_file *m)
+{
+ if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ seq_printf(m, "\t%9s: %s %s seq %u\n",
+ type,
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence),
+ fence->seqno);
+}
+
+static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct reservation_object *robj = etnaviv_obj->resv;
+ struct reservation_object_list *fobj;
+ struct fence *fence;
+ unsigned long off = drm_vma_node_start(&obj->vma_node);
+
+ seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
+ etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
+ obj->name, obj->refcount.refcount.counter,
+ off, etnaviv_obj->vaddr, obj->size);
+
+ rcu_read_lock();
+ fobj = rcu_dereference(robj->fence);
+ if (fobj) {
+ unsigned int i, shared_count = fobj->shared_count;
+
+ for (i = 0; i < shared_count; i++) {
+ fence = rcu_dereference(fobj->shared[i]);
+ etnaviv_gem_describe_fence(fence, "Shared", m);
+ }
+ }
+
+ fence = rcu_dereference(robj->fence_excl);
+ if (fence)
+ etnaviv_gem_describe_fence(fence, "Exclusive", m);
+ rcu_read_unlock();
+}
+
+void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
+ struct seq_file *m)
+{
+ struct etnaviv_gem_object *etnaviv_obj;
+ int count = 0;
+ size_t size = 0;
+
+ mutex_lock(&priv->gem_lock);
+ list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
+ struct drm_gem_object *obj = &etnaviv_obj->base;
+
+ seq_puts(m, " ");
+ etnaviv_gem_describe(obj, m);
+ count++;
+ size += obj->size;
+ }
+ mutex_unlock(&priv->gem_lock);
+
+ seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+}
+#endif
+
+static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
+{
+ if (etnaviv_obj->vaddr)
+ vunmap(etnaviv_obj->vaddr);
+ put_pages(etnaviv_obj);
+}
+
+static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
+ .get_pages = etnaviv_gem_shmem_get_pages,
+ .release = etnaviv_gem_shmem_release,
+};
+
+void etnaviv_gem_free_object(struct drm_gem_object *obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct etnaviv_vram_mapping *mapping, *tmp;
+
+ /* object should not be active */
+ WARN_ON(is_active(etnaviv_obj));
+
+ list_del(&etnaviv_obj->gem_node);
+
+ list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
+ obj_node) {
+ struct etnaviv_iommu *mmu = mapping->mmu;
+
+ WARN_ON(mapping->use);
+
+ if (mmu)
+ etnaviv_iommu_unmap_gem(mmu, mapping);
+
+ list_del(&mapping->obj_node);
+ kfree(mapping);
+ }
+
+ drm_gem_free_mmap_offset(obj);
+ etnaviv_obj->ops->release(etnaviv_obj);
+ if (etnaviv_obj->resv == &etnaviv_obj->_resv)
+ reservation_object_fini(&etnaviv_obj->_resv);
+ drm_gem_object_release(obj);
+
+ kfree(etnaviv_obj);
+}
+
+int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ mutex_lock(&priv->gem_lock);
+ list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
+ mutex_unlock(&priv->gem_lock);
+
+ return 0;
+}
+
+static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
+ struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
+ struct drm_gem_object **obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj;
+ unsigned sz = sizeof(*etnaviv_obj);
+ bool valid = true;
+
+ /* validate flags */
+ switch (flags & ETNA_BO_CACHE_MASK) {
+ case ETNA_BO_UNCACHED:
+ case ETNA_BO_CACHED:
+ case ETNA_BO_WC:
+ break;
+ default:
+ valid = false;
+ }
+
+ if (!valid) {
+ dev_err(dev->dev, "invalid cache flag: %x\n",
+ (flags & ETNA_BO_CACHE_MASK));
+ return -EINVAL;
+ }
+
+ etnaviv_obj = kzalloc(sz, GFP_KERNEL);
+ if (!etnaviv_obj)
+ return -ENOMEM;
+
+ etnaviv_obj->flags = flags;
+ etnaviv_obj->ops = ops;
+ if (robj) {
+ etnaviv_obj->resv = robj;
+ } else {
+ etnaviv_obj->resv = &etnaviv_obj->_resv;
+ reservation_object_init(&etnaviv_obj->_resv);
+ }
+
+ mutex_init(&etnaviv_obj->lock);
+ INIT_LIST_HEAD(&etnaviv_obj->vram_list);
+
+ *obj = &etnaviv_obj->base;
+
+ return 0;
+}
+
+static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
+ u32 size, u32 flags)
+{
+ struct drm_gem_object *obj = NULL;
+ int ret;
+
+ size = PAGE_ALIGN(size);
+
+ ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
+ &etnaviv_gem_shmem_ops, &obj);
+ if (ret)
+ goto fail;
+
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret == 0) {
+ struct address_space *mapping;
+
+ /*
+ * Our buffers are kept pinned, so allocating them
+ * from the MOVABLE zone is a really bad idea, and
+ * conflicts with CMA. See the comments above new_inode()
+ * for why this is required _and_ expected if you're
+ * going to pin these pages.
+ */
+ mapping = file_inode(obj->filp)->i_mapping;
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+ }
+
+ if (ret)
+ goto fail;
+
+ return obj;
+
+fail:
+ if (obj)
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ERR_PTR(ret);
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ u32 size, u32 flags, u32 *handle)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = __etnaviv_gem_new(dev, size, flags);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = etnaviv_gem_obj_add(dev, obj);
+ if (ret < 0) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
+ ret = drm_gem_handle_create(file, obj, handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
+ u32 size, u32 flags)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = __etnaviv_gem_new(dev, size, flags);
+ if (IS_ERR(obj))
+ return obj;
+
+ ret = etnaviv_gem_obj_add(dev, obj);
+ if (ret < 0) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(ret);
+ }
+
+ return obj;
+}
+
+int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
+ struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
+ struct etnaviv_gem_object **res)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
+ if (ret)
+ return ret;
+
+ drm_gem_private_object_init(dev, obj, size);
+
+ *res = to_etnaviv_bo(obj);
+
+ return 0;
+}
+
+struct get_pages_work {
+ struct work_struct work;
+ struct mm_struct *mm;
+ struct task_struct *task;
+ struct etnaviv_gem_object *etnaviv_obj;
+};
+
+static struct page **etnaviv_gem_userptr_do_get_pages(
+ struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm,
+ struct task_struct *task)
+{
+ int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+ struct page **pvec;
+ uintptr_t ptr;
+
+ pvec = drm_malloc_ab(npages, sizeof(struct page *));
+ if (!pvec)
+ return ERR_PTR(-ENOMEM);
+
+ pinned = 0;
+ ptr = etnaviv_obj->userptr.ptr;
+
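+ /*
+ * get_user_pages() may pin fewer pages than asked for, so keep
+ * calling it until the whole range is pinned or an error occurs.
+ */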
+ down_read(&mm->mmap_sem);
+ while (pinned < npages) {
+ ret = get_user_pages(task, mm, ptr, npages - pinned,
+ !etnaviv_obj->userptr.ro, 0,
+ pvec + pinned, NULL);
+ if (ret < 0)
+ break;
+
+ ptr += ret * PAGE_SIZE;
+ pinned += ret;
+ }
+ up_read(&mm->mmap_sem);
+
+ if (ret < 0) {
+ release_pages(pvec, pinned, 0);
+ drm_free_large(pvec);
+ return ERR_PTR(ret);
+ }
+
+ return pvec;
+}
+
+static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
+{
+ struct get_pages_work *work = container_of(_work, typeof(*work), work);
+ struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
+ struct page **pvec;
+
+ pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);
+
+ mutex_lock(&etnaviv_obj->lock);
+ if (IS_ERR(pvec)) {
+ etnaviv_obj->userptr.work = ERR_CAST(pvec);
+ } else {
+ etnaviv_obj->userptr.work = NULL;
+ etnaviv_obj->pages = pvec;
+ }
+
+ mutex_unlock(&etnaviv_obj->lock);
+ drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+
+ mmput(work->mm);
+ put_task_struct(work->task);
+ kfree(work);
+}
+
+static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+ struct page **pvec = NULL;
+ struct get_pages_work *work;
+ struct mm_struct *mm;
+ int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+
+ if (etnaviv_obj->userptr.work) {
+ if (IS_ERR(etnaviv_obj->userptr.work)) {
+ ret = PTR_ERR(etnaviv_obj->userptr.work);
+ etnaviv_obj->userptr.work = NULL;
+ } else {
+ ret = -EAGAIN;
+ }
+ return ret;
+ }
+
+ mm = get_task_mm(etnaviv_obj->userptr.task);
+ pinned = 0;
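+ /*
+ * Fast path: if the userptr belongs to our own mm, try the
+ * lockless __get_user_pages_fast(). If that can't pin everything
+ * (or the mm is foreign), fall back to a worker which can take
+ * mmap_sem and sleep.
+ */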
+ if (mm == current->mm) {
+ pvec = drm_malloc_ab(npages, sizeof(struct page *));
+ if (!pvec) {
+ mmput(mm);
+ return -ENOMEM;
+ }
+
+ pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
+ !etnaviv_obj->userptr.ro, pvec);
+ if (pinned < 0) {
+ drm_free_large(pvec);
+ mmput(mm);
+ return pinned;
+ }
+
+ if (pinned == npages) {
+ etnaviv_obj->pages = pvec;
+ mmput(mm);
+ return 0;
+ }
+ }
+
+ release_pages(pvec, pinned, 0);
+ drm_free_large(pvec);
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ mmput(mm);
+ return -ENOMEM;
+ }
+
+ get_task_struct(current);
+ drm_gem_object_reference(&etnaviv_obj->base);
+
+ work->mm = mm;
+ work->task = current;
+ work->etnaviv_obj = etnaviv_obj;
+
+ etnaviv_obj->userptr.work = &work->work;
+ INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);
+
+ etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);
+
+ return -EAGAIN;
+}
+
+static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
+{
+ if (etnaviv_obj->sgt) {
+ etnaviv_gem_scatterlist_unmap(etnaviv_obj);
+ sg_free_table(etnaviv_obj->sgt);
+ kfree(etnaviv_obj->sgt);
+ }
+ if (etnaviv_obj->pages) {
+ int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+
+ release_pages(etnaviv_obj->pages, npages, 0);
+ drm_free_large(etnaviv_obj->pages);
+ }
+ put_task_struct(etnaviv_obj->userptr.task);
+}
+
+static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
+ .get_pages = etnaviv_gem_userptr_get_pages,
+ .release = etnaviv_gem_userptr_release,
+};
+
+int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
+ uintptr_t ptr, u32 size, u32 flags, u32 *handle)
+{
+ struct etnaviv_gem_object *etnaviv_obj;
+ int ret;
+
+ ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
+ &etnaviv_gem_userptr_ops, &etnaviv_obj);
+ if (ret)
+ return ret;
+
+ etnaviv_obj->userptr.ptr = ptr;
+ etnaviv_obj->userptr.task = current;
+ etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
+ get_task_struct(current);
+
+ ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ return ret;
+ }
+
+ ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
new file mode 100644
index 000000000000..a300b4b3d545
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_GEM_H__
+#define __ETNAVIV_GEM_H__
+
+#include <linux/reservation.h>
+#include "etnaviv_drv.h"
+
+struct etnaviv_gem_ops;
+struct etnaviv_gem_object;
+
+struct etnaviv_gem_userptr {
+ uintptr_t ptr;
+ struct task_struct *task;
+ struct work_struct *work;
+ bool ro;
+};
+
+struct etnaviv_vram_mapping {
+ struct list_head obj_node;
+ struct list_head scan_node;
+ struct list_head mmu_node;
+ struct etnaviv_gem_object *object;
+ struct etnaviv_iommu *mmu;
+ struct drm_mm_node vram_node;
+ unsigned int use;
+ u32 iova;
+};
+
+struct etnaviv_gem_object {
+ struct drm_gem_object base;
+ const struct etnaviv_gem_ops *ops;
+ struct mutex lock;
+
+ u32 flags;
+
+ struct list_head gem_node;
+ struct etnaviv_gpu *gpu; /* non-null if active */
+ atomic_t gpu_active;
+ u32 access;
+
+ struct page **pages;
+ struct sg_table *sgt;
+ void *vaddr;
+
+ /* normally (resv == &_resv) except for imported bo's */
+ struct reservation_object *resv;
+ struct reservation_object _resv;
+
+ struct list_head vram_list;
+
+ /* cache maintenance */
+ u32 last_cpu_prep_op;
+
+ struct etnaviv_gem_userptr userptr;
+};
+
+static inline
+struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj)
+{
+ return container_of(obj, struct etnaviv_gem_object, base);
+}
+
+struct etnaviv_gem_ops {
+ int (*get_pages)(struct etnaviv_gem_object *);
+ void (*release)(struct etnaviv_gem_object *);
+};
+
+static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
+{
+ return atomic_read(&etnaviv_obj->gpu_active) != 0;
+}
+
+#define MAX_CMDS 4
+
+/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc.,
+ * associated with the cmdstream submission for synchronization (and
+ * to make it easier to unwind when things go wrong, etc.). This only
+ * lasts for the duration of the submit-ioctl.
+ */
+struct etnaviv_gem_submit {
+ struct drm_device *dev;
+ struct etnaviv_gpu *gpu;
+ struct ww_acquire_ctx ticket;
+ u32 fence;
+ unsigned int nr_bos;
+ struct {
+ u32 flags;
+ struct etnaviv_gem_object *obj;
+ u32 iova;
+ } bos[0];
+};
+
+int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
+ struct timespec *timeout);
+int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
+ struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
+ struct etnaviv_gem_object **res);
+int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
+struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
+void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
+
+#endif /* __ETNAVIV_GEM_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
new file mode 100644
index 000000000000..e94db4f95770
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/dma-buf.h>
+#include "etnaviv_drv.h"
+#include "etnaviv_gem.h"
+
+
+struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ BUG_ON(!etnaviv_obj->sgt); /* should have already pinned! */
+
+ return etnaviv_obj->sgt;
+}
+
+void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ return etnaviv_gem_vaddr(obj);
+}
+
+void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ /* TODO msm_gem_vunmap() */
+}
+
+int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
+{
+ if (!obj->import_attach) {
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ mutex_lock(&etnaviv_obj->lock);
+ etnaviv_gem_get_pages(etnaviv_obj);
+ mutex_unlock(&etnaviv_obj->lock);
+ }
+ return 0;
+}
+
+void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
+{
+ if (!obj->import_attach) {
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ mutex_lock(&etnaviv_obj->lock);
+ etnaviv_gem_put_pages(to_etnaviv_bo(obj));
+ mutex_unlock(&etnaviv_obj->lock);
+ }
+}
+
+static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
+{
+ if (etnaviv_obj->vaddr)
+ dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf,
+ etnaviv_obj->vaddr);
+
+ /* Don't drop the pages for imported dmabuf, as they are not
+ * ours, just free the array we allocated:
+ */
+ if (etnaviv_obj->pages)
+ drm_free_large(etnaviv_obj->pages);
+
+ drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
+}
+
+static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
+ /* .get_pages should never be called */
+ .release = etnaviv_gem_prime_release,
+};
+
+struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach, struct sg_table *sgt)
+{
+ struct etnaviv_gem_object *etnaviv_obj;
+ size_t size = PAGE_ALIGN(attach->dmabuf->size);
+ int ret, npages;
+
+ ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC,
+ attach->dmabuf->resv,
+ &etnaviv_gem_prime_ops, &etnaviv_obj);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ npages = size / PAGE_SIZE;
+
+ etnaviv_obj->sgt = sgt;
+ etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (!etnaviv_obj->pages) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
+ NULL, npages);
+ if (ret)
+ goto fail;
+
+ ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
+ if (ret)
+ goto fail;
+
+ return &etnaviv_obj->base;
+
+fail:
+ drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
new file mode 100644
index 000000000000..1aba01a999df
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/reservation.h>
+#include "etnaviv_drv.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+
+/*
+ * Cmdstream submission:
+ */
+
+#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
+/* make sure these don't conflict w/ ETNA_SUBMIT_BO_x */
+#define BO_LOCKED 0x4000
+#define BO_PINNED 0x2000
+
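+/*
+ * Userspace passes pointers as u64 in the submit structs; convert via
+ * uintptr_t so the narrowing cast is well-defined on 32-bit kernels.
+ */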
+static inline void __user *to_user_ptr(u64 address)
+{
+ return (void __user *)(uintptr_t)address;
+}
+
+static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
+ struct etnaviv_gpu *gpu, size_t nr)
+{
+ struct etnaviv_gem_submit *submit;
+ size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));
+
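+ /*
+ * size_vstruct() sizes the struct plus its variable-length bos[]
+ * tail. Submissions can be large, so let this allocation fail
+ * quickly (__GFP_NORETRY) and silently (__GFP_NOWARN) rather than
+ * invoking the OOM killer on the submit path.
+ */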
+ submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ if (submit) {
+ submit->dev = dev;
+ submit->gpu = gpu;
+
+ /* initially, until copy_from_user() and bo lookup succeed: */
+ submit->nr_bos = 0;
+
+ ww_acquire_init(&submit->ticket, &reservation_ww_class);
+ }
+
+ return submit;
+}
+
+static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
+ struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
+ unsigned nr_bos)
+{
+ struct drm_etnaviv_gem_submit_bo *bo;
+ unsigned i;
+ int ret = 0;
+
+ spin_lock(&file->table_lock);
+
+ for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
+ struct drm_gem_object *obj;
+
+ if (bo->flags & BO_INVALID_FLAGS) {
+ DRM_ERROR("invalid flags: %x\n", bo->flags);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ submit->bos[i].flags = bo->flags;
+
+ /* normally use drm_gem_object_lookup(), but for bulk lookup
+ * all under single table_lock just hit object_idr directly:
+ */
+ obj = idr_find(&file->object_idr, bo->handle);
+ if (!obj) {
+ DRM_ERROR("invalid handle %u at index %u\n",
+ bo->handle, i);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /*
+ * Take a refcount on the object. The file table lock
+ * prevents the object_idr's refcount on this being dropped.
+ */
+ drm_gem_object_reference(obj);
+
+ submit->bos[i].obj = to_etnaviv_bo(obj);
+ }
+
+out_unlock:
+ submit->nr_bos = i;
+ spin_unlock(&file->table_lock);
+
+ return ret;
+}
+
+static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
+{
+ if (submit->bos[i].flags & BO_LOCKED) {
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+ ww_mutex_unlock(&etnaviv_obj->resv->lock);
+ submit->bos[i].flags &= ~BO_LOCKED;
+ }
+}
+
+static int submit_lock_objects(struct etnaviv_gem_submit *submit)
+{
+ int contended, slow_locked = -1, i, ret = 0;
+
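+ /*
+ * Lock all objects using the ww_mutex protocol: on -EDEADLK, drop
+ * every lock we hold, take the contended lock in slow-path mode,
+ * and retry with it pre-locked (tracked via slow_locked).
+ */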
+retry:
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+ if (slow_locked == i)
+ slow_locked = -1;
+
+ contended = i;
+
+ if (!(submit->bos[i].flags & BO_LOCKED)) {
+ ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
+ &submit->ticket);
+ if (ret == -EALREADY)
+ DRM_ERROR("BO at index %u already on submit list\n",
+ i);
+ if (ret)
+ goto fail;
+ submit->bos[i].flags |= BO_LOCKED;
+ }
+ }
+
+ ww_acquire_done(&submit->ticket);
+
+ return 0;
+
+fail:
+ for (; i >= 0; i--)
+ submit_unlock_object(submit, i);
+
+ if (slow_locked > 0)
+ submit_unlock_object(submit, slow_locked);
+
+ if (ret == -EDEADLK) {
+ struct etnaviv_gem_object *etnaviv_obj;
+
+ etnaviv_obj = submit->bos[contended].obj;
+
+ /* we lost out in a seqno race, lock and retry.. */
+ ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
+ &submit->ticket);
+ if (!ret) {
+ submit->bos[contended].flags |= BO_LOCKED;
+ slow_locked = contended;
+ goto retry;
+ }
+ }
+
+ return ret;
+}
+
+static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
+{
+ unsigned int context = submit->gpu->fence_context;
+ int i, ret = 0;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
+
+ ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void submit_unpin_objects(struct etnaviv_gem_submit *submit)
+{
+ int i;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+ if (submit->bos[i].flags & BO_PINNED)
+ etnaviv_gem_put_iova(submit->gpu, &etnaviv_obj->base);
+
+ submit->bos[i].iova = 0;
+ submit->bos[i].flags &= ~BO_PINNED;
+ }
+}
+
+static int submit_pin_objects(struct etnaviv_gem_submit *submit)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ u32 iova;
+
+ ret = etnaviv_gem_get_iova(submit->gpu, &etnaviv_obj->base,
+ &iova);
+ if (ret)
+ break;
+
+ submit->bos[i].flags |= BO_PINNED;
+ submit->bos[i].iova = iova;
+ }
+
+ return ret;
+}
+
+static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
+ struct etnaviv_gem_object **obj, u32 *iova)
+{
+ if (idx >= submit->nr_bos) {
+ DRM_ERROR("invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
+ return -EINVAL;
+ }
+
+ if (obj)
+ *obj = submit->bos[idx].obj;
+ if (iova)
+ *iova = submit->bos[idx].iova;
+
+ return 0;
+}
+
+/* process the reloc's and patch up the cmdstream as needed: */
+static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
+ u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
+ u32 nr_relocs)
+{
+ u32 i, last_offset = 0;
+ u32 *ptr = stream;
+ int ret;
+
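+ /*
+ * Relocations must be in non-decreasing submit_offset order;
+ * last_offset enforces this so the stream is patched in one pass.
+ */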
+ for (i = 0; i < nr_relocs; i++) {
+ const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
+ struct etnaviv_gem_object *bobj;
+ u32 iova, off;
+
+ if (unlikely(r->flags)) {
+ DRM_ERROR("invalid reloc flags\n");
+ return -EINVAL;
+ }
+
+ if (r->submit_offset % 4) {
+ DRM_ERROR("non-aligned reloc offset: %u\n",
+ r->submit_offset);
+ return -EINVAL;
+ }
+
+ /* offset in dwords: */
+ off = r->submit_offset / 4;
+
+ if (off >= size || off < last_offset) {
+ DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
+ return -EINVAL;
+ }
+
+ ret = submit_bo(submit, r->reloc_idx, &bobj, &iova);
+ if (ret)
+ return ret;
+
+ if (r->reloc_offset >=
+ bobj->base.size - sizeof(*ptr)) {
+ DRM_ERROR("relocation %u outside object", i);
+ return -EINVAL;
+ }
+
+ ptr[off] = iova + r->reloc_offset;
+
+ last_offset = off;
+ }
+
+ return 0;
+}
+
+static void submit_cleanup(struct etnaviv_gem_submit *submit)
+{
+ unsigned i;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+ submit_unlock_object(submit, i);
+ drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ }
+
+ ww_acquire_fini(&submit->ticket);
+ kfree(submit);
+}
+
+int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_gem_submit *args = data;
+ struct drm_etnaviv_gem_submit_reloc *relocs;
+ struct drm_etnaviv_gem_submit_bo *bos;
+ struct etnaviv_gem_submit *submit;
+ struct etnaviv_cmdbuf *cmdbuf;
+ struct etnaviv_gpu *gpu;
+ void *stream;
+ int ret;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ if (args->stream_size % 4) {
+ DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
+ args->stream_size);
+ return -EINVAL;
+ }
+
+ if (args->exec_state != ETNA_PIPE_3D &&
+ args->exec_state != ETNA_PIPE_2D &&
+ args->exec_state != ETNA_PIPE_VG) {
+ DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
+ return -EINVAL;
+ }
+
+ /*
+ * Copy the command submission and bo array to kernel space in
+ * one go, and do this outside of any locks.
+ */
+ bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
+ relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
+ stream = drm_malloc_ab(1, args->stream_size);
+ cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8,
+ args->nr_bos);
+ if (!bos || !relocs || !stream || !cmdbuf) {
+ ret = -ENOMEM;
+ goto err_submit_cmds;
+ }
+
+ cmdbuf->exec_state = args->exec_state;
+ cmdbuf->ctx = file->driver_priv;
+
+ ret = copy_from_user(bos, to_user_ptr(args->bos),
+ args->nr_bos * sizeof(*bos));
+ if (ret) {
+ ret = -EFAULT;
+ goto err_submit_cmds;
+ }
+
+ ret = copy_from_user(relocs, to_user_ptr(args->relocs),
+ args->nr_relocs * sizeof(*relocs));
+ if (ret) {
+ ret = -EFAULT;
+ goto err_submit_cmds;
+ }
+
+ ret = copy_from_user(stream, to_user_ptr(args->stream),
+ args->stream_size);
+ if (ret) {
+ ret = -EFAULT;
+ goto err_submit_cmds;
+ }
+
+ submit = submit_create(dev, gpu, args->nr_bos);
+ if (!submit) {
+ ret = -ENOMEM;
+ goto err_submit_cmds;
+ }
+
+ ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
+ if (ret)
+ goto err_submit_objects;
+
+ ret = submit_lock_objects(submit);
+ if (ret)
+ goto err_submit_objects;
+
+ if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
+ relocs, args->nr_relocs)) {
+ ret = -EINVAL;
+ goto err_submit_objects;
+ }
+
+ ret = submit_fence_sync(submit);
+ if (ret)
+ goto err_submit_objects;
+
+ ret = submit_pin_objects(submit);
+ if (ret)
+ goto out;
+
+ ret = submit_reloc(submit, stream, args->stream_size / 4,
+ relocs, args->nr_relocs);
+ if (ret)
+ goto out;
+
+ memcpy(cmdbuf->vaddr, stream, args->stream_size);
+ cmdbuf->user_size = ALIGN(args->stream_size, 8);
+
+ ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
+ if (ret == 0)
+ cmdbuf = NULL;
+
+ args->fence = submit->fence;
+
+out:
+ submit_unpin_objects(submit);
+
+ /*
+ * If we're returning -EAGAIN, it may be due to the userptr code
+ * wanting to run its workqueue outside of any locks. Flush our
+ * workqueue to ensure that it is run in a timely manner.
+ */
+ if (ret == -EAGAIN)
+ flush_workqueue(priv->wq);
+
+err_submit_objects:
+ submit_cleanup(submit);
+
+err_submit_cmds:
+ /* if we still own the cmdbuf */
+ if (cmdbuf)
+ etnaviv_gpu_cmdbuf_free(cmdbuf);
+ if (stream)
+ drm_free_large(stream);
+ if (bos)
+ drm_free_large(bos);
+ if (relocs)
+ drm_free_large(relocs);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
new file mode 100644
index 000000000000..d39093dc37e6
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -0,0 +1,1644 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/component.h>
+#include <linux/fence.h>
+#include <linux/moduleparam.h>
+#include <linux/of_device.h>
+#include "etnaviv_dump.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
+#include "etnaviv_iommu.h"
+#include "etnaviv_iommu_v2.h"
+#include "common.xml.h"
+#include "state.xml.h"
+#include "state_hi.xml.h"
+#include "cmdstream.xml.h"
+
+static const struct platform_device_id gpu_ids[] = {
+ { .name = "etnaviv-gpu,2d" },
+ { },
+};
+
+static bool etnaviv_dump_core = true;
+module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
+
+/*
+ * Driver functions:
+ */
+
+int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
+{
+ switch (param) {
+ case ETNAVIV_PARAM_GPU_MODEL:
+ *value = gpu->identity.model;
+ break;
+
+ case ETNAVIV_PARAM_GPU_REVISION:
+ *value = gpu->identity.revision;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_0:
+ *value = gpu->identity.features;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_1:
+ *value = gpu->identity.minor_features0;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_2:
+ *value = gpu->identity.minor_features1;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_3:
+ *value = gpu->identity.minor_features2;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_4:
+ *value = gpu->identity.minor_features3;
+ break;
+
+ case ETNAVIV_PARAM_GPU_STREAM_COUNT:
+ *value = gpu->identity.stream_count;
+ break;
+
+ case ETNAVIV_PARAM_GPU_REGISTER_MAX:
+ *value = gpu->identity.register_max;
+ break;
+
+ case ETNAVIV_PARAM_GPU_THREAD_COUNT:
+ *value = gpu->identity.thread_count;
+ break;
+
+ case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
+ *value = gpu->identity.vertex_cache_size;
+ break;
+
+ case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
+ *value = gpu->identity.shader_core_count;
+ break;
+
+ case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
+ *value = gpu->identity.pixel_pipes;
+ break;
+
+ case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
+ *value = gpu->identity.vertex_output_buffer_size;
+ break;
+
+ case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
+ *value = gpu->identity.buffer_size;
+ break;
+
+ case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
+ *value = gpu->identity.instruction_count;
+ break;
+
+ case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
+ *value = gpu->identity.num_constants;
+ break;
+
+ default:
+ DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
+{
+ if (gpu->identity.minor_features0 &
+ chipMinorFeatures0_MORE_MINOR_FEATURES) {
+ u32 specs[2];
+
+ specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
+ specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
+
+ gpu->identity.stream_count =
+ (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
+ >> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
+ gpu->identity.register_max =
+ (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
+ >> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
+ gpu->identity.thread_count =
+ (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
+ >> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
+ gpu->identity.vertex_cache_size =
+ (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
+ >> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
+ gpu->identity.shader_core_count =
+ (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
+ >> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
+ gpu->identity.pixel_pipes =
+ (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
+ >> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
+ gpu->identity.vertex_output_buffer_size =
+ (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
+ >> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;
+
+ gpu->identity.buffer_size =
+ (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
+ >> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
+ gpu->identity.instruction_count =
+ (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
+ >> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
+ gpu->identity.num_constants =
+ (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
+ >> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
+ }
+
+ /* Fill in the stream count if not specified */
+ if (gpu->identity.stream_count == 0) {
+ if (gpu->identity.model >= 0x1000)
+ gpu->identity.stream_count = 4;
+ else
+ gpu->identity.stream_count = 1;
+ }
+
+ /* Convert the register max value */
+ if (gpu->identity.register_max)
+ gpu->identity.register_max = 1 << gpu->identity.register_max;
+ else if (gpu->identity.model == 0x0400)
+ gpu->identity.register_max = 32;
+ else
+ gpu->identity.register_max = 64;
+
+ /* Convert thread count */
+ if (gpu->identity.thread_count)
+ gpu->identity.thread_count = 1 << gpu->identity.thread_count;
+ else if (gpu->identity.model == 0x0400)
+ gpu->identity.thread_count = 64;
+ else if (gpu->identity.model == 0x0500 ||
+ gpu->identity.model == 0x0530)
+ gpu->identity.thread_count = 128;
+ else
+ gpu->identity.thread_count = 256;
+
+ if (gpu->identity.vertex_cache_size == 0)
+ gpu->identity.vertex_cache_size = 8;
+
+ if (gpu->identity.shader_core_count == 0) {
+ if (gpu->identity.model >= 0x1000)
+ gpu->identity.shader_core_count = 2;
+ else
+ gpu->identity.shader_core_count = 1;
+ }
+
+ if (gpu->identity.pixel_pipes == 0)
+ gpu->identity.pixel_pipes = 1;
+
+ /* Convert vertex buffer size */
+ if (gpu->identity.vertex_output_buffer_size) {
+ gpu->identity.vertex_output_buffer_size =
+ 1 << gpu->identity.vertex_output_buffer_size;
+ } else if (gpu->identity.model == 0x0400) {
+ if (gpu->identity.revision < 0x4000)
+ gpu->identity.vertex_output_buffer_size = 512;
+ else if (gpu->identity.revision < 0x4200)
+ gpu->identity.vertex_output_buffer_size = 256;
+ else
+ gpu->identity.vertex_output_buffer_size = 128;
+ } else {
+ gpu->identity.vertex_output_buffer_size = 512;
+ }
+
+ switch (gpu->identity.instruction_count) {
+ case 0:
+ if ((gpu->identity.model == 0x2000 &&
+ gpu->identity.revision == 0x5108) ||
+ gpu->identity.model == 0x880)
+ gpu->identity.instruction_count = 512;
+ else
+ gpu->identity.instruction_count = 256;
+ break;
+
+ case 1:
+ gpu->identity.instruction_count = 1024;
+ break;
+
+ case 2:
+ gpu->identity.instruction_count = 2048;
+ break;
+
+ default:
+ gpu->identity.instruction_count = 256;
+ break;
+ }
+
+ if (gpu->identity.num_constants == 0)
+ gpu->identity.num_constants = 168;
+}
+
+static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
+{
+ u32 chipIdentity;
+
+ chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
+
+ /* Special case for older graphics cores. */
+ if (VIVS_HI_CHIP_IDENTITY_FAMILY(chipIdentity) == 0x01) {
+ gpu->identity.model = 0x500; /* gc500 */
+ gpu->identity.revision = VIVS_HI_CHIP_IDENTITY_REVISION(chipIdentity);
+ } else {
+ gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
+ gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
+
+ /*
+ * !!!! HACK ALERT !!!!
+ * Because people change device IDs without letting software
+ * know about it - here is the hack to make it all look the
+ * same. Only for GC400 family.
+ */
+ if ((gpu->identity.model & 0xff00) == 0x0400 &&
+ gpu->identity.model != 0x0420) {
+ gpu->identity.model = gpu->identity.model & 0x0400;
+ }
+
+ /* Another special case */
+ if (gpu->identity.model == 0x300 &&
+ gpu->identity.revision == 0x2201) {
+ u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
+ u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
+
+ if (chipDate == 0x20080814 && chipTime == 0x12051100) {
+ /*
+ * This IP has an ECO; put the correct
+ * revision in it.
+ */
+ gpu->identity.revision = 0x1051;
+ }
+ }
+ }
+
+ dev_info(gpu->dev, "model: GC%x, revision: %x\n",
+ gpu->identity.model, gpu->identity.revision);
+
+ gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
+
+ /* Disable fast clear on GC700. */
+ if (gpu->identity.model == 0x700)
+ gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
+
+ if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) ||
+ (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) {
+
+ /*
+ * GC500 rev 1.x and GC300 rev < 2.0 don't have these
+ * registers.
+ */
+ gpu->identity.minor_features0 = 0;
+ gpu->identity.minor_features1 = 0;
+ gpu->identity.minor_features2 = 0;
+ gpu->identity.minor_features3 = 0;
+ } else
+ gpu->identity.minor_features0 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
+
+ if (gpu->identity.minor_features0 &
+ chipMinorFeatures0_MORE_MINOR_FEATURES) {
+ gpu->identity.minor_features1 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
+ gpu->identity.minor_features2 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
+ gpu->identity.minor_features3 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
+ }
+
+ /* GC600 idle register reports zero bits where modules aren't present */
+ if (gpu->identity.model == chipModel_GC600) {
+ gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
+ VIVS_HI_IDLE_STATE_RA |
+ VIVS_HI_IDLE_STATE_SE |
+ VIVS_HI_IDLE_STATE_PA |
+ VIVS_HI_IDLE_STATE_SH |
+ VIVS_HI_IDLE_STATE_PE |
+ VIVS_HI_IDLE_STATE_DE |
+ VIVS_HI_IDLE_STATE_FE;
+ } else {
+ gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
+ }
+
+ etnaviv_hw_specs(gpu);
+}
+
+static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
+{
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
+ VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
+}
+
+static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
+{
+ u32 control, idle;
+ unsigned long timeout;
+ bool failed = true;
+
+ /* TODO
+ *
+ * - clock gating
+ * - pulse eater
+ * - what about VG?
+ */
+
+ /* We hope that the GPU resets in under one second */
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (time_is_after_jiffies(timeout)) {
+ control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+ VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
+
+ /* enable clock */
+ etnaviv_gpu_load_clock(gpu, control);
+
+ /* Wait for stable clock. Vivante's code waited for 1ms */
+ usleep_range(1000, 10000);
+
+ /* isolate the GPU. */
+ control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
+ /* set soft reset. */
+ control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
+ /* wait for reset. */
+ msleep(1);
+
+ /* reset soft reset bit. */
+ control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
+ /* reset GPU isolation. */
+ control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
+ /* read idle register. */
+ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+
+ /* try resetting again if the FE is not idle */
+ if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
+ dev_dbg(gpu->dev, "FE is not idle\n");
+ continue;
+ }
+
+ /* read reset register. */
+ control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+
+ /* is the GPU idle? */
+ if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
+ ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
+ dev_dbg(gpu->dev, "GPU is not idle\n");
+ continue;
+ }
+
+ failed = false;
+ break;
+ }
+
+ if (failed) {
+ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+ control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+
+ dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
+ idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
+ control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
+ control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
+
+ return -EBUSY;
+ }
+
+ /* We rely on the GPU running, so program the clock */
+ control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+ VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
+
+ /* enable clock */
+ etnaviv_gpu_load_clock(gpu, control);
+
+ return 0;
+}
+
+static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
+{
+ u16 prefetch;
+
+ if (gpu->identity.model == chipModel_GC320 &&
+ gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
+ (gpu->identity.revision == 0x5007 ||
+ gpu->identity.revision == 0x5220)) {
+ u32 mc_memory_debug;
+
+ mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
+
+ if (gpu->identity.revision == 0x5007)
+ mc_memory_debug |= 0x0c;
+ else
+ mc_memory_debug |= 0x08;
+
+ gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
+ }
+
+ /*
+ * Update the GPU AXI cache attributes to "cacheable, no allocate".
+ * This is necessary to prevent the iMX6 SoC from locking up.
+ */
+ gpu_write(gpu, VIVS_HI_AXI_CONFIG,
+ VIVS_HI_AXI_CONFIG_AWCACHE(2) |
+ VIVS_HI_AXI_CONFIG_ARCACHE(2));
+
+ /* GC2000 rev 5108 needs a special bus config */
+ if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) {
+ u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
+ bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
+ VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
+ bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
+ VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
+ gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
+ }
+
+ /* set base addresses */
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
+
+ /* setup the MMU page table pointers */
+ etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);
+
+ /* Start command processor */
+ prefetch = etnaviv_buffer_init(gpu);
+
+ gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
+ gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
+ gpu->buffer->paddr - gpu->memory_base);
+ gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
+ VIVS_FE_COMMAND_CONTROL_ENABLE |
+ VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
+}
+
+int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
+{
+ int ret, i;
+ struct iommu_domain *iommu;
+ enum etnaviv_iommu_version version;
+ bool mmuv2;
+
+ ret = pm_runtime_get_sync(gpu->dev);
+ if (ret < 0)
+ return ret;
+
+ etnaviv_hw_identify(gpu);
+
+ if (gpu->identity.model == 0) {
+ dev_err(gpu->dev, "Unknown GPU model\n");
+ pm_runtime_put_autosuspend(gpu->dev);
+ return -ENXIO;
+ }
+
+ ret = etnaviv_hw_reset(gpu);
+ if (ret)
+ goto fail;
+
+ /* Set up the IOMMU. Eventually we will (I think) do this once per context
+ * and have separate page tables per context. For now, to keep things
+ * simple and to get something working, just use a single address space:
+ */
+ mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
+ dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);
+
+ if (!mmuv2) {
+ iommu = etnaviv_iommu_domain_alloc(gpu);
+ version = ETNAVIV_IOMMU_V1;
+ } else {
+ iommu = etnaviv_iommu_v2_domain_alloc(gpu);
+ version = ETNAVIV_IOMMU_V2;
+ }
+
+ if (!iommu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* TODO: we will leak memory here - fix it! */
+
+ gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
+ if (!gpu->mmu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* Create buffer: */
+ gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
+ if (!gpu->buffer) {
+ ret = -ENOMEM;
+ dev_err(gpu->dev, "could not create command buffer\n");
+ goto fail;
+ }
+ if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
+ ret = -EINVAL;
+ dev_err(gpu->dev,
+ "command buffer outside valid memory window\n");
+ goto free_buffer;
+ }
+
+ /* Setup event management */
+ spin_lock_init(&gpu->event_spinlock);
+ init_completion(&gpu->event_free);
+ for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
+ gpu->event[i].used = false;
+ complete(&gpu->event_free);
+ }
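+ /*
+ * event_free acts as a counting semaphore: one complete() per free
+ * event slot, matched by wait_for_completion_timeout() in
+ * event_alloc().
+ */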
+
+ /* Now program the hardware */
+ mutex_lock(&gpu->lock);
+ etnaviv_gpu_hw_init(gpu);
+ mutex_unlock(&gpu->lock);
+
+ pm_runtime_mark_last_busy(gpu->dev);
+ pm_runtime_put_autosuspend(gpu->dev);
+
+ return 0;
+
+free_buffer:
+ etnaviv_gpu_cmdbuf_free(gpu->buffer);
+ gpu->buffer = NULL;
+fail:
+ pm_runtime_mark_last_busy(gpu->dev);
+ pm_runtime_put_autosuspend(gpu->dev);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct dma_debug {
+ u32 address[2];
+ u32 state[2];
+};
+
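+/*
+ * Sample the FE DMA address and state, then re-read them up to 500
+ * times; if neither value ever changes, the front end is likely stuck.
+ */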
+static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
+{
+ u32 i;
+
+ debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+ debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
+
+ for (i = 0; i < 500; i++) {
+ debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+ debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
+
+ if (debug->address[0] != debug->address[1])
+ break;
+
+ if (debug->state[0] != debug->state[1])
+ break;
+ }
+}
+
+int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
+{
+ struct dma_debug debug;
+ u32 dma_lo, dma_hi, axi, idle;
+ int ret;
+
+ seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
+
+ ret = pm_runtime_get_sync(gpu->dev);
+ if (ret < 0)
+ return ret;
+
+ dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
+ dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
+ axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
+ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+
+ verify_dma(gpu, &debug);
+
+ seq_puts(m, "\tfeatures\n");
+ seq_printf(m, "\t minor_features0: 0x%08x\n",
+ gpu->identity.minor_features0);
+ seq_printf(m, "\t minor_features1: 0x%08x\n",
+ gpu->identity.minor_features1);
+ seq_printf(m, "\t minor_features2: 0x%08x\n",
+ gpu->identity.minor_features2);
+ seq_printf(m, "\t minor_features3: 0x%08x\n",
+ gpu->identity.minor_features3);
+
+ seq_puts(m, "\tspecs\n");
+ seq_printf(m, "\t stream_count: %d\n",
+ gpu->identity.stream_count);
+ seq_printf(m, "\t register_max: %d\n",
+ gpu->identity.register_max);
+ seq_printf(m, "\t thread_count: %d\n",
+ gpu->identity.thread_count);
+ seq_printf(m, "\t vertex_cache_size: %d\n",
+ gpu->identity.vertex_cache_size);
+ seq_printf(m, "\t shader_core_count: %d\n",
+ gpu->identity.shader_core_count);
+ seq_printf(m, "\t pixel_pipes: %d\n",
+ gpu->identity.pixel_pipes);
+ seq_printf(m, "\t vertex_output_buffer_size: %d\n",
+ gpu->identity.vertex_output_buffer_size);
+ seq_printf(m, "\t buffer_size: %d\n",
+ gpu->identity.buffer_size);
+ seq_printf(m, "\t instruction_count: %d\n",
+ gpu->identity.instruction_count);
+ seq_printf(m, "\t num_constants: %d\n",
+ gpu->identity.num_constants);
+
+ seq_printf(m, "\taxi: 0x%08x\n", axi);
+ seq_printf(m, "\tidle: 0x%08x\n", idle);
+ idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
+ if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
+ seq_puts(m, "\t FE is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
+ seq_puts(m, "\t DE is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
+ seq_puts(m, "\t PE is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
+ seq_puts(m, "\t SH is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
+ seq_puts(m, "\t PA is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
+ seq_puts(m, "\t SE is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
+ seq_puts(m, "\t RA is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
+ seq_puts(m, "\t TX is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
+ seq_puts(m, "\t VG is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
+ seq_puts(m, "\t IM is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
+ seq_puts(m, "\t FP is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
+ seq_puts(m, "\t TS is not idle\n");
+ if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
+ seq_puts(m, "\t AXI low power mode\n");
+
+ if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
+ u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
+ u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
+ u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
+
+ seq_puts(m, "\tMC\n");
+ seq_printf(m, "\t read0: 0x%08x\n", read0);
+ seq_printf(m, "\t read1: 0x%08x\n", read1);
+ seq_printf(m, "\t write: 0x%08x\n", write);
+ }
+
+ seq_puts(m, "\tDMA ");
+
+ if (debug.address[0] == debug.address[1] &&
+ debug.state[0] == debug.state[1]) {
+ seq_puts(m, "seems to be stuck\n");
+ } else if (debug.address[0] == debug.address[1]) {
+ seq_puts(m, "adress is constant\n");
+ } else {
+ seq_puts(m, "is runing\n");
+ }
+
+ seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
+ seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
+ seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
+ seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
+ seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
+ dma_lo, dma_hi);
+
+ ret = 0;
+
+ pm_runtime_mark_last_busy(gpu->dev);
+ pm_runtime_put_autosuspend(gpu->dev);
+
+ return ret;
+}
+#endif
+
+/*
+ * Power Management:
+ */
+static int enable_clk(struct etnaviv_gpu *gpu)
+{
+ if (gpu->clk_core)
+ clk_prepare_enable(gpu->clk_core);
+ if (gpu->clk_shader)
+ clk_prepare_enable(gpu->clk_shader);
+
+ return 0;
+}
+
+static int disable_clk(struct etnaviv_gpu *gpu)
+{
+ if (gpu->clk_core)
+ clk_disable_unprepare(gpu->clk_core);
+ if (gpu->clk_shader)
+ clk_disable_unprepare(gpu->clk_shader);
+
+ return 0;
+}
+
+static int enable_axi(struct etnaviv_gpu *gpu)
+{
+ if (gpu->clk_bus)
+ clk_prepare_enable(gpu->clk_bus);
+
+ return 0;
+}
+
+static int disable_axi(struct etnaviv_gpu *gpu)
+{
+ if (gpu->clk_bus)
+ clk_disable_unprepare(gpu->clk_bus);
+
+ return 0;
+}
+
+/*
+ * Hangcheck detection for locked gpu:
+ */
+static void recover_worker(struct work_struct *work)
+{
+ struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
+ recover_work);
+ unsigned long flags;
+ unsigned int i;
+
+ dev_err(gpu->dev, "hangcheck recover!\n");
+
+ if (pm_runtime_get_sync(gpu->dev) < 0)
+ return;
+
+ mutex_lock(&gpu->lock);
+
+ /* Only catch the first event, or when manually re-armed */
+ if (etnaviv_dump_core) {
+ etnaviv_core_dump(gpu);
+ etnaviv_dump_core = false;
+ }
+
+ etnaviv_hw_reset(gpu);
+
+ /* complete all events, the GPU won't do it after the reset */
+ spin_lock_irqsave(&gpu->event_spinlock, flags);
+ for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
+ if (!gpu->event[i].used)
+ continue;
+ fence_signal(gpu->event[i].fence);
+ gpu->event[i].fence = NULL;
+ gpu->event[i].used = false;
+ complete(&gpu->event_free);
+ /*
+ * Decrement the PM count for each stuck event. This is safe
+ * even in atomic context as we use ASYNC RPM here.
+ */
+ pm_runtime_put_autosuspend(gpu->dev);
+ }
+ spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+ gpu->completed_fence = gpu->active_fence;
+
+ etnaviv_gpu_hw_init(gpu);
+ gpu->switch_context = true;
+
+ mutex_unlock(&gpu->lock);
+ pm_runtime_mark_last_busy(gpu->dev);
+ pm_runtime_put_autosuspend(gpu->dev);
+
+ /* Retire the buffer objects in a work */
+ etnaviv_queue_work(gpu->drm, &gpu->retire_work);
+}
+
+static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
+{
+ DBG("%s", dev_name(gpu->dev));
+ mod_timer(&gpu->hangcheck_timer,
+ round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
+}
+
+static void hangcheck_handler(unsigned long data)
+{
+ struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
+ u32 fence = gpu->completed_fence;
+ bool progress = false;
+
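+ /*
+ * Progress means either the completed fence advanced, or the FE
+ * DMA address moved by more than the 16 bytes an idle FE covers
+ * while spinning in its WAIT/LINK loop. Recovery is scheduled only
+ * when neither changed and work is still outstanding.
+ */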
+ if (fence != gpu->hangcheck_fence) {
+ gpu->hangcheck_fence = fence;
+ progress = true;
+ }
+
+ if (!progress) {
+ u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+ int change = dma_addr - gpu->hangcheck_dma_addr;
+
+ if (change < 0 || change > 16) {
+ gpu->hangcheck_dma_addr = dma_addr;
+ progress = true;
+ }
+ }
+
+ if (!progress && fence_after(gpu->active_fence, fence)) {
+ dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
+ dev_err(gpu->dev, " completed fence: %u\n", fence);
+ dev_err(gpu->dev, " active fence: %u\n",
+ gpu->active_fence);
+ etnaviv_queue_work(gpu->drm, &gpu->recover_work);
+ }
+
+ /* if still more pending work, reset the hangcheck timer: */
+ if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
+ hangcheck_timer_reset(gpu);
+}
+
+static void hangcheck_disable(struct etnaviv_gpu *gpu)
+{
+ del_timer_sync(&gpu->hangcheck_timer);
+ cancel_work_sync(&gpu->recover_work);
+}
+
+/* fence object management */
+struct etnaviv_fence {
+ struct etnaviv_gpu *gpu;
+ struct fence base;
+};
+
+static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
+{
+ return container_of(fence, struct etnaviv_fence, base);
+}
+
+static const char *etnaviv_fence_get_driver_name(struct fence *fence)
+{
+ return "etnaviv";
+}
+
+static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
+{
+ struct etnaviv_fence *f = to_etnaviv_fence(fence);
+
+ return dev_name(f->gpu->dev);
+}
+
+static bool etnaviv_fence_enable_signaling(struct fence *fence)
+{
+ return true;
+}
+
+static bool etnaviv_fence_signaled(struct fence *fence)
+{
+ struct etnaviv_fence *f = to_etnaviv_fence(fence);
+
+ return fence_completed(f->gpu, f->base.seqno);
+}
+
+static void etnaviv_fence_release(struct fence *fence)
+{
+ struct etnaviv_fence *f = to_etnaviv_fence(fence);
+
+ kfree_rcu(f, base.rcu);
+}
+
+static const struct fence_ops etnaviv_fence_ops = {
+ .get_driver_name = etnaviv_fence_get_driver_name,
+ .get_timeline_name = etnaviv_fence_get_timeline_name,
+ .enable_signaling = etnaviv_fence_enable_signaling,
+ .signaled = etnaviv_fence_signaled,
+ .wait = fence_default_wait,
+ .release = etnaviv_fence_release,
+};
+
+static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_fence *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ f->gpu = gpu;
+
+ fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
+ gpu->fence_context, ++gpu->next_fence);
+
+ return &f->base;
+}
+
+int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
+ unsigned int context, bool exclusive)
+{
+ struct reservation_object *robj = etnaviv_obj->resv;
+ struct reservation_object_list *fobj;
+ struct fence *fence;
+ int i, ret;
+
+ if (!exclusive) {
+ ret = reservation_object_reserve_shared(robj);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * If we have any shared fences, then the exclusive fence
+ * should be ignored as it will already have been signalled.
+ */
+ fobj = reservation_object_get_list(robj);
+ if (!fobj || fobj->shared_count == 0) {
+ /* Wait on any existing exclusive fence which isn't our own */
+ fence = reservation_object_get_excl(robj);
+ if (fence && fence->context != context) {
+ ret = fence_wait(fence, true);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (!exclusive || !fobj)
+ return 0;
+
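+ /* For exclusive (write) access, also wait out all shared fences */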
+ for (i = 0; i < fobj->shared_count; i++) {
+ fence = rcu_dereference_protected(fobj->shared[i],
+ reservation_object_held(robj));
+ if (fence->context != context) {
+ ret = fence_wait(fence, true);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * event management:
+ */
+
+static unsigned int event_alloc(struct etnaviv_gpu *gpu)
+{
+ unsigned long ret, flags;
+ unsigned int i, event = ~0U;
+
+ ret = wait_for_completion_timeout(&gpu->event_free,
+ msecs_to_jiffies(10 * 10000));
+ if (!ret)
+ dev_err(gpu->dev, "wait_for_completion_timeout failed");
+
+ spin_lock_irqsave(&gpu->event_spinlock, flags);
+
+ /* find first free event */
+ for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
+ if (gpu->event[i].used == false) {
+ gpu->event[i].used = true;
+ event = i;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+
+ return event;
+}
+
+static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpu->event_spinlock, flags);
+
+ if (gpu->event[event].used == false) {
+ dev_warn(gpu->dev, "event %u is already marked as free\n",
+ event);
+ spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+ } else {
+ gpu->event[event].used = false;
+ spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+
+ complete(&gpu->event_free);
+ }
+}
+
+/*
+ * Cmdstream submission/retirement:
+ */
+
+struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
+ size_t nr_bos)
+{
+ struct etnaviv_cmdbuf *cmdbuf;
+ size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]),
+ sizeof(*cmdbuf));
+
+ cmdbuf = kzalloc(sz, GFP_KERNEL);
+ if (!cmdbuf)
+ return NULL;
+
+ cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
+ GFP_KERNEL);
+ if (!cmdbuf->vaddr) {
+ kfree(cmdbuf);
+ return NULL;
+ }
+
+ cmdbuf->gpu = gpu;
+ cmdbuf->size = size;
+
+ return cmdbuf;
+}
+
+void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
+{
+ dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
+ cmdbuf->vaddr, cmdbuf->paddr);
+ kfree(cmdbuf);
+}
+
+static void retire_worker(struct work_struct *work)
+{
+ struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
+ retire_work);
+ u32 fence = gpu->completed_fence;
+ struct etnaviv_cmdbuf *cmdbuf, *tmp;
+ unsigned int i;
+
+ mutex_lock(&gpu->lock);
+ list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
+ if (!fence_is_signaled(cmdbuf->fence))
+ break;
+
+ list_del(&cmdbuf->node);
+ fence_put(cmdbuf->fence);
+
+ for (i = 0; i < cmdbuf->nr_bos; i++) {
+ struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];
+
+ atomic_dec(&etnaviv_obj->gpu_active);
+ /* drop the refcount taken in etnaviv_gpu_submit */
+ etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
+ }
+
+ etnaviv_gpu_cmdbuf_free(cmdbuf);
+ }
+
+ gpu->retired_fence = fence;
+
+ mutex_unlock(&gpu->lock);
+
+ wake_up_all(&gpu->fence_event);
+}
+
+int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
+ u32 fence, struct timespec *timeout)
+{
+ int ret;
+
+ if (fence_after(fence, gpu->next_fence)) {
+ DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
+ fence, gpu->next_fence);
+ return -EINVAL;
+ }
+
+ if (!timeout) {
+ /* No timeout was requested: just test for completion */
+ ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
+ } else {
+ unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
+
+ ret = wait_event_interruptible_timeout(gpu->fence_event,
+ fence_completed(gpu, fence),
+ remaining);
+ if (ret == 0) {
+ DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
+ fence, gpu->retired_fence,
+ gpu->completed_fence);
+ ret = -ETIMEDOUT;
+ } else if (ret != -ERESTARTSYS) {
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Wait for an object to become inactive. This, on its own, is not race
+ * free: the object is moved by the retire worker off the active list, and
+ * then the iova is put. Moreover, the object could be re-submitted just
+ * after we notice that it's become inactive.
+ *
+ * Although the retirement happens under the gpu lock, we don't want to hold
+ * that lock in this function while waiting.
+ */
+int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
+ struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
+{
+ unsigned long remaining;
+ long ret;
+
+ if (!timeout)
+ return !is_active(etnaviv_obj) ? 0 : -EBUSY;
+
+ remaining = etnaviv_timeout_to_jiffies(timeout);
+
+ ret = wait_event_interruptible_timeout(gpu->fence_event,
+ !is_active(etnaviv_obj),
+ remaining);
+ if (ret > 0) {
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+
+ /* Synchronise with the retire worker */
+ flush_workqueue(priv->wq);
+ return 0;
+ } else if (ret == -ERESTARTSYS) {
+ return -ERESTARTSYS;
+ } else {
+ return -ETIMEDOUT;
+ }
+}
+
+int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
+{
+ return pm_runtime_get_sync(gpu->dev);
+}
+
+void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
+{
+ pm_runtime_mark_last_busy(gpu->dev);
+ pm_runtime_put_autosuspend(gpu->dev);
+}
+
+/* add bo's to gpu's ring, and kick gpu: */
+int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
+ struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
+{
+ struct fence *fence;
+ unsigned int event, i;
+ int ret;
+
+ ret = etnaviv_gpu_pm_get_sync(gpu);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&gpu->lock);
+
+ /*
+ * TODO
+ *
+ * - flush
+ * - data endian
+ * - prefetch
+ *
+ */
+
+ event = event_alloc(gpu);
+ if (unlikely(event == ~0U)) {
+ DRM_ERROR("no free event\n");
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ fence = etnaviv_gpu_fence_alloc(gpu);
+ if (!fence) {
+ event_free(gpu, event);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ gpu->event[event].fence = fence;
+ submit->fence = fence->seqno;
+ gpu->active_fence = submit->fence;
+
+ if (gpu->lastctx != cmdbuf->ctx) {
+ gpu->mmu->need_flush = true;
+ gpu->switch_context = true;
+ gpu->lastctx = cmdbuf->ctx;
+ }
+
+ etnaviv_buffer_queue(gpu, event, cmdbuf);
+
+ cmdbuf->fence = fence;
+ list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
+
+ /* We're committed to adding this command buffer, hold a PM reference */
+ pm_runtime_get_noresume(gpu->dev);
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ u32 iova;
+
+ /* Each cmdbuf takes a refcount on the iova */
+ etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
+ cmdbuf->bo[i] = etnaviv_obj;
+ atomic_inc(&etnaviv_obj->gpu_active);
+
+ if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
+ reservation_object_add_excl_fence(etnaviv_obj->resv,
+ fence);
+ else
+ reservation_object_add_shared_fence(etnaviv_obj->resv,
+ fence);
+ }
+ cmdbuf->nr_bos = submit->nr_bos;
+ hangcheck_timer_reset(gpu);
+ ret = 0;
+
+out_unlock:
+ mutex_unlock(&gpu->lock);
+
+ etnaviv_gpu_pm_put(gpu);
+
+ return ret;
+}
+
+/*
+ * Init/Cleanup:
+ */
+static irqreturn_t irq_handler(int irq, void *data)
+{
+ struct etnaviv_gpu *gpu = data;
+ irqreturn_t ret = IRQ_NONE;
+
+ u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
+
+ if (intr != 0) {
+ int event;
+
+ pm_runtime_mark_last_busy(gpu->dev);
+
+ dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
+
+ if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
+ dev_err(gpu->dev, "AXI bus error\n");
+ intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
+ }
+
+ while ((event = ffs(intr)) != 0) {
+ struct fence *fence;
+
+ event -= 1;
+
+ intr &= ~(1 << event);
+
+ dev_dbg(gpu->dev, "event %u\n", event);
+
+ fence = gpu->event[event].fence;
+ gpu->event[event].fence = NULL;
+ fence_signal(fence);
+
+ /*
+ * Events can be processed out of order. E.g.:
+ * - allocate and queue event 0
+ * - allocate event 1
+ * - event 0 completes, we process it
+ * - allocate and queue event 0
+ * - event 1 and event 0 complete
+ * we can end up processing event 0 first, then 1.
+ */
+ if (fence_after(fence->seqno, gpu->completed_fence))
+ gpu->completed_fence = fence->seqno;
+
+ event_free(gpu, event);
+
+ /*
+ * We need to balance the runtime PM count caused by
+ * each submission. Upon submission, we increment
+ * the runtime PM counter, and allocate one event.
+ * So here, we put the runtime PM count for each
+ * completed event.
+ */
+ pm_runtime_put_autosuspend(gpu->dev);
+ }
+
+ /* Retire the buffer objects from a worker */
+ etnaviv_queue_work(gpu->drm, &gpu->retire_work);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
+{
+ int ret;
+
+ ret = enable_clk(gpu);
+ if (ret)
+ return ret;
+
+ ret = enable_axi(gpu);
+ if (ret) {
+ disable_clk(gpu);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
+{
+ int ret;
+
+ ret = disable_axi(gpu);
+ if (ret)
+ return ret;
+
+ ret = disable_clk(gpu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+{
+ if (gpu->buffer) {
+ unsigned long timeout;
+
+ /* Replace the last WAIT with END */
+ etnaviv_buffer_end(gpu);
+
+ /*
+ * We know that only the FE is busy here, so this should
+ * happen quickly (as the WAIT is only 200 cycles). If
+ * we fail, just warn and continue.
+ */
+ timeout = jiffies + msecs_to_jiffies(100);
+ do {
+ u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+
+ if ((idle & gpu->idle_mask) == gpu->idle_mask)
+ break;
+
+ if (time_is_before_jiffies(timeout)) {
+ dev_warn(gpu->dev,
+ "timed out waiting for idle: idle=0x%x\n",
+ idle);
+ break;
+ }
+
+ udelay(5);
+ } while (1);
+ }
+
+ return etnaviv_gpu_clk_disable(gpu);
+}
+
+#ifdef CONFIG_PM
+static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
+{
+ u32 clock;
+ int ret;
+
+ ret = mutex_lock_killable(&gpu->lock);
+ if (ret)
+ return ret;
+
+ clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+ VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
+
+ etnaviv_gpu_load_clock(gpu, clock);
+ etnaviv_gpu_hw_init(gpu);
+
+ gpu->switch_context = true;
+
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+}
+#endif
+
+static int etnaviv_gpu_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = data;
+ struct etnaviv_drm_private *priv = drm->dev_private;
+ struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+ int ret;
+
+#ifdef CONFIG_PM
+ ret = pm_runtime_get_sync(gpu->dev);
+#else
+ ret = etnaviv_gpu_clk_enable(gpu);
+#endif
+ if (ret < 0)
+ return ret;
+
+ gpu->drm = drm;
+ gpu->fence_context = fence_context_alloc(1);
+ spin_lock_init(&gpu->fence_spinlock);
+
+ INIT_LIST_HEAD(&gpu->active_cmd_list);
+ INIT_WORK(&gpu->retire_work, retire_worker);
+ INIT_WORK(&gpu->recover_work, recover_worker);
+ init_waitqueue_head(&gpu->fence_event);
+
+ setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
+ (unsigned long)gpu);
+
+ priv->gpu[priv->num_gpus++] = gpu;
+
+ pm_runtime_mark_last_busy(gpu->dev);
+ pm_runtime_put_autosuspend(gpu->dev);
+
+ return 0;
+}
+
+static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+
+ DBG("%s", dev_name(gpu->dev));
+
+ hangcheck_disable(gpu);
+
+#ifdef CONFIG_PM
+ pm_runtime_get_sync(gpu->dev);
+ pm_runtime_put_sync_suspend(gpu->dev);
+#else
+ etnaviv_gpu_hw_suspend(gpu);
+#endif
+
+ if (gpu->buffer) {
+ etnaviv_gpu_cmdbuf_free(gpu->buffer);
+ gpu->buffer = NULL;
+ }
+
+ if (gpu->mmu) {
+ etnaviv_iommu_destroy(gpu->mmu);
+ gpu->mmu = NULL;
+ }
+
+ gpu->drm = NULL;
+}
+
+static const struct component_ops gpu_ops = {
+ .bind = etnaviv_gpu_bind,
+ .unbind = etnaviv_gpu_unbind,
+};
+
+static const struct of_device_id etnaviv_gpu_match[] = {
+ {
+ .compatible = "vivante,gc"
+ },
+ { /* sentinel */ }
+};
+
+static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct etnaviv_gpu *gpu;
+ int err = 0;
+
+ gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
+ if (!gpu)
+ return -ENOMEM;
+
+ gpu->dev = &pdev->dev;
+ mutex_init(&gpu->lock);
+
+ /*
+ * Set the GPU base address to the start of physical memory. This
+ * ensures that if we have up to 2GB, the v1 MMU can address the
+ * highest memory. This is important as command buffers may be
+ * allocated outside of this limit.
+ */
+ gpu->memory_base = PHYS_OFFSET;
+
+ /* Map registers: */
+ gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
+ if (IS_ERR(gpu->mmio))
+ return PTR_ERR(gpu->mmio);
+
+ /* Get Interrupt: */
+ gpu->irq = platform_get_irq(pdev, 0);
+ if (gpu->irq < 0) {
+ err = gpu->irq;
+ dev_err(dev, "failed to get irq: %d\n", err);
+ goto fail;
+ }
+
+ err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
+ dev_name(gpu->dev), gpu);
+ if (err) {
+ dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
+ goto fail;
+ }
+
+ /* Get Clocks: */
+ gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
+ DBG("clk_bus: %p", gpu->clk_bus);
+ if (IS_ERR(gpu->clk_bus))
+ gpu->clk_bus = NULL;
+
+ gpu->clk_core = devm_clk_get(&pdev->dev, "core");
+ DBG("clk_core: %p", gpu->clk_core);
+ if (IS_ERR(gpu->clk_core))
+ gpu->clk_core = NULL;
+
+ gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
+ DBG("clk_shader: %p", gpu->clk_shader);
+ if (IS_ERR(gpu->clk_shader))
+ gpu->clk_shader = NULL;
+
+ /* TODO: figure out max mapped size */
+ dev_set_drvdata(dev, gpu);
+
+ /*
+ * We treat the device as initially suspended. The runtime PM
+ * autosuspend delay is rather arbitrary: no measurements have
+ * yet been performed to determine an appropriate value.
+ */
+ pm_runtime_use_autosuspend(gpu->dev);
+ pm_runtime_set_autosuspend_delay(gpu->dev, 200);
+ pm_runtime_enable(gpu->dev);
+
+ err = component_add(&pdev->dev, &gpu_ops);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register component: %d\n", err);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return err;
+}
+
+static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &gpu_ops);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int etnaviv_gpu_rpm_suspend(struct device *dev)
+{
+ struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+ u32 idle, mask;
+
+ /* If we have outstanding fences, we're not idle */
+ if (gpu->completed_fence != gpu->active_fence)
+ return -EBUSY;
+
+ /* Check whether the hardware (except FE) is idle */
+ mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
+ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
+ if (idle != mask)
+ return -EBUSY;
+
+ return etnaviv_gpu_hw_suspend(gpu);
+}
+
+static int etnaviv_gpu_rpm_resume(struct device *dev)
+{
+ struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+ int ret;
+
+ ret = etnaviv_gpu_clk_enable(gpu);
+ if (ret)
+ return ret;
+
+ /* Re-initialise the basic hardware state */
+ if (gpu->drm && gpu->buffer) {
+ ret = etnaviv_gpu_hw_resume(gpu);
+ if (ret) {
+ etnaviv_gpu_clk_disable(gpu);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
+ SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
+ NULL)
+};
+
+struct platform_driver etnaviv_gpu_driver = {
+ .driver = {
+ .name = "etnaviv-gpu",
+ .owner = THIS_MODULE,
+ .pm = &etnaviv_gpu_pm_ops,
+ .of_match_table = etnaviv_gpu_match,
+ },
+ .probe = etnaviv_gpu_platform_probe,
+ .remove = etnaviv_gpu_platform_remove,
+ .id_table = gpu_ids,
+};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
new file mode 100644
index 000000000000..c75d50359ab0
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_GPU_H__
+#define __ETNAVIV_GPU_H__
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "etnaviv_drv.h"
+
+struct etnaviv_gem_submit;
+
+struct etnaviv_chip_identity {
+ /* Chip model. */
+ u32 model;
+
+ /* Revision value. */
+ u32 revision;
+
+ /* Supported feature fields. */
+ u32 features;
+
+ /* Supported minor feature fields. */
+ u32 minor_features0;
+
+ /* Supported minor feature 1 fields. */
+ u32 minor_features1;
+
+ /* Supported minor feature 2 fields. */
+ u32 minor_features2;
+
+ /* Supported minor feature 3 fields. */
+ u32 minor_features3;
+
+ /* Number of streams supported. */
+ u32 stream_count;
+
+ /* Total number of temporary registers per thread. */
+ u32 register_max;
+
+ /* Maximum number of threads. */
+ u32 thread_count;
+
+ /* Number of shader cores. */
+ u32 shader_core_count;
+
+ /* Size of the vertex cache. */
+ u32 vertex_cache_size;
+
+ /* Number of entries in the vertex output buffer. */
+ u32 vertex_output_buffer_size;
+
+ /* Number of pixel pipes. */
+ u32 pixel_pipes;
+
+ /* Number of instructions. */
+ u32 instruction_count;
+
+ /* Number of constants. */
+ u32 num_constants;
+
+ /* Buffer size */
+ u32 buffer_size;
+};
+
+struct etnaviv_event {
+ bool used;
+ struct fence *fence;
+};
+
+struct etnaviv_cmdbuf;
+
+struct etnaviv_gpu {
+ struct drm_device *drm;
+ struct device *dev;
+ struct mutex lock;
+ struct etnaviv_chip_identity identity;
+ struct etnaviv_file_private *lastctx;
+ bool switch_context;
+
+ /* 'ring'-buffer: */
+ struct etnaviv_cmdbuf *buffer;
+
+ /* bus base address of memory */
+ u32 memory_base;
+
+ /* event management: */
+ struct etnaviv_event event[30];
+ struct completion event_free;
+ spinlock_t event_spinlock;
+
+ /* list of currently in-flight command buffers */
+ struct list_head active_cmd_list;
+
+ u32 idle_mask;
+
+ /* Fencing support */
+ u32 next_fence;
+ u32 active_fence;
+ u32 completed_fence;
+ u32 retired_fence;
+ wait_queue_head_t fence_event;
+ unsigned int fence_context;
+ spinlock_t fence_spinlock;
+
+ /* worker for handling active-list retiring: */
+ struct work_struct retire_work;
+
+ void __iomem *mmio;
+ int irq;
+
+ struct etnaviv_iommu *mmu;
+
+ /* Power Control: */
+ struct clk *clk_bus;
+ struct clk *clk_core;
+ struct clk *clk_shader;
+
+ /* Hang Detection: */
+#define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */
+#define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD)
+ struct timer_list hangcheck_timer;
+ u32 hangcheck_fence;
+ u32 hangcheck_dma_addr;
+ struct work_struct recover_work;
+};
+
+struct etnaviv_cmdbuf {
+ /* device this cmdbuf is allocated for */
+ struct etnaviv_gpu *gpu;
+ /* user context key, must be unique between all active users */
+ struct etnaviv_file_private *ctx;
+ /* cmdbuf properties */
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 size;
+ u32 user_size;
+ /* fence after which this buffer is to be disposed */
+ struct fence *fence;
+ /* target exec state */
+ u32 exec_state;
+ /* per GPU in-flight list */
+ struct list_head node;
+ /* BOs attached to this command buffer */
+ unsigned int nr_bos;
+ struct etnaviv_gem_object *bo[0];
+};
+
+static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
+{
+ etnaviv_writel(data, gpu->mmio + reg);
+}
+
+static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
+{
+ return etnaviv_readl(gpu->mmio + reg);
+}
+
+static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
+{
+ return fence_after_eq(gpu->completed_fence, fence);
+}
+
+static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence)
+{
+ return fence_after_eq(gpu->retired_fence, fence);
+}
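+
+/*
+ * Fence seqnos are 32-bit and eventually wrap, so these helpers must not
+ * compare them with a plain '>='. A minimal sketch of the usual wrap-safe
+ * comparison (assuming fence_after_eq() in etnaviv_drv.h follows this
+ * pattern):
+ *
+ *	static inline bool fence_after_eq(u32 a, u32 b)
+ *	{
+ *		return (s32)(a - b) >= 0;
+ *	}
+ *
+ * E.g. with completed_fence = 0x00000002 just after a wrap and
+ * fence = 0xfffffffe, a - b = 4 > 0, so the fence is correctly seen
+ * as completed.
+ */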
+
+int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
+
+int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
+
+#ifdef CONFIG_DEBUG_FS
+int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
+#endif
+
+int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
+ unsigned int context, bool exclusive);
+
+void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
+int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
+ u32 fence, struct timespec *timeout);
+int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
+ struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
+int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
+ struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
+struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
+ u32 size, size_t nr_bos);
+void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
+int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
+void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
+
+extern struct platform_driver etnaviv_gpu_driver;
+
+#endif /* __ETNAVIV_GPU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
new file mode 100644
index 000000000000..522cfd447892
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+#include "etnaviv_iommu.h"
+#include "state_hi.xml.h"
+
+#define PT_SIZE SZ_2M
+#define PT_ENTRIES (PT_SIZE / sizeof(u32))
+
+#define GPU_MEM_START 0x80000000
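+
+/*
+ * Size sanity check: PT_SIZE is 2MiB of 4-byte entries, so PT_ENTRIES is
+ * 512Ki. Each entry maps one 4KiB page, giving 512Ki * 4KiB = 2GiB of GPU
+ * address space, i.e. iovas 0x80000000..0xffffffff starting at
+ * GPU_MEM_START (matching the aperture set up in
+ * etnaviv_iommu_domain_alloc() below).
+ */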
+
+struct etnaviv_iommu_domain_pgtable {
+ u32 *pgtable;
+ dma_addr_t paddr;
+};
+
+struct etnaviv_iommu_domain {
+ struct iommu_domain domain;
+ struct device *dev;
+ void *bad_page_cpu;
+ dma_addr_t bad_page_dma;
+ struct etnaviv_iommu_domain_pgtable pgtable;
+ spinlock_t map_lock;
+};
+
+static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
+{
+ return container_of(domain, struct etnaviv_iommu_domain, domain);
+}
+
+static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
+ size_t size)
+{
+ pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
+ if (!pgtable->pgtable)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
+ size_t size)
+{
+ dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
+}
+
+static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
+ unsigned long iova)
+{
+ /* calculate index into page table */
+ unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
+ phys_addr_t paddr;
+
+ paddr = pgtable->pgtable[index];
+
+ return paddr;
+}
+
+static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
+ unsigned long iova, phys_addr_t paddr)
+{
+ /* calculate index into page table */
+ unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
+
+ pgtable->pgtable[index] = paddr;
+}
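+
+/*
+ * Worked example of the index calculation above: an iova of 0x80003000
+ * gives index (0x80003000 - GPU_MEM_START) / SZ_4K = 3, i.e. the fourth
+ * page-table entry.
+ */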
+
+static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
+{
+ u32 *p;
+ int ret, i;
+
+ etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+ SZ_4K,
+ &etnaviv_domain->bad_page_dma,
+ GFP_KERNEL);
+ if (!etnaviv_domain->bad_page_cpu)
+ return -ENOMEM;
+
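+ /* fill the bad page with a recognisable pattern for debugging */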
+ p = etnaviv_domain->bad_page_cpu;
+ for (i = 0; i < SZ_4K / 4; i++)
+ *p++ = 0xdead55aa;
+
+ ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
+ if (ret < 0) {
+ dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ etnaviv_domain->bad_page_cpu,
+ etnaviv_domain->bad_page_dma);
+ return ret;
+ }
+
+ for (i = 0; i < PT_ENTRIES; i++)
+ etnaviv_domain->pgtable.pgtable[i] =
+ etnaviv_domain->bad_page_dma;
+
+ spin_lock_init(&etnaviv_domain->map_lock);
+
+ return 0;
+}
+
+static void etnaviv_domain_free(struct iommu_domain *domain)
+{
+ struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+ pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
+
+ dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ etnaviv_domain->bad_page_cpu,
+ etnaviv_domain->bad_page_dma);
+
+ kfree(etnaviv_domain);
+}
+
+static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+ if (size != SZ_4K)
+ return -EINVAL;
+
+ spin_lock(&etnaviv_domain->map_lock);
+ pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
+ spin_unlock(&etnaviv_domain->map_lock);
+
+ return 0;
+}
+
+static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+ if (size != SZ_4K)
+ return -EINVAL;
+
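+ /*
+ * "Unmapping" here just points the entry back at the bad page, so
+ * stray GPU accesses hit the 0xdead55aa pattern instead of freed
+ * memory.
+ */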
+ spin_lock(&etnaviv_domain->map_lock);
+ pgtable_write(&etnaviv_domain->pgtable, iova,
+ etnaviv_domain->bad_page_dma);
+ spin_unlock(&etnaviv_domain->map_lock);
+
+ return SZ_4K;
+}
+
+static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+ return pgtable_read(&etnaviv_domain->pgtable, iova);
+}
+
+static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
+{
+ return PT_SIZE;
+}
+
+static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
+{
+ struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+ memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
+}
+
+static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
+ .ops = {
+ .domain_free = etnaviv_domain_free,
+ .map = etnaviv_iommuv1_map,
+ .unmap = etnaviv_iommuv1_unmap,
+ .iova_to_phys = etnaviv_iommu_iova_to_phys,
+ .pgsize_bitmap = SZ_4K,
+ },
+ .dump_size = etnaviv_iommuv1_dump_size,
+ .dump = etnaviv_iommuv1_dump,
+};
+
+void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
+ struct iommu_domain *domain)
+{
+ struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ u32 pgtable;
+
+ /* set page table address in MC */
+ pgtable = (u32)etnaviv_domain->pgtable.paddr;
+
+ gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
+ gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
+ gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
+ gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
+ gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
+}
+
+struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_iommu_domain *etnaviv_domain;
+ int ret;
+
+ etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
+ if (!etnaviv_domain)
+ return NULL;
+
+ etnaviv_domain->dev = gpu->dev;
+
+ etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
+ etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+ etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
+ etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
+
+ ret = __etnaviv_iommu_init(etnaviv_domain);
+ if (ret)
+ goto out_free;
+
+ return &etnaviv_domain->domain;
+
+out_free:
+ kfree(etnaviv_domain);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
new file mode 100644
index 000000000000..cf45503f6b6f
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_IOMMU_H__
+#define __ETNAVIV_IOMMU_H__
+
+#include <linux/iommu.h>
+struct etnaviv_gpu;
+
+struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
+ struct iommu_domain *domain);
+struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
+
+#endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
new file mode 100644
index 000000000000..fbb4aed3dc80
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+
+#include "etnaviv_gpu.h"
+#include "etnaviv_iommu.h"
+#include "state_hi.xml.h"
+
+
+struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu)
+{
+ /* TODO */
+ return NULL;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
new file mode 100644
index 000000000000..603ea41c5389
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_IOMMU_V2_H__
+#define __ETNAVIV_IOMMU_V2_H__
+
+#include <linux/iommu.h>
+struct etnaviv_gpu;
+
+struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
+
+#endif /* __ETNAVIV_IOMMU_V2_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
new file mode 100644
index 000000000000..6743bc648dc8
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "etnaviv_drv.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+
+static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
+ unsigned long iova, int flags, void *arg)
+{
+ DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
+ return 0;
+}
+
+int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+ struct iommu_domain *domain = iommu->domain;
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ unsigned int i, j;
+ int ret;
+
+ if (!domain || !sgt)
+ return -EINVAL;
+
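+ /*
+ * Map each segment from the start of its page: back the dma address
+ * up by the segment offset and extend the length to match, since the
+ * IOMMU maps whole pages.
+ */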
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ u32 pa = sg_dma_address(sg) - sg->offset;
+ size_t bytes = sg_dma_len(sg) + sg->offset;
+
+ VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+
+ ret = iommu_map(domain, da, pa, bytes, prot);
+ if (ret)
+ goto fail;
+
+ da += bytes;
+ }
+
+ return 0;
+
+fail:
+ da = iova;
+
+ for_each_sg(sgt->sgl, sg, i, j) {
+ size_t bytes = sg_dma_len(sg) + sg->offset;
+
+ iommu_unmap(domain, da, bytes);
+ da += bytes;
+ }
+ return ret;
+}
+
+int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+ struct sg_table *sgt, unsigned len)
+{
+ struct iommu_domain *domain = iommu->domain;
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ int i;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = sg_dma_len(sg) + sg->offset;
+ size_t unmapped;
+
+ unmapped = iommu_unmap(domain, da, bytes);
+ if (unmapped < bytes)
+ return unmapped;
+
+ VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+
+ BUG_ON(!PAGE_ALIGNED(bytes));
+
+ da += bytes;
+ }
+
+ return 0;
+}
+
+static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
+ struct etnaviv_vram_mapping *mapping)
+{
+ struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+
+ etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
+ etnaviv_obj->sgt, etnaviv_obj->base.size);
+ drm_mm_remove_node(&mapping->vram_node);
+}
+
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+ struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+ struct etnaviv_vram_mapping *mapping)
+{
+ struct etnaviv_vram_mapping *free = NULL;
+ struct sg_table *sgt = etnaviv_obj->sgt;
+ struct drm_mm_node *node;
+ int ret;
+
+ lockdep_assert_held(&etnaviv_obj->lock);
+
+ mutex_lock(&mmu->lock);
+
+ /* v1 MMU can optimize single entry (contiguous) scatterlists */
+ if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
+ u32 iova;
+
+ iova = sg_dma_address(sgt->sgl) - memory_base;
+ if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
+ mapping->iova = iova;
+ list_add_tail(&mapping->mmu_node, &mmu->mappings);
+ mutex_unlock(&mmu->lock);
+ return 0;
+ }
+ }
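+
+ /*
+ * Illustrative numbers for the fast path above (hypothetical, not
+ * from this patch): with memory_base = 0x10000000 and a contiguous
+ * 1MiB buffer at bus address 0x20000000, iova = 0x10000000, which is
+ * below 0x80000000 - 0x100000, so the buffer is reachable through the
+ * untranslated linear window and no MMU mapping is needed.
+ */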
+
+ node = &mapping->vram_node;
+ while (1) {
+ struct etnaviv_vram_mapping *m, *n;
+ struct list_head list;
+ bool found;
+
+ ret = drm_mm_insert_node_in_range(&mmu->mm, node,
+ etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
+ DRM_MM_SEARCH_DEFAULT);
+
+ if (ret != -ENOSPC)
+ break;
+
+ /*
+ * If we did not search from the start of the MMU region,
+ * try again in case there are free slots.
+ */
+ if (mmu->last_iova) {
+ mmu->last_iova = 0;
+ mmu->need_flush = true;
+ continue;
+ }
+
+ /* Try to retire some entries */
+ drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);
+
+ found = false;
+ INIT_LIST_HEAD(&list);
+ list_for_each_entry(free, &mmu->mappings, mmu_node) {
+ /* If this vram node has not been used, skip it. */
+ if (!free->vram_node.mm)
+ continue;
+
+ /*
+ * If the iova is pinned, then it's in-use,
+ * so we must keep its mapping.
+ */
+ if (free->use)
+ continue;
+
+ list_add(&free->scan_node, &list);
+ if (drm_mm_scan_add_block(&free->vram_node)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* Nothing found, clean up and fail */
+ list_for_each_entry_safe(m, n, &list, scan_node)
+ BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
+ break;
+ }
+
+ /*
+ * drm_mm does not allow any other operations while
+ * scanning, so we have to remove all blocks first.
+ * If drm_mm_scan_remove_block() returns false, we
+ * can leave the block pinned.
+ */
+ list_for_each_entry_safe(m, n, &list, scan_node)
+ if (!drm_mm_scan_remove_block(&m->vram_node))
+ list_del_init(&m->scan_node);
+
+ /*
+ * Unmap the blocks which need to be reaped from the MMU.
+ * Clear the mmu pointer to prevent get_iova from finding
+ * this mapping.
+ */
+ list_for_each_entry_safe(m, n, &list, scan_node) {
+ etnaviv_iommu_remove_mapping(mmu, m);
+ m->mmu = NULL;
+ list_del_init(&m->mmu_node);
+ list_del_init(&m->scan_node);
+ }
+
+ /*
+ * We removed enough mappings so that the new allocation will
+ * succeed. Ensure that the MMU will be flushed before the
+ * associated commit requesting this mapping, and retry the
+ * allocation one more time.
+ */
+ mmu->need_flush = true;
+ }
+
+ if (ret < 0) {
+ mutex_unlock(&mmu->lock);
+ return ret;
+ }
+
+ mmu->last_iova = node->start + etnaviv_obj->base.size;
+ mapping->iova = node->start;
+ ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
+ IOMMU_READ | IOMMU_WRITE);
+
+ if (ret < 0) {
+ drm_mm_remove_node(node);
+ mutex_unlock(&mmu->lock);
+ return ret;
+ }
+
+ list_add_tail(&mapping->mmu_node, &mmu->mappings);
+ mutex_unlock(&mmu->lock);
+
+ return ret;
+}
+
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+ struct etnaviv_vram_mapping *mapping)
+{
+ WARN_ON(mapping->use);
+
+ mutex_lock(&mmu->lock);
+
+ /* If the vram node is on the mm, unmap and remove the node */
+ if (mapping->vram_node.mm == &mmu->mm)
+ etnaviv_iommu_remove_mapping(mmu, mapping);
+
+ list_del(&mapping->mmu_node);
+ mutex_unlock(&mmu->lock);
+}
+
+void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
+{
+ drm_mm_takedown(&mmu->mm);
+ iommu_domain_free(mmu->domain);
+ kfree(mmu);
+}
+
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
+ struct iommu_domain *domain, enum etnaviv_iommu_version version)
+{
+ struct etnaviv_iommu *mmu;
+
+ mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
+ if (!mmu)
+ return ERR_PTR(-ENOMEM);
+
+ mmu->domain = domain;
+ mmu->gpu = gpu;
+ mmu->version = version;
+ mutex_init(&mmu->lock);
+ INIT_LIST_HEAD(&mmu->mappings);
+
+ drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
+ domain->geometry.aperture_end -
+ domain->geometry.aperture_start + 1);
+
+ iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);
+
+ return mmu;
+}
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
+{
+ struct etnaviv_iommu_ops *ops;
+
+ ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
+
+ return ops->dump_size(iommu->domain);
+}
+
+void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
+{
+ struct etnaviv_iommu_ops *ops;
+
+ ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
+
+ ops->dump(iommu->domain, buf);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
new file mode 100644
index 000000000000..fff215a47630
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_MMU_H__
+#define __ETNAVIV_MMU_H__
+
+#include <linux/iommu.h>
+
+enum etnaviv_iommu_version {
+ ETNAVIV_IOMMU_V1 = 0,
+ ETNAVIV_IOMMU_V2,
+};
+
+struct etnaviv_gpu;
+struct etnaviv_vram_mapping;
+
+struct etnaviv_iommu_ops {
+ struct iommu_ops ops;
+ size_t (*dump_size)(struct iommu_domain *);
+ void (*dump)(struct iommu_domain *, void *);
+};
+
+struct etnaviv_iommu {
+ struct etnaviv_gpu *gpu;
+ struct iommu_domain *domain;
+
+ enum etnaviv_iommu_version version;
+
+ /* memory manager for GPU address area */
+ struct mutex lock;
+ struct list_head mappings;
+ struct drm_mm mm;
+ u32 last_iova;
+ bool need_flush;
+};
+
+struct etnaviv_gem_object;
+
+int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
+ int cnt);
+int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+ struct sg_table *sgt, unsigned len, int prot);
+int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+ struct sg_table *sgt, unsigned len);
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+ struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+ struct etnaviv_vram_mapping *mapping);
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+ struct etnaviv_vram_mapping *mapping);
+void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
+void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
+
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
+ struct iommu_domain *domain, enum etnaviv_iommu_version version);
+
+#endif /* __ETNAVIV_MMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/state.xml.h b/drivers/gpu/drm/etnaviv/state.xml.h
new file mode 100644
index 000000000000..368218304566
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state.xml.h
@@ -0,0 +1,351 @@
+#ifndef STATE_XML
+#define STATE_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- state.xml ( 18882 bytes, from 2015-03-25 11:42:32)
+- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
+- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21)
+- state_2d.xml ( 51549 bytes, from 2015-03-25 11:25:06)
+- state_3d.xml ( 54600 bytes, from 2015-03-25 11:25:19)
+- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01)
+
+Copyright (C) 2015
+*/
+
+
+#define VARYING_COMPONENT_USE_UNUSED 0x00000000
+#define VARYING_COMPONENT_USE_USED 0x00000001
+#define VARYING_COMPONENT_USE_POINTCOORD_X 0x00000002
+#define VARYING_COMPONENT_USE_POINTCOORD_Y 0x00000003
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK 0x000000ff
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT 0
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(x) (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK)
+#define VIVS_FE 0x00000000
+
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG(i0) (0x00000600 + 0x4*(i0))
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG__ESIZE 0x00000004
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG__LEN 0x00000010
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK 0x0000000f
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT 0
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_BYTE 0x00000000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_BYTE 0x00000001
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_SHORT 0x00000002
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_SHORT 0x00000003
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT 0x00000004
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT 0x00000005
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FLOAT 0x00000008
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_HALF_FLOAT 0x00000009
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FIXED 0x0000000b
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT_10_10_10_2 0x0000000c
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT_10_10_10_2 0x0000000d
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK 0x00000030
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT 4
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NONCONSECUTIVE 0x00000080
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK 0x00000700
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT 8
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK 0x00003000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT 12
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__MASK 0x0000c000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__SHIFT 14
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_OFF 0x00000000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_ON 0x00008000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK 0x00ff0000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT 16
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK 0xff000000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT 24
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK)
+
+#define VIVS_FE_CMD_STREAM_BASE_ADDR 0x00000640
+
+#define VIVS_FE_INDEX_STREAM_BASE_ADDR 0x00000644
+
+#define VIVS_FE_INDEX_STREAM_CONTROL 0x00000648
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__MASK 0x00000003
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__SHIFT 0
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_CHAR 0x00000000
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_SHORT 0x00000001
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_INT 0x00000002
+
+#define VIVS_FE_VERTEX_STREAM_BASE_ADDR 0x0000064c
+
+#define VIVS_FE_VERTEX_STREAM_CONTROL 0x00000650
+
+#define VIVS_FE_COMMAND_ADDRESS 0x00000654
+
+#define VIVS_FE_COMMAND_CONTROL 0x00000658
+#define VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK 0x0000ffff
+#define VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT 0
+#define VIVS_FE_COMMAND_CONTROL_PREFETCH(x) (((x) << VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK)
+#define VIVS_FE_COMMAND_CONTROL_ENABLE 0x00010000
+
+#define VIVS_FE_DMA_STATUS 0x0000065c
+
+#define VIVS_FE_DMA_DEBUG_STATE 0x00000660
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__MASK 0x0000001f
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__SHIFT 0
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_IDLE 0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DEC 0x00000001
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR0 0x00000002
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD0 0x00000003
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR1 0x00000004
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD1 0x00000005
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DADR 0x00000006
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCMD 0x00000007
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCNTL 0x00000008
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DIDXCNTL 0x00000009
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_INITREQDMA 0x0000000a
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAWIDX 0x0000000b
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAW 0x0000000c
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT0 0x0000000d
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT1 0x0000000e
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA0 0x0000000f
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA1 0x00000010
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAITFIFO 0x00000011
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAIT 0x00000012
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LINK 0x00000013
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_END 0x00000014
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_STALL 0x00000015
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__MASK 0x00000300
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__SHIFT 8
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_IDLE 0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_START 0x00000100
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_REQ 0x00000200
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_END 0x00000300
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__MASK 0x00000c00
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__SHIFT 10
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_IDLE 0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_RAMVALID 0x00000400
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_VALID 0x00000800
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__MASK 0x00003000
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__SHIFT 12
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_IDLE 0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_WAITIDX 0x00001000
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_CAL 0x00002000
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__MASK 0x0000c000
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__SHIFT 14
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDLE 0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_LDADR 0x00004000
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDXCALC 0x00008000
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__MASK 0x00030000
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__SHIFT 16
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_IDLE 0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_CKCACHE 0x00010000
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_MISS 0x00020000
+
+#define VIVS_FE_DMA_ADDRESS 0x00000664
+
+#define VIVS_FE_DMA_LOW 0x00000668
+
+#define VIVS_FE_DMA_HIGH 0x0000066c
+
+#define VIVS_FE_AUTO_FLUSH 0x00000670
+
+#define VIVS_FE_UNK00678 0x00000678
+
+#define VIVS_FE_UNK0067C 0x0000067c
+
+#define VIVS_FE_VERTEX_STREAMS(i0) (0x00000000 + 0x4*(i0))
+#define VIVS_FE_VERTEX_STREAMS__ESIZE 0x00000004
+#define VIVS_FE_VERTEX_STREAMS__LEN 0x00000008
+
+#define VIVS_FE_VERTEX_STREAMS_BASE_ADDR(i0) (0x00000680 + 0x4*(i0))
+
+#define VIVS_FE_VERTEX_STREAMS_CONTROL(i0) (0x000006a0 + 0x4*(i0))
+
+#define VIVS_FE_UNK00700(i0) (0x00000700 + 0x4*(i0))
+#define VIVS_FE_UNK00700__ESIZE 0x00000004
+#define VIVS_FE_UNK00700__LEN 0x00000010
+
+#define VIVS_FE_UNK00740(i0) (0x00000740 + 0x4*(i0))
+#define VIVS_FE_UNK00740__ESIZE 0x00000004
+#define VIVS_FE_UNK00740__LEN 0x00000010
+
+#define VIVS_FE_UNK00780(i0) (0x00000780 + 0x4*(i0))
+#define VIVS_FE_UNK00780__ESIZE 0x00000004
+#define VIVS_FE_UNK00780__LEN 0x00000010
+
+#define VIVS_GL 0x00000000
+
+#define VIVS_GL_PIPE_SELECT 0x00003800
+#define VIVS_GL_PIPE_SELECT_PIPE__MASK 0x00000001
+#define VIVS_GL_PIPE_SELECT_PIPE__SHIFT 0
+#define VIVS_GL_PIPE_SELECT_PIPE(x) (((x) << VIVS_GL_PIPE_SELECT_PIPE__SHIFT) & VIVS_GL_PIPE_SELECT_PIPE__MASK)
+
+#define VIVS_GL_EVENT 0x00003804
+#define VIVS_GL_EVENT_EVENT_ID__MASK 0x0000001f
+#define VIVS_GL_EVENT_EVENT_ID__SHIFT 0
+#define VIVS_GL_EVENT_EVENT_ID(x) (((x) << VIVS_GL_EVENT_EVENT_ID__SHIFT) & VIVS_GL_EVENT_EVENT_ID__MASK)
+#define VIVS_GL_EVENT_FROM_FE 0x00000020
+#define VIVS_GL_EVENT_FROM_PE 0x00000040
+#define VIVS_GL_EVENT_SOURCE__MASK 0x00001f00
+#define VIVS_GL_EVENT_SOURCE__SHIFT 8
+#define VIVS_GL_EVENT_SOURCE(x) (((x) << VIVS_GL_EVENT_SOURCE__SHIFT) & VIVS_GL_EVENT_SOURCE__MASK)
+
+#define VIVS_GL_SEMAPHORE_TOKEN 0x00003808
+#define VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK 0x0000001f
+#define VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT 0
+#define VIVS_GL_SEMAPHORE_TOKEN_FROM(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK)
+#define VIVS_GL_SEMAPHORE_TOKEN_TO__MASK 0x00001f00
+#define VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT 8
+#define VIVS_GL_SEMAPHORE_TOKEN_TO(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_TO__MASK)
+
+#define VIVS_GL_FLUSH_CACHE 0x0000380c
+#define VIVS_GL_FLUSH_CACHE_DEPTH 0x00000001
+#define VIVS_GL_FLUSH_CACHE_COLOR 0x00000002
+#define VIVS_GL_FLUSH_CACHE_TEXTURE 0x00000004
+#define VIVS_GL_FLUSH_CACHE_PE2D 0x00000008
+#define VIVS_GL_FLUSH_CACHE_TEXTUREVS 0x00000010
+#define VIVS_GL_FLUSH_CACHE_SHADER_L1 0x00000020
+#define VIVS_GL_FLUSH_CACHE_SHADER_L2 0x00000040
+
+#define VIVS_GL_FLUSH_MMU 0x00003810
+#define VIVS_GL_FLUSH_MMU_FLUSH_FEMMU 0x00000001
+#define VIVS_GL_FLUSH_MMU_FLUSH_UNK1 0x00000002
+#define VIVS_GL_FLUSH_MMU_FLUSH_UNK2 0x00000004
+#define VIVS_GL_FLUSH_MMU_FLUSH_PEMMU 0x00000008
+#define VIVS_GL_FLUSH_MMU_FLUSH_UNK4 0x00000010
+
+#define VIVS_GL_VERTEX_ELEMENT_CONFIG 0x00003814
+
+#define VIVS_GL_MULTI_SAMPLE_CONFIG 0x00003818
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__MASK 0x00000003
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__SHIFT 0
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_NONE 0x00000000
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_2X 0x00000001
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_4X 0x00000002
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_MASK 0x00000008
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK 0x000000f0
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT 4
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK)
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES_MASK 0x00000100
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK 0x00007000
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT 12
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK)
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12_MASK 0x00008000
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK 0x00030000
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT 16
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK)
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16_MASK 0x00080000
+
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS 0x0000381c
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK 0x000000ff
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT 0
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(x) (((x) << VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT) & VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK)
+
+#define VIVS_GL_VARYING_NUM_COMPONENTS 0x00003820
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK 0x00000007
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT 0
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK 0x00000070
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT 4
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK 0x00000700
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT 8
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK 0x00007000
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT 12
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK 0x00070000
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT 16
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK 0x00700000
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT 20
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK 0x07000000
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT 24
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK)
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK 0x70000000
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT 28
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK)
+
+#define VIVS_GL_VARYING_COMPONENT_USE(i0) (0x00003828 + 0x4*(i0))
+#define VIVS_GL_VARYING_COMPONENT_USE__ESIZE 0x00000004
+#define VIVS_GL_VARYING_COMPONENT_USE__LEN 0x00000002
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK 0x00000003
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT 0
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP0(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK 0x0000000c
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT 2
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP1(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK 0x00000030
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT 4
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP2(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK 0x000000c0
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT 6
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP3(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK 0x00000300
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT 8
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP4(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK 0x00000c00
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT 10
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP5(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK 0x00003000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT 12
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP6(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK 0x0000c000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT 14
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP7(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK 0x00030000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT 16
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP8(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK 0x000c0000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT 18
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP9(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK 0x00300000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT 20
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP10(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK 0x00c00000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT 22
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP11(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK 0x03000000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT 24
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP12(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK 0x0c000000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT 26
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP13(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK 0x30000000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT 28
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP14(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK 0xc0000000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT 30
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP15(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK)
+
+#define VIVS_GL_UNK03834 0x00003834
+
+#define VIVS_GL_UNK03838 0x00003838
+
+#define VIVS_GL_API_MODE 0x0000384c
+#define VIVS_GL_API_MODE_OPENGL 0x00000000
+#define VIVS_GL_API_MODE_OPENVG 0x00000001
+#define VIVS_GL_API_MODE_OPENCL 0x00000002
+
+#define VIVS_GL_CONTEXT_POINTER 0x00003850
+
+#define VIVS_GL_UNK03A00 0x00003a00
+
+#define VIVS_GL_STALL_TOKEN 0x00003c00
+#define VIVS_GL_STALL_TOKEN_FROM__MASK 0x0000001f
+#define VIVS_GL_STALL_TOKEN_FROM__SHIFT 0
+#define VIVS_GL_STALL_TOKEN_FROM(x) (((x) << VIVS_GL_STALL_TOKEN_FROM__SHIFT) & VIVS_GL_STALL_TOKEN_FROM__MASK)
+#define VIVS_GL_STALL_TOKEN_TO__MASK 0x00001f00
+#define VIVS_GL_STALL_TOKEN_TO__SHIFT 8
+#define VIVS_GL_STALL_TOKEN_TO(x) (((x) << VIVS_GL_STALL_TOKEN_TO__SHIFT) & VIVS_GL_STALL_TOKEN_TO__MASK)
+#define VIVS_GL_STALL_TOKEN_FLIP0 0x40000000
+#define VIVS_GL_STALL_TOKEN_FLIP1 0x80000000
+
+#define VIVS_DUMMY 0x00000000
+
+#define VIVS_DUMMY_DUMMY 0x0003fffc
+
+
+#endif /* STATE_XML */
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
new file mode 100644
index 000000000000..0064f2640396
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -0,0 +1,407 @@
+#ifndef STATE_HI_XML
+#define STATE_HI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21)
+- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
+
+Copyright (C) 2015
+*/
+
+
+#define MMU_EXCEPTION_SLAVE_NOT_PRESENT 0x00000001
+#define MMU_EXCEPTION_PAGE_NOT_PRESENT 0x00000002
+#define MMU_EXCEPTION_WRITE_VIOLATION 0x00000003
+#define VIVS_HI 0x00000000
+
+#define VIVS_HI_CLOCK_CONTROL 0x00000000
+#define VIVS_HI_CLOCK_CONTROL_CLK3D_DIS 0x00000001
+#define VIVS_HI_CLOCK_CONTROL_CLK2D_DIS 0x00000002
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK 0x000001fc
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT 2
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(x) (((x) << VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT) & VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK)
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD 0x00000200
+#define VIVS_HI_CLOCK_CONTROL_DISABLE_RAM_CLK_GATING 0x00000400
+#define VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS 0x00000800
+#define VIVS_HI_CLOCK_CONTROL_SOFT_RESET 0x00001000
+#define VIVS_HI_CLOCK_CONTROL_IDLE_3D 0x00010000
+#define VIVS_HI_CLOCK_CONTROL_IDLE_2D 0x00020000
+#define VIVS_HI_CLOCK_CONTROL_IDLE_VG 0x00040000
+#define VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU 0x00080000
+#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK 0x00f00000
+#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT 20
+#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(x) (((x) << VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT) & VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK)
+
+#define VIVS_HI_IDLE_STATE 0x00000004
+#define VIVS_HI_IDLE_STATE_FE 0x00000001
+#define VIVS_HI_IDLE_STATE_DE 0x00000002
+#define VIVS_HI_IDLE_STATE_PE 0x00000004
+#define VIVS_HI_IDLE_STATE_SH 0x00000008
+#define VIVS_HI_IDLE_STATE_PA 0x00000010
+#define VIVS_HI_IDLE_STATE_SE 0x00000020
+#define VIVS_HI_IDLE_STATE_RA 0x00000040
+#define VIVS_HI_IDLE_STATE_TX 0x00000080
+#define VIVS_HI_IDLE_STATE_VG 0x00000100
+#define VIVS_HI_IDLE_STATE_IM 0x00000200
+#define VIVS_HI_IDLE_STATE_FP 0x00000400
+#define VIVS_HI_IDLE_STATE_TS 0x00000800
+#define VIVS_HI_IDLE_STATE_AXI_LP 0x80000000
+
+#define VIVS_HI_AXI_CONFIG 0x00000008
+#define VIVS_HI_AXI_CONFIG_AWID__MASK 0x0000000f
+#define VIVS_HI_AXI_CONFIG_AWID__SHIFT 0
+#define VIVS_HI_AXI_CONFIG_AWID(x) (((x) << VIVS_HI_AXI_CONFIG_AWID__SHIFT) & VIVS_HI_AXI_CONFIG_AWID__MASK)
+#define VIVS_HI_AXI_CONFIG_ARID__MASK 0x000000f0
+#define VIVS_HI_AXI_CONFIG_ARID__SHIFT 4
+#define VIVS_HI_AXI_CONFIG_ARID(x) (((x) << VIVS_HI_AXI_CONFIG_ARID__SHIFT) & VIVS_HI_AXI_CONFIG_ARID__MASK)
+#define VIVS_HI_AXI_CONFIG_AWCACHE__MASK 0x00000f00
+#define VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT 8
+#define VIVS_HI_AXI_CONFIG_AWCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_AWCACHE__MASK)
+#define VIVS_HI_AXI_CONFIG_ARCACHE__MASK 0x0000f000
+#define VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT 12
+#define VIVS_HI_AXI_CONFIG_ARCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_ARCACHE__MASK)
+
+#define VIVS_HI_AXI_STATUS 0x0000000c
+#define VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK 0x0000000f
+#define VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT 0
+#define VIVS_HI_AXI_STATUS_WR_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK)
+#define VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK 0x000000f0
+#define VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT 4
+#define VIVS_HI_AXI_STATUS_RD_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK)
+#define VIVS_HI_AXI_STATUS_DET_WR_ERR 0x00000100
+#define VIVS_HI_AXI_STATUS_DET_RD_ERR 0x00000200
+
+#define VIVS_HI_INTR_ACKNOWLEDGE 0x00000010
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK 0x7fffffff
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT 0
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x) (((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK)
+#define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR 0x80000000
+
+#define VIVS_HI_INTR_ENBL 0x00000014
+#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK 0xffffffff
+#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT 0
+#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC(x) (((x) << VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT) & VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK)
+
+#define VIVS_HI_CHIP_IDENTITY 0x00000018
+#define VIVS_HI_CHIP_IDENTITY_FAMILY__MASK 0xff000000
+#define VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT 24
+#define VIVS_HI_CHIP_IDENTITY_FAMILY(x) (((x) << VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK)
+#define VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK 0x00ff0000
+#define VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT 16
+#define VIVS_HI_CHIP_IDENTITY_PRODUCT(x) (((x) << VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT) & VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK)
+#define VIVS_HI_CHIP_IDENTITY_REVISION__MASK 0x0000f000
+#define VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT 12
+#define VIVS_HI_CHIP_IDENTITY_REVISION(x) (((x) << VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT) & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
+
+#define VIVS_HI_CHIP_FEATURE 0x0000001c
+
+#define VIVS_HI_CHIP_MODEL 0x00000020
+
+#define VIVS_HI_CHIP_REV 0x00000024
+
+#define VIVS_HI_CHIP_DATE 0x00000028
+
+#define VIVS_HI_CHIP_TIME 0x0000002c
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_0 0x00000034
+
+#define VIVS_HI_CACHE_CONTROL 0x00000038
+
+#define VIVS_HI_MEMORY_COUNTER_RESET 0x0000003c
+
+#define VIVS_HI_PROFILE_READ_BYTES8 0x00000040
+
+#define VIVS_HI_PROFILE_WRITE_BYTES8 0x00000044
+
+#define VIVS_HI_CHIP_SPECS 0x00000048
+#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK 0x0000000f
+#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT 0
+#define VIVS_HI_CHIP_SPECS_STREAM_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK 0x000000f0
+#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT 4
+#define VIVS_HI_CHIP_SPECS_REGISTER_MAX(x) (((x) << VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT) & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
+#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK 0x00000f00
+#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT 8
+#define VIVS_HI_CHIP_SPECS_THREAD_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK 0x0001f000
+#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT 12
+#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
+#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK 0x01f00000
+#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT 20
+#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK 0x0e000000
+#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT 25
+#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES(x) (((x) << VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT) & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
+#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK 0xf0000000
+#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT 28
+#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
+
+#define VIVS_HI_PROFILE_WRITE_BURSTS 0x0000004c
+
+#define VIVS_HI_PROFILE_WRITE_REQUESTS 0x00000050
+
+#define VIVS_HI_PROFILE_READ_BURSTS 0x00000058
+
+#define VIVS_HI_PROFILE_READ_REQUESTS 0x0000005c
+
+#define VIVS_HI_PROFILE_READ_LASTS 0x00000060
+
+#define VIVS_HI_GP_OUT0 0x00000064
+
+#define VIVS_HI_GP_OUT1 0x00000068
+
+#define VIVS_HI_GP_OUT2 0x0000006c
+
+#define VIVS_HI_AXI_CONTROL 0x00000070
+#define VIVS_HI_AXI_CONTROL_WR_FULL_BURST_MODE 0x00000001
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_1 0x00000074
+
+#define VIVS_HI_PROFILE_TOTAL_CYCLES 0x00000078
+
+#define VIVS_HI_PROFILE_IDLE_CYCLES 0x0000007c
+
+#define VIVS_HI_CHIP_SPECS_2 0x00000080
+#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK 0x000000ff
+#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT 0
+#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
+#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK 0x0000ff00
+#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT 8
+#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK 0xffff0000
+#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT 16
+#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS(x) (((x) << VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT) & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_2 0x00000084
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094
+
+#define VIVS_PM 0x00000000
+
+#define VIVS_PM_POWER_CONTROLS 0x00000100
+#define VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING 0x00000001
+#define VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING 0x00000002
+#define VIVS_PM_POWER_CONTROLS_DISABLE_STARVE_MODULE_CLOCK_GATING 0x00000004
+#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK 0x000000f0
+#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT 4
+#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK)
+#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK 0xffff0000
+#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT 16
+#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK)
+
+#define VIVS_PM_MODULE_CONTROLS 0x00000104
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004
+
+#define VIVS_PM_MODULE_STATUS 0x00000108
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004
+
+#define VIVS_PM_PULSE_EATER 0x0000010c
+
+#define VIVS_MMUv2 0x00000000
+
+#define VIVS_MMUv2_SAFE_ADDRESS 0x00000180
+
+#define VIVS_MMUv2_CONFIGURATION 0x00000184
+#define VIVS_MMUv2_CONFIGURATION_MODE__MASK 0x00000001
+#define VIVS_MMUv2_CONFIGURATION_MODE__SHIFT 0
+#define VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K 0x00000000
+#define VIVS_MMUv2_CONFIGURATION_MODE_MODE1_K 0x00000001
+#define VIVS_MMUv2_CONFIGURATION_MODE_MASK 0x00000008
+#define VIVS_MMUv2_CONFIGURATION_FLUSH__MASK 0x00000010
+#define VIVS_MMUv2_CONFIGURATION_FLUSH__SHIFT 4
+#define VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH 0x00000010
+#define VIVS_MMUv2_CONFIGURATION_FLUSH_MASK 0x00000080
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK 0x00000100
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK 0xfffffc00
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT 10
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS(x) (((x) << VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT) & VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK)
+
+#define VIVS_MMUv2_STATUS 0x00000188
+#define VIVS_MMUv2_STATUS_EXCEPTION0__MASK 0x00000003
+#define VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT 0
+#define VIVS_MMUv2_STATUS_EXCEPTION0(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK)
+#define VIVS_MMUv2_STATUS_EXCEPTION1__MASK 0x00000030
+#define VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT 4
+#define VIVS_MMUv2_STATUS_EXCEPTION1(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION1__MASK)
+#define VIVS_MMUv2_STATUS_EXCEPTION2__MASK 0x00000300
+#define VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT 8
+#define VIVS_MMUv2_STATUS_EXCEPTION2(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION2__MASK)
+#define VIVS_MMUv2_STATUS_EXCEPTION3__MASK 0x00003000
+#define VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT 12
+#define VIVS_MMUv2_STATUS_EXCEPTION3(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION3__MASK)
+
+#define VIVS_MMUv2_CONTROL 0x0000018c
+#define VIVS_MMUv2_CONTROL_ENABLE 0x00000001
+
+#define VIVS_MMUv2_EXCEPTION_ADDR(i0) (0x00000190 + 0x4*(i0))
+#define VIVS_MMUv2_EXCEPTION_ADDR__ESIZE 0x00000004
+#define VIVS_MMUv2_EXCEPTION_ADDR__LEN 0x00000004
+
+#define VIVS_MC 0x00000000
+
+#define VIVS_MC_MMU_FE_PAGE_TABLE 0x00000400
+
+#define VIVS_MC_MMU_TX_PAGE_TABLE 0x00000404
+
+#define VIVS_MC_MMU_PE_PAGE_TABLE 0x00000408
+
+#define VIVS_MC_MMU_PEZ_PAGE_TABLE 0x0000040c
+
+#define VIVS_MC_MMU_RA_PAGE_TABLE 0x00000410
+
+#define VIVS_MC_DEBUG_MEMORY 0x00000414
+#define VIVS_MC_DEBUG_MEMORY_SPECIAL_PATCH_GC320 0x00000008
+#define VIVS_MC_DEBUG_MEMORY_FAST_CLEAR_BYPASS 0x00100000
+#define VIVS_MC_DEBUG_MEMORY_COMPRESSION_BYPASS 0x00200000
+
+#define VIVS_MC_MEMORY_BASE_ADDR_RA 0x00000418
+
+#define VIVS_MC_MEMORY_BASE_ADDR_FE 0x0000041c
+
+#define VIVS_MC_MEMORY_BASE_ADDR_TX 0x00000420
+
+#define VIVS_MC_MEMORY_BASE_ADDR_PEZ 0x00000424
+
+#define VIVS_MC_MEMORY_BASE_ADDR_PE 0x00000428
+
+#define VIVS_MC_MEMORY_TIMING_CONTROL 0x0000042c
+
+#define VIVS_MC_MEMORY_FLUSH 0x00000430
+
+#define VIVS_MC_PROFILE_CYCLE_COUNTER 0x00000438
+
+#define VIVS_MC_DEBUG_READ0 0x0000043c
+
+#define VIVS_MC_DEBUG_READ1 0x00000440
+
+#define VIVS_MC_DEBUG_WRITE 0x00000444
+
+#define VIVS_MC_PROFILE_RA_READ 0x00000448
+
+#define VIVS_MC_PROFILE_TX_READ 0x0000044c
+
+#define VIVS_MC_PROFILE_FE_READ 0x00000450
+
+#define VIVS_MC_PROFILE_PE_READ 0x00000454
+
+#define VIVS_MC_PROFILE_DE_READ 0x00000458
+
+#define VIVS_MC_PROFILE_SH_READ 0x0000045c
+
+#define VIVS_MC_PROFILE_PA_READ 0x00000460
+
+#define VIVS_MC_PROFILE_SE_READ 0x00000464
+
+#define VIVS_MC_PROFILE_MC_READ 0x00000468
+
+#define VIVS_MC_PROFILE_HI_READ 0x0000046c
+
+#define VIVS_MC_PROFILE_CONFIG0 0x00000470
+#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG0_FE_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT 8
+#define VIVS_MC_PROFILE_CONFIG0_DE_RESET 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG0_PE__MASK 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG0_PE__SHIFT 16
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE 0x00000000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE 0x00010000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE 0x00020000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE 0x00030000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D 0x000b0000
+#define VIVS_MC_PROFILE_CONFIG0_PE_RESET 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG0_SH__MASK 0x0f000000
+#define VIVS_MC_PROFILE_CONFIG0_SH__SHIFT 24
+#define VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES 0x04000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER 0x07000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER 0x08000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER 0x09000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER 0x0a000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER 0x0b000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER 0x0c000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER 0x0d000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER 0x0e000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_RESET 0x0f000000
+
+#define VIVS_MC_PROFILE_CONFIG1 0x00000474
+#define VIVS_MC_PROFILE_CONFIG1_PA__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG1_PA__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER 0x00000003
+#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER 0x00000004
+#define VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER 0x00000005
+#define VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER 0x00000006
+#define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER 0x00000007
+#define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER 0x00000008
+#define VIVS_MC_PROFILE_CONFIG1_PA_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT 8
+#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT 0x00000000
+#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT 0x00000100
+#define VIVS_MC_PROFILE_CONFIG1_SE_RESET 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT 16
+#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT 0x00000000
+#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT 0x00010000
+#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z 0x00020000
+#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT 0x00030000
+#define VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER 0x00090000
+#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER 0x000a0000
+#define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT 0x000b0000
+#define VIVS_MC_PROFILE_CONFIG1_RA_RESET 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0x0f000000
+#define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT 24
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS 0x00000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS 0x01000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS 0x02000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS 0x03000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_UNKNOWN 0x04000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT 0x05000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT 0x06000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT 0x07000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT 0x08000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT 0x09000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_RESET 0x0f000000
+
+#define VIVS_MC_PROFILE_CONFIG2 0x00000478
+#define VIVS_MC_PROFILE_CONFIG2_MC__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_MC__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE 0x00000001
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP 0x00000002
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE 0x00000003
+#define VIVS_MC_PROFILE_CONFIG2_MC_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT 8
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED 0x00000000
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED 0x00000100
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED 0x00000200
+#define VIVS_MC_PROFILE_CONFIG2_HI_RESET 0x00000f00
+
+#define VIVS_MC_PROFILE_CONFIG3 0x0000047c
+
+#define VIVS_MC_BUS_CONFIG 0x00000480
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK 0x0000000f
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT 0
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK)
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK 0x000000f0
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT 4
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK)
+
+#define VIVS_MC_START_COMPOSITION 0x00000554
+
+#define VIVS_MC_128B_MERGE 0x00000558
+
+
+#endif /* STATE_HI_XML */
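
The generated header only provides encode helpers, so reading a field back
out of a register value is open-coded as (reg & MASK) >> SHIFT. A sketch
for the VIVS_HI_CHIP_SPECS register (the MMIO read that produces 'specs'
is assumed, not shown):

	#include <linux/types.h>

	#include "state_hi.xml.h"

	/* Extract the pixel-pipe count from a raw CHIP_SPECS value. */
	static unsigned int chip_specs_pixel_pipes(u32 specs)
	{
		return (specs & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK) >>
		       VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
	}
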
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 96e86cf4455b..83efca941388 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -118,7 +118,7 @@ config DRM_EXYNOS_ROTATOR
config DRM_EXYNOS_GSC
bool "GScaler"
- depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !VIDEO_SAMSUNG_EXYNOS_GSC
help
Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index fbe1b3174f75..c7362b99ce28 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -21,11 +21,11 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
#define WINDOWS_NR 3
-#define CURSOR_WIN 2
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
static const char * const decon_clks_name[] = {
@@ -56,6 +56,7 @@ struct decon_context {
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
+ struct exynos_drm_plane_config configs[WINDOWS_NR];
void __iomem *addr;
struct clk *clks[ARRAY_SIZE(decon_clks_name)];
int pipe;
@@ -71,6 +72,12 @@ static const uint32_t decon_formats[] = {
DRM_FORMAT_ARGB8888,
};
+static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_OVERLAY,
+ DRM_PLANE_TYPE_CURSOR,
+};
+
static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask,
u32 val)
{
@@ -259,21 +266,24 @@ static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
static void decon_update_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
+ struct exynos_drm_plane_state *state =
+ to_exynos_plane_state(plane->base.state);
struct decon_context *ctx = crtc->ctx;
- struct drm_plane_state *state = plane->base.state;
+ struct drm_framebuffer *fb = state->base.fb;
unsigned int win = plane->zpos;
- unsigned int bpp = state->fb->bits_per_pixel >> 3;
- unsigned int pitch = state->fb->pitches[0];
+ unsigned int bpp = fb->bits_per_pixel >> 3;
+ unsigned int pitch = fb->pitches[0];
+ dma_addr_t dma_addr = exynos_drm_fb_dma_addr(fb, 0);
u32 val;
if (test_bit(BIT_SUSPENDED, &ctx->flags))
return;
- val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y);
+ val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y);
writel(val, ctx->addr + DECON_VIDOSDxA(win));
- val = COORDINATE_X(plane->crtc_x + plane->crtc_w - 1) |
- COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1);
+ val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
+ COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
writel(val, ctx->addr + DECON_VIDOSDxB(win));
val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -284,20 +294,20 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
VIDOSD_Wx_ALPHA_B_F(0x0);
writel(val, ctx->addr + DECON_VIDOSDxD(win));
- writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win));
+ writel(dma_addr, ctx->addr + DECON_VIDW0xADD0B0(win));
- val = plane->dma_addr[0] + pitch * plane->crtc_h;
+ val = dma_addr + pitch * state->src.h;
writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
if (ctx->out_type != IFTYPE_HDMI)
- val = BIT_VAL(pitch - plane->crtc_w * bpp, 27, 14)
- | BIT_VAL(plane->crtc_w * bpp, 13, 0);
+ val = BIT_VAL(pitch - state->crtc.w * bpp, 27, 14)
+ | BIT_VAL(state->crtc.w * bpp, 13, 0);
else
- val = BIT_VAL(pitch - plane->crtc_w * bpp, 29, 15)
- | BIT_VAL(plane->crtc_w * bpp, 14, 0);
+ val = BIT_VAL(pitch - state->crtc.w * bpp, 29, 15)
+ | BIT_VAL(state->crtc.w * bpp, 14, 0);
writel(val, ctx->addr + DECON_VIDW0xADD2(win));
- decon_win_set_pixfmt(ctx, win, state->fb);
+ decon_win_set_pixfmt(ctx, win, fb);
/* window enable */
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
@@ -377,20 +387,12 @@ static void decon_swreset(struct decon_context *ctx)
static void decon_enable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
- int ret;
- int i;
if (!test_and_clear_bit(BIT_SUSPENDED, &ctx->flags))
return;
pm_runtime_get_sync(ctx->dev);
- for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
- ret = clk_prepare_enable(ctx->clks[i]);
- if (ret < 0)
- goto err;
- }
-
set_bit(BIT_CLKS_ENABLED, &ctx->flags);
/* if vblank was enabled status, enable it again. */
@@ -399,11 +401,6 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
decon_commit(ctx->crtc);
- return;
-err:
- while (--i >= 0)
- clk_disable_unprepare(ctx->clks[i]);
-
set_bit(BIT_SUSPENDED, &ctx->flags);
}
@@ -425,9 +422,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
decon_swreset(ctx);
- for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++)
- clk_disable_unprepare(ctx->clks[i]);
-
clear_bit(BIT_CLKS_ENABLED, &ctx->flags);
pm_runtime_put_sync(ctx->dev);
@@ -478,7 +472,6 @@ err:
static struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable = decon_enable,
.disable = decon_disable,
- .commit = decon_commit,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.atomic_begin = decon_atomic_begin,
@@ -495,7 +488,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_private *priv = drm_dev->dev_private;
struct exynos_drm_plane *exynos_plane;
enum exynos_drm_output_type out_type;
- enum drm_plane_type type;
unsigned int win;
int ret;
@@ -505,10 +497,13 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
for (win = ctx->first_win; win < WINDOWS_NR; win++) {
int tmp = (win == ctx->first_win) ? 0 : win;
- type = exynos_plane_get_type(tmp, CURSOR_WIN);
+ ctx->configs[win].pixel_formats = decon_formats;
+ ctx->configs[win].num_pixel_formats = ARRAY_SIZE(decon_formats);
+ ctx->configs[win].zpos = win;
+ ctx->configs[win].type = decon_win_types[tmp];
+
ret = exynos_plane_init(drm_dev, &ctx->planes[win],
- 1 << ctx->pipe, type, decon_formats,
- ARRAY_SIZE(decon_formats), win);
+ 1 << ctx->pipe, &ctx->configs[win]);
if (ret)
return ret;
}
@@ -581,6 +576,44 @@ out:
return IRQ_HANDLED;
}
+#ifdef CONFIG_PM
+static int exynos5433_decon_suspend(struct device *dev)
+{
+ struct decon_context *ctx = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++)
+ clk_disable_unprepare(ctx->clks[i]);
+
+ return 0;
+}
+
+static int exynos5433_decon_resume(struct device *dev)
+{
+ struct decon_context *ctx = dev_get_drvdata(dev);
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
+ ret = clk_prepare_enable(ctx->clks[i]);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ while (--i >= 0)
+ clk_disable_unprepare(ctx->clks[i]);
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops exynos5433_decon_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos5433_decon_suspend, exynos5433_decon_resume,
+ NULL)
+};
+
static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
{
.compatible = "samsung,exynos5433-decon",
@@ -684,6 +717,7 @@ struct platform_driver exynos5433_decon_driver = {
.remove = exynos5433_decon_remove,
.driver = {
.name = "exynos5433-decon",
+ .pm = &exynos5433_decon_pm_ops,
.of_match_table = exynos5433_decon_driver_dt_match,
},
};
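
The hunks above move the clock handling out of decon_enable()/decon_disable()
and into runtime-PM callbacks, so the clocks now follow the device's
runtime-PM state instead of being toggled inline. A minimal sketch of the
resulting pattern, with illustrative names (my_ctx and the my_* functions
are not from the driver):

	#include <linux/clk.h>
	#include <linux/pm_runtime.h>

	struct my_ctx {
		struct device *dev;
		struct clk *clk;
	};

	/* Wired up via SET_RUNTIME_PM_OPS() in the driver's dev_pm_ops. */
	static int my_runtime_suspend(struct device *dev)
	{
		struct my_ctx *ctx = dev_get_drvdata(dev);

		clk_disable_unprepare(ctx->clk);
		return 0;
	}

	static int my_runtime_resume(struct device *dev)
	{
		struct my_ctx *ctx = dev_get_drvdata(dev);

		return clk_prepare_enable(ctx->clk);
	}

	static void my_crtc_enable(struct my_ctx *ctx)
	{
		/* resumes the device, i.e. calls my_runtime_resume() */
		pm_runtime_get_sync(ctx->dev);
		/* ... program the hardware ... */
	}

	static void my_crtc_disable(struct my_ctx *ctx)
	{
		/* ... quiesce the hardware ... */
		pm_runtime_put_sync(ctx->dev); /* may suspend the device */
	}
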
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index ead2b16e237d..c47f9af8170b 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -30,6 +30,7 @@
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
+#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_iommu.h"
@@ -40,13 +41,13 @@
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
#define WINDOWS_NR 2
-#define CURSOR_WIN 1
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
+ struct exynos_drm_plane_config configs[WINDOWS_NR];
struct clk *pclk;
struct clk *aclk;
struct clk *eclk;
@@ -81,6 +82,11 @@ static const uint32_t decon_formats[] = {
DRM_FORMAT_BGRA8888,
};
+static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_CURSOR,
+};
+
static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -119,13 +125,8 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
}
/* Wait for vsync, as disable channel takes effect at next vsync */
- if (ch_enabled) {
- unsigned int state = ctx->suspended;
-
- ctx->suspended = 0;
+ if (ch_enabled)
decon_wait_for_vblank(ctx->crtc);
- ctx->suspended = state;
- }
}
static int decon_ctx_initialize(struct decon_context *ctx,
@@ -398,16 +399,17 @@ static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
static void decon_update_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
+ struct exynos_drm_plane_state *state =
+ to_exynos_plane_state(plane->base.state);
struct decon_context *ctx = crtc->ctx;
- struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
- struct drm_plane_state *state = plane->base.state;
+ struct drm_framebuffer *fb = state->base.fb;
int padding;
unsigned long val, alpha;
unsigned int last_x;
unsigned int last_y;
unsigned int win = plane->zpos;
- unsigned int bpp = state->fb->bits_per_pixel >> 3;
- unsigned int pitch = state->fb->pitches[0];
+ unsigned int bpp = fb->bits_per_pixel >> 3;
+ unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
return;
@@ -423,41 +425,32 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
*/
/* buffer start address */
- val = (unsigned long)plane->dma_addr[0];
+ val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0);
writel(val, ctx->regs + VIDW_BUF_START(win));
- padding = (pitch / bpp) - state->fb->width;
+ padding = (pitch / bpp) - fb->width;
/* buffer size */
- writel(state->fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
- writel(state->fb->height, ctx->regs + VIDW_WHOLE_Y(win));
+ writel(fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
+ writel(fb->height, ctx->regs + VIDW_WHOLE_Y(win));
/* offset from the start of the buffer to read */
- writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win));
- writel(plane->src_y, ctx->regs + VIDW_OFFSET_Y(win));
+ writel(state->src.x, ctx->regs + VIDW_OFFSET_X(win));
+ writel(state->src.y, ctx->regs + VIDW_OFFSET_Y(win));
DRM_DEBUG_KMS("start addr = 0x%lx\n",
(unsigned long)val);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- plane->crtc_w, plane->crtc_h);
+ state->crtc.w, state->crtc.h);
- /*
- * OSD position.
- * In case the window layout goes of LCD layout, DECON fails.
- */
- if ((plane->crtc_x + plane->crtc_w) > mode->hdisplay)
- plane->crtc_x = mode->hdisplay - plane->crtc_w;
- if ((plane->crtc_y + plane->crtc_h) > mode->vdisplay)
- plane->crtc_y = mode->vdisplay - plane->crtc_h;
-
- val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
- VIDOSDxA_TOPLEFT_Y(plane->crtc_y);
+ val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
+ VIDOSDxA_TOPLEFT_Y(state->crtc.y);
writel(val, ctx->regs + VIDOSD_A(win));
- last_x = plane->crtc_x + plane->crtc_w;
+ last_x = state->crtc.x + state->crtc.w;
if (last_x)
last_x--;
- last_y = plane->crtc_y + plane->crtc_h;
+ last_y = state->crtc.y + state->crtc.h;
if (last_y)
last_y--;
@@ -466,7 +459,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- plane->crtc_x, plane->crtc_y, last_x, last_y);
+ state->crtc.x, state->crtc.y, last_x, last_y);
/* OSD alpha */
alpha = VIDOSDxC_ALPHA0_R_F(0x0) |
@@ -481,7 +474,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(alpha, ctx->regs + VIDOSD_D(win));
- decon_win_set_pixfmt(ctx, win, state->fb);
+ decon_win_set_pixfmt(ctx, win, fb);
/* hardware window 0 doesn't support color key. */
if (win != 0)
@@ -555,39 +548,12 @@ static void decon_init(struct decon_context *ctx)
static void decon_enable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
- int ret;
if (!ctx->suspended)
return;
- ctx->suspended = false;
-
pm_runtime_get_sync(ctx->dev);
- ret = clk_prepare_enable(ctx->pclk);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret);
- return;
- }
-
- ret = clk_prepare_enable(ctx->aclk);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret);
- return;
- }
-
- ret = clk_prepare_enable(ctx->eclk);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret);
- return;
- }
-
- ret = clk_prepare_enable(ctx->vclk);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret);
- return;
- }
-
decon_init(ctx);
/* if vblank was enabled status, enable it again. */
@@ -595,6 +561,8 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
decon_enable_vblank(ctx->crtc);
decon_commit(ctx->crtc);
+
+ ctx->suspended = false;
}
static void decon_disable(struct exynos_drm_crtc *crtc)
@@ -613,11 +581,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
for (i = 0; i < WINDOWS_NR; i++)
decon_disable_plane(crtc, &ctx->planes[i]);
- clk_disable_unprepare(ctx->vclk);
- clk_disable_unprepare(ctx->eclk);
- clk_disable_unprepare(ctx->aclk);
- clk_disable_unprepare(ctx->pclk);
-
pm_runtime_put_sync(ctx->dev);
ctx->suspended = true;
@@ -679,8 +642,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
struct decon_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_plane *exynos_plane;
- enum drm_plane_type type;
- unsigned int zpos;
+ unsigned int i;
int ret;
ret = decon_ctx_initialize(ctx, drm_dev);
@@ -689,11 +651,14 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
- type = exynos_plane_get_type(zpos, CURSOR_WIN);
- ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
- 1 << ctx->pipe, type, decon_formats,
- ARRAY_SIZE(decon_formats), zpos);
+ for (i = 0; i < WINDOWS_NR; i++) {
+ ctx->configs[i].pixel_formats = decon_formats;
+ ctx->configs[i].num_pixel_formats = ARRAY_SIZE(decon_formats);
+ ctx->configs[i].zpos = i;
+ ctx->configs[i].type = decon_win_types[i];
+
+ ret = exynos_plane_init(drm_dev, &ctx->planes[i],
+ 1 << ctx->pipe, &ctx->configs[i]);
if (ret)
return ret;
}
@@ -843,11 +808,63 @@ static int decon_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int exynos7_decon_suspend(struct device *dev)
+{
+ struct decon_context *ctx = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ctx->vclk);
+ clk_disable_unprepare(ctx->eclk);
+ clk_disable_unprepare(ctx->aclk);
+ clk_disable_unprepare(ctx->pclk);
+
+ return 0;
+}
+
+static int exynos7_decon_resume(struct device *dev)
+{
+ struct decon_context *ctx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(ctx->pclk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctx->aclk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctx->eclk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctx->vclk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos7_decon_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume,
+ NULL)
+};
+
struct platform_driver decon_driver = {
.probe = decon_probe,
.remove = decon_remove,
.driver = {
.name = "exynos-decon",
+ .pm = &exynos7_decon_pm_ops,
.of_match_table = decon_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 124fb9a56f02..b79c316c2ad2 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -953,7 +953,7 @@ static void exynos_dp_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static struct drm_connector_funcs exynos_dp_connector_funcs = {
+static const struct drm_connector_funcs exynos_dp_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = exynos_dp_detect,
@@ -998,7 +998,7 @@ static struct drm_encoder *exynos_dp_best_encoder(
return &dp->encoder;
}
-static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
.get_modes = exynos_dp_get_modes,
.best_encoder = exynos_dp_best_encoder,
};
@@ -1009,9 +1009,9 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
{
int ret;
- encoder->bridge = dp->bridge;
- dp->bridge->encoder = encoder;
- ret = drm_bridge_attach(encoder->dev, dp->bridge);
+ encoder->bridge->next = dp->ptn_bridge;
+ dp->ptn_bridge->encoder = encoder;
+ ret = drm_bridge_attach(encoder->dev, dp->ptn_bridge);
if (ret) {
DRM_ERROR("Failed to attach bridge to drm\n");
return ret;
@@ -1020,14 +1020,15 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
return 0;
}
-static int exynos_dp_create_connector(struct drm_encoder *encoder)
+static int exynos_dp_bridge_attach(struct drm_bridge *bridge)
{
- struct exynos_dp_device *dp = encoder_to_dp(encoder);
+ struct exynos_dp_device *dp = bridge->driver_private;
+ struct drm_encoder *encoder = &dp->encoder;
struct drm_connector *connector = &dp->connector;
int ret;
/* Pre-empt DP connector creation if there's a bridge */
- if (dp->bridge) {
+ if (dp->ptn_bridge) {
ret = exynos_drm_attach_lcd_bridge(dp, encoder);
if (!ret)
return 0;
@@ -1052,27 +1053,16 @@ static int exynos_dp_create_connector(struct drm_encoder *encoder)
return ret;
}
-static bool exynos_dp_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static void exynos_dp_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void exynos_dp_enable(struct drm_encoder *encoder)
+static void exynos_dp_bridge_enable(struct drm_bridge *bridge)
{
- struct exynos_dp_device *dp = encoder_to_dp(encoder);
+ struct exynos_dp_device *dp = bridge->driver_private;
struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
return;
+ pm_runtime_get_sync(dp->dev);
+
if (dp->panel) {
if (drm_panel_prepare(dp->panel)) {
DRM_ERROR("failed to setup the panel\n");
@@ -1083,7 +1073,6 @@ static void exynos_dp_enable(struct drm_encoder *encoder)
if (crtc->ops->clock_enable)
crtc->ops->clock_enable(dp_to_crtc(dp), true);
- clk_prepare_enable(dp->clock);
phy_power_on(dp->phy);
exynos_dp_init_dp(dp);
enable_irq(dp->irq);
@@ -1092,9 +1081,9 @@ static void exynos_dp_enable(struct drm_encoder *encoder)
dp->dpms_mode = DRM_MODE_DPMS_ON;
}
-static void exynos_dp_disable(struct drm_encoder *encoder)
+static void exynos_dp_bridge_disable(struct drm_bridge *bridge)
{
- struct exynos_dp_device *dp = encoder_to_dp(encoder);
+ struct exynos_dp_device *dp = bridge->driver_private;
struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
@@ -1110,7 +1099,6 @@ static void exynos_dp_disable(struct drm_encoder *encoder)
disable_irq(dp->irq);
flush_work(&dp->hotplug_work);
phy_power_off(dp->phy);
- clk_disable_unprepare(dp->clock);
if (crtc->ops->clock_enable)
crtc->ops->clock_enable(dp_to_crtc(dp), false);
@@ -1120,17 +1108,82 @@ static void exynos_dp_disable(struct drm_encoder *encoder)
DRM_ERROR("failed to turnoff the panel\n");
}
+ pm_runtime_put_sync(dp->dev);
+
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
-static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
+static void exynos_dp_bridge_nop(struct drm_bridge *bridge)
+{
+ /* do nothing */
+}
+
+static const struct drm_bridge_funcs exynos_dp_bridge_funcs = {
+ .enable = exynos_dp_bridge_enable,
+ .disable = exynos_dp_bridge_disable,
+ .pre_enable = exynos_dp_bridge_nop,
+ .post_disable = exynos_dp_bridge_nop,
+ .attach = exynos_dp_bridge_attach,
+};
+
+static int exynos_dp_create_connector(struct drm_encoder *encoder)
+{
+ struct exynos_dp_device *dp = encoder_to_dp(encoder);
+ struct drm_device *drm_dev = dp->drm_dev;
+ struct drm_bridge *bridge;
+ int ret;
+
+ bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge) {
+ DRM_ERROR("failed to allocate for drm bridge\n");
+ return -ENOMEM;
+ }
+
+ dp->bridge = bridge;
+
+ encoder->bridge = bridge;
+ bridge->driver_private = dp;
+ bridge->encoder = encoder;
+ bridge->funcs = &exynos_dp_bridge_funcs;
+
+ ret = drm_bridge_attach(drm_dev, bridge);
+ if (ret) {
+ DRM_ERROR("failed to attach drm bridge\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool exynos_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void exynos_dp_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void exynos_dp_enable(struct drm_encoder *encoder)
+{
+}
+
+static void exynos_dp_disable(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
.mode_fixup = exynos_dp_mode_fixup,
.mode_set = exynos_dp_mode_set,
.enable = exynos_dp_enable,
.disable = exynos_dp_disable,
};
-static struct drm_encoder_funcs exynos_dp_encoder_funcs = {
+static const struct drm_encoder_funcs exynos_dp_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
@@ -1238,7 +1291,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
}
}
- if (!dp->panel && !dp->bridge) {
+ if (!dp->panel && !dp->ptn_bridge) {
ret = exynos_dp_dt_parse_panel(dp);
if (ret)
return ret;
@@ -1289,10 +1342,6 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
- phy_power_on(dp->phy);
-
- exynos_dp_init_dp(dp);
-
ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler,
irq_flags, "exynos-dp", dp);
if (ret) {
@@ -1313,7 +1362,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
@@ -1343,8 +1392,9 @@ static const struct component_ops exynos_dp_ops = {
static int exynos_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *panel_node, *bridge_node, *endpoint;
+ struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL;
struct exynos_dp_device *dp;
+ int ret;
dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
GFP_KERNEL);
@@ -1353,36 +1403,96 @@ static int exynos_dp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dp);
+ /* This is for backward compatibility. */
panel_node = of_parse_phandle(dev->of_node, "panel", 0);
if (panel_node) {
dp->panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
if (!dp->panel)
return -EPROBE_DEFER;
+ } else {
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (endpoint) {
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ if (panel_node) {
+ dp->panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!dp->panel)
+ return -EPROBE_DEFER;
+ } else {
+ DRM_ERROR("no port node for panel device.\n");
+ return -EINVAL;
+ }
+ }
}
+ if (endpoint)
+ goto out;
+
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
if (endpoint) {
bridge_node = of_graph_get_remote_port_parent(endpoint);
if (bridge_node) {
- dp->bridge = of_drm_find_bridge(bridge_node);
+ dp->ptn_bridge = of_drm_find_bridge(bridge_node);
of_node_put(bridge_node);
- if (!dp->bridge)
+ if (!dp->ptn_bridge)
return -EPROBE_DEFER;
} else
return -EPROBE_DEFER;
}
- return component_add(&pdev->dev, &exynos_dp_ops);
+out:
+ pm_runtime_enable(dev);
+
+ ret = component_add(&pdev->dev, &exynos_dp_ops);
+ if (ret)
+ goto err_disable_pm_runtime;
+
+ return ret;
+
+err_disable_pm_runtime:
+ pm_runtime_disable(dev);
+
+ return ret;
}
static int exynos_dp_remove(struct platform_device *pdev)
{
+ pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &exynos_dp_ops);
return 0;
}
+#ifdef CONFIG_PM
+static int exynos_dp_suspend(struct device *dev)
+{
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dp->clock);
+
+ return 0;
+}
+
+static int exynos_dp_resume(struct device *dev)
+{
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(dp->clock);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos_dp_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
+};
+
static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
{},
@@ -1395,6 +1505,7 @@ struct platform_driver dp_driver = {
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
+ .pm = &exynos_dp_pm_ops,
.of_match_table = exynos_dp_match,
},
};
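
With this change the DP power sequencing lives in an internal drm_bridge,
and an external PTN3460 bridge, when present, is chained behind it via
encoder->bridge->next. A sketch of the registration step, mirroring
exynos_dp_create_connector() above (attach_internal_bridge is an
illustrative name; drm_bridge_attach() takes (dev, bridge) in this
kernel version, as the hunks above show):

	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>

	static int attach_internal_bridge(struct drm_device *dev,
					  struct drm_encoder *encoder,
					  const struct drm_bridge_funcs *funcs,
					  void *priv)
	{
		struct drm_bridge *bridge;

		bridge = devm_kzalloc(dev->dev, sizeof(*bridge), GFP_KERNEL);
		if (!bridge)
			return -ENOMEM;

		bridge->driver_private = priv;
		bridge->encoder = encoder;
		bridge->funcs = funcs;
		encoder->bridge = bridge;	/* head of the bridge chain */

		return drm_bridge_attach(dev, bridge);
	}
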
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index e413b6f7b0e7..66eec4b2d5c6 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -153,6 +153,7 @@ struct exynos_dp_device {
struct drm_connector connector;
struct drm_panel *panel;
struct drm_bridge *bridge;
+ struct drm_bridge *ptn_bridge;
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index b3ba27fd9a6b..80f797414236 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -93,7 +93,7 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
+static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.enable = exynos_drm_crtc_enable,
.disable = exynos_drm_crtc_disable,
.mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
@@ -113,7 +113,7 @@ static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
kfree(exynos_crtc);
}
-static struct drm_crtc_funcs exynos_crtc_funcs = {
+static const struct drm_crtc_funcs exynos_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.destroy = exynos_drm_crtc_destroy,
@@ -150,7 +150,7 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
private->crtc[pipe] = crtc;
ret = drm_crtc_init_with_planes(drm_dev, crtc, plane, NULL,
- &exynos_crtc_funcs);
+ &exynos_crtc_funcs, NULL);
if (ret < 0)
goto err_crtc;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index c748b8790de3..05350ae0785b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -57,7 +57,7 @@ static void exynos_dpi_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static struct drm_connector_funcs exynos_dpi_connector_funcs = {
+static const struct drm_connector_funcs exynos_dpi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = exynos_dpi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -100,7 +100,7 @@ exynos_dpi_best_encoder(struct drm_connector *connector)
return &ctx->encoder;
}
-static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
.get_modes = exynos_dpi_get_modes,
.best_encoder = exynos_dpi_best_encoder,
};
@@ -161,14 +161,14 @@ static void exynos_dpi_disable(struct drm_encoder *encoder)
}
}
-static struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
.mode_fixup = exynos_dpi_mode_fixup,
.mode_set = exynos_dpi_mode_set,
.enable = exynos_dpi_enable,
.disable = exynos_dpi_disable,
};
-static struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
+static const struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
@@ -309,7 +309,7 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2c6019d6a205..9756797a15a5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -304,45 +304,6 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
-{
- struct drm_connector *connector;
-
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- int old_dpms = connector->dpms;
-
- if (connector->funcs->dpms)
- connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
-
- /* Set the old mode back to the connector for resume */
- connector->dpms = old_dpms;
- }
- drm_modeset_unlock_all(dev);
-
- return 0;
-}
-
-static int exynos_drm_resume(struct drm_device *dev)
-{
- struct drm_connector *connector;
-
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->funcs->dpms) {
- int dpms = connector->dpms;
-
- connector->dpms = DRM_MODE_DPMS_OFF;
- connector->funcs->dpms(connector, dpms);
- }
- }
- drm_modeset_unlock_all(dev);
-
- return 0;
-}
-#endif
-
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv;
@@ -476,31 +437,54 @@ static struct drm_driver exynos_drm_driver = {
};
#ifdef CONFIG_PM_SLEEP
-static int exynos_drm_sys_suspend(struct device *dev)
+static int exynos_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- pm_message_t message;
+ struct drm_connector *connector;
if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
- message.event = PM_EVENT_SUSPEND;
- return exynos_drm_suspend(drm_dev, message);
+ drm_modeset_lock_all(drm_dev);
+ drm_for_each_connector(connector, drm_dev) {
+ int old_dpms = connector->dpms;
+
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+
+ /* Set the old mode back to the connector for resume */
+ connector->dpms = old_dpms;
+ }
+ drm_modeset_unlock_all(drm_dev);
+
+ return 0;
}
-static int exynos_drm_sys_resume(struct device *dev)
+static int exynos_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct drm_connector *connector;
if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
- return exynos_drm_resume(drm_dev);
+ drm_modeset_lock_all(drm_dev);
+ drm_for_each_connector(connector, drm_dev) {
+ if (connector->funcs->dpms) {
+ int dpms = connector->dpms;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ connector->funcs->dpms(connector, dpms);
+ }
+ }
+ drm_modeset_unlock_all(drm_dev);
+
+ return 0;
}
#endif
static const struct dev_pm_ops exynos_drm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
};
/* forward declaration */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index f1eda7fa4e3c..82bbd7f4b316 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -38,24 +38,44 @@ enum exynos_drm_output_type {
EXYNOS_DISPLAY_TYPE_VIDI,
};
+struct exynos_drm_rect {
+ unsigned int x, y;
+ unsigned int w, h;
+};
+
/*
- * Exynos drm common overlay structure.
+ * Exynos drm plane state structure.
*
- * @base: plane object
- * @src_x: offset x on a framebuffer to be displayed.
- * - the unit is screen coordinates.
- * @src_y: offset y on a framebuffer to be displayed.
- * - the unit is screen coordinates.
- * @src_w: width of a partial image to be displayed from framebuffer.
- * @src_h: height of a partial image to be displayed from framebuffer.
- * @crtc_x: offset x on hardware screen.
- * @crtc_y: offset y on hardware screen.
- * @crtc_w: window width to be displayed (hardware screen).
- * @crtc_h: window height to be displayed (hardware screen).
+ * @base: plane_state object (contains drm_framebuffer pointer)
+ * @src: rectangle of the source image data to be displayed (clipped to
+ * visible part).
+ * @crtc: rectangle of the target image position on hardware screen
+ * (clipped to visible part).
* @h_ratio: horizontal scaling ratio, 16.16 fixed point
* @v_ratio: vertical scaling ratio, 16.16 fixed point
- * @dma_addr: array of bus(accessed by dma) address to the memory region
- * allocated for a overlay.
+ *
+ * This structure contains the plane state data that will be applied to the
+ * hardware-specific overlay info.
+ */
+
+struct exynos_drm_plane_state {
+ struct drm_plane_state base;
+ struct exynos_drm_rect crtc;
+ struct exynos_drm_rect src;
+ unsigned int h_ratio;
+ unsigned int v_ratio;
+};
+
+static inline struct exynos_drm_plane_state *
+to_exynos_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct exynos_drm_plane_state, base);
+}
+
+/*
+ * Exynos drm common overlay structure.
+ *
+ * @base: plane object
+ * @zpos: order of overlay layer (z position).
*
* this structure is common to exynos SoC and its contents would be copied
@@ -64,21 +84,32 @@ enum exynos_drm_output_type {
struct exynos_drm_plane {
struct drm_plane base;
- unsigned int src_x;
- unsigned int src_y;
- unsigned int src_w;
- unsigned int src_h;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_w;
- unsigned int crtc_h;
- unsigned int h_ratio;
- unsigned int v_ratio;
- dma_addr_t dma_addr[MAX_FB_BUFFER];
+ const struct exynos_drm_plane_config *config;
unsigned int zpos;
struct drm_framebuffer *pending_fb;
};
+#define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0)
+#define EXYNOS_DRM_PLANE_CAP_SCALE (1 << 1)
+
+/*
+ * Exynos DRM plane configuration structure.
+ *
+ * @zpos: z-position of the plane.
+ * @type: type of the plane (primary, cursor or overlay).
+ * @pixel_formats: supported pixel formats.
+ * @num_pixel_formats: number of elements in 'pixel_formats'.
+ * @capabilities: supported features (see EXYNOS_DRM_PLANE_CAP_*)
+ */
+
+struct exynos_drm_plane_config {
+ unsigned int zpos;
+ enum drm_plane_type type;
+ const uint32_t *pixel_formats;
+ unsigned int num_pixel_formats;
+ unsigned int capabilities;
+};
+
/*
* Exynos drm crtc ops
*
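
Since struct exynos_drm_plane_state embeds the core drm_plane_state as its
'base' member, to_exynos_plane_state() is a plain container_of() upcast:
driver callbacks receive the core state pointer and recover the wrapper
from it, as the decon_update_plane() hunks above do. A small usage sketch
(my_dump_plane is illustrative, not from the driver):

	#include <linux/printk.h>

	#include "exynos_drm_drv.h"

	static void my_dump_plane(struct drm_plane *plane)
	{
		struct exynos_drm_plane_state *state =
			to_exynos_plane_state(plane->state);

		/* pre-clipped destination rectangle on the CRTC */
		pr_debug("dst: %ux%u+%u+%u\n",
			 state->crtc.w, state->crtc.h,
			 state->crtc.x, state->crtc.y);
	}
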
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 12b03b364703..d84a498ef099 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1458,66 +1458,6 @@ static const struct mipi_dsi_host_ops exynos_dsi_ops = {
.transfer = exynos_dsi_host_transfer,
};
-static int exynos_dsi_poweron(struct exynos_dsi *dsi)
-{
- struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- int ret, i;
-
- ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
- if (ret < 0) {
- dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
- return ret;
- }
-
- for (i = 0; i < driver_data->num_clks; i++) {
- ret = clk_prepare_enable(dsi->clks[i]);
- if (ret < 0)
- goto err_clk;
- }
-
- ret = phy_power_on(dsi->phy);
- if (ret < 0) {
- dev_err(dsi->dev, "cannot enable phy %d\n", ret);
- goto err_clk;
- }
-
- return 0;
-
-err_clk:
- while (--i > -1)
- clk_disable_unprepare(dsi->clks[i]);
- regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
-
- return ret;
-}
-
-static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
-{
- struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- int ret, i;
-
- usleep_range(10000, 20000);
-
- if (dsi->state & DSIM_STATE_INITIALIZED) {
- dsi->state &= ~DSIM_STATE_INITIALIZED;
-
- exynos_dsi_disable_clock(dsi);
-
- exynos_dsi_disable_irq(dsi);
- }
-
- dsi->state &= ~DSIM_STATE_CMD_LPM;
-
- phy_power_off(dsi->phy);
-
- for (i = driver_data->num_clks - 1; i > -1; i--)
- clk_disable_unprepare(dsi->clks[i]);
-
- ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
- if (ret < 0)
- dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
-}
-
static void exynos_dsi_enable(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1526,16 +1466,14 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
if (dsi->state & DSIM_STATE_ENABLED)
return;
- ret = exynos_dsi_poweron(dsi);
- if (ret < 0)
- return;
+ pm_runtime_get_sync(dsi->dev);
dsi->state |= DSIM_STATE_ENABLED;
ret = drm_panel_prepare(dsi->panel);
if (ret < 0) {
dsi->state &= ~DSIM_STATE_ENABLED;
- exynos_dsi_poweroff(dsi);
+ pm_runtime_put_sync(dsi->dev);
return;
}
@@ -1547,7 +1485,7 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
dsi->state &= ~DSIM_STATE_ENABLED;
exynos_dsi_set_display_enable(dsi, false);
drm_panel_unprepare(dsi->panel);
- exynos_dsi_poweroff(dsi);
+ pm_runtime_put_sync(dsi->dev);
return;
}
@@ -1569,7 +1507,7 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
dsi->state &= ~DSIM_STATE_ENABLED;
- exynos_dsi_poweroff(dsi);
+ pm_runtime_put_sync(dsi->dev);
}
static enum drm_connector_status
@@ -1603,7 +1541,7 @@ static void exynos_dsi_connector_destroy(struct drm_connector *connector)
connector->dev = NULL;
}
-static struct drm_connector_funcs exynos_dsi_connector_funcs = {
+static const struct drm_connector_funcs exynos_dsi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = exynos_dsi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1631,7 +1569,7 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
return &dsi->encoder;
}
-static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
.get_modes = exynos_dsi_get_modes,
.best_encoder = exynos_dsi_best_encoder,
};
@@ -1684,14 +1622,14 @@ static void exynos_dsi_mode_set(struct drm_encoder *encoder,
vm->hsync_len = m->hsync_end - m->hsync_start;
}
-static struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
.mode_fixup = exynos_dsi_mode_fixup,
.mode_set = exynos_dsi_mode_set,
.enable = exynos_dsi_enable,
.disable = exynos_dsi_disable,
};
-static struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
+static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
@@ -1797,13 +1735,13 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
ep = of_graph_get_next_endpoint(node, NULL);
if (!ep) {
- ret = -ENXIO;
+ ret = -EINVAL;
goto end;
}
dsi->bridge_node = of_graph_get_remote_port_parent(ep);
if (!dsi->bridge_node) {
- ret = -ENXIO;
+ ret = -EINVAL;
goto end;
}
end:
@@ -1831,7 +1769,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
@@ -1954,22 +1892,99 @@ static int exynos_dsi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &dsi->encoder);
+ pm_runtime_enable(dev);
+
return component_add(dev, &exynos_dsi_component_ops);
}
static int exynos_dsi_remove(struct platform_device *pdev)
{
+ pm_runtime_disable(&pdev->dev);
+
component_del(&pdev->dev, &exynos_dsi_component_ops);
return 0;
}
+#ifdef CONFIG_PM
+static int exynos_dsi_suspend(struct device *dev)
+{
+ struct drm_encoder *encoder = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+ struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ int ret, i;
+
+ usleep_range(10000, 20000);
+
+ if (dsi->state & DSIM_STATE_INITIALIZED) {
+ dsi->state &= ~DSIM_STATE_INITIALIZED;
+
+ exynos_dsi_disable_clock(dsi);
+
+ exynos_dsi_disable_irq(dsi);
+ }
+
+ dsi->state &= ~DSIM_STATE_CMD_LPM;
+
+ phy_power_off(dsi->phy);
+
+ for (i = driver_data->num_clks - 1; i > -1; i--)
+ clk_disable_unprepare(dsi->clks[i]);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+ if (ret < 0)
+ dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
+
+ return 0;
+}
+
+static int exynos_dsi_resume(struct device *dev)
+{
+ struct drm_encoder *encoder = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+ struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ int ret, i;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < driver_data->num_clks; i++) {
+ ret = clk_prepare_enable(dsi->clks[i]);
+ if (ret < 0)
+ goto err_clk;
+ }
+
+ ret = phy_power_on(dsi->phy);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable phy %d\n", ret);
+ goto err_clk;
+ }
+
+ return 0;
+
+err_clk:
+ while (--i > -1)
+ clk_disable_unprepare(dsi->clks[i]);
+ regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops exynos_dsi_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
+};
+
struct platform_driver dsi_driver = {
.probe = exynos_dsi_probe,
.remove = exynos_dsi_remove,
.driver = {
.name = "exynos-dsi",
.owner = THIS_MODULE,
+ .pm = &exynos_dsi_pm_ops,
.of_match_table = exynos_dsi_of_match,
},
};
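
The dsi hunks above are the recurring pattern of this series: the open-coded exynos_dsi_poweron()/exynos_dsi_poweroff() pair becomes the runtime-PM callbacks exynos_dsi_suspend()/exynos_dsi_resume(), and the encoder enable/disable paths shrink to taking and dropping a runtime-PM reference. A minimal sketch of that conversion for a hypothetical "foo" platform driver follows; every foo_* name is illustrative, only the pm_runtime_* and dev_pm_ops APIs are real:

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int foo_runtime_suspend(struct device *dev)
    {
            /* undo whatever foo_runtime_resume() powered up */
            return 0;
    }

    static int foo_runtime_resume(struct device *dev)
    {
            /* enable regulators, clocks, PHY, ... */
            return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };

    static int foo_probe(struct platform_device *pdev)
    {
            pm_runtime_enable(&pdev->dev);  /* callbacks may fire from now on */
            return 0;
    }

    static int foo_remove(struct platform_device *pdev)
    {
            pm_runtime_disable(&pdev->dev);
            return 0;
    }

    /* consumers bracket hardware access with get/put, as the encoder does: */
    static void foo_enable(struct device *dev)
    {
            pm_runtime_get_sync(dev);       /* runs foo_runtime_resume() if needed */
    }

    static void foo_disable(struct device *dev)
    {
            pm_runtime_put_sync(dev);       /* may run foo_runtime_suspend() */
    }

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .remove = foo_remove,
            .driver = {
                    .name   = "foo",
                    .pm     = &foo_pm_ops,
            },
    };
    module_platform_driver(foo_driver);
    MODULE_LICENSE("GPL");
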
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 49b9bc302e87..cbbb1a86e70a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -37,6 +37,7 @@
struct exynos_drm_fb {
struct drm_framebuffer fb;
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
+ dma_addr_t dma_addr[MAX_FB_BUFFER];
};
static int check_fb_gem_memory_type(struct drm_device *drm_dev,
@@ -109,7 +110,7 @@ static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
return 0;
}
-static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
+static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
.destroy = exynos_drm_fb_destroy,
.create_handle = exynos_drm_fb_create_handle,
.dirty = exynos_drm_fb_dirty,
@@ -135,6 +136,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
goto err;
exynos_fb->exynos_gem[i] = exynos_gem[i];
+ exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr
+ + mode_cmd->offsets[i];
}
drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
@@ -189,21 +192,14 @@ err:
return ERR_PTR(ret);
}
-struct exynos_drm_gem *exynos_drm_fb_gem(struct drm_framebuffer *fb, int index)
+dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- struct exynos_drm_gem *exynos_gem;
if (index >= MAX_FB_BUFFER)
- return NULL;
+ return DMA_ERROR_CODE;
- exynos_gem = exynos_fb->exynos_gem[index];
- if (!exynos_gem)
- return NULL;
-
- DRM_DEBUG_KMS("dma_addr: 0x%lx\n", (unsigned long)exynos_gem->dma_addr);
-
- return exynos_gem;
+ return exynos_fb->dma_addr[index];
}
static void exynos_drm_output_poll_changed(struct drm_device *dev)
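
With the hunk above, per-plane DMA addresses are computed once at framebuffer creation (GEM base address plus the framebuffer offset) and scanout code fetches them through exynos_drm_fb_dma_addr() instead of chasing the GEM object on every update. A small sketch of that lookup, assuming the same MAX_FB_BUFFER bound and the arch-provided DMA_ERROR_CODE sentinel the patch returns for an out-of-range index; the example_* names are made up:

    #include <linux/dma-mapping.h>

    #define MAX_FB_BUFFER   4

    struct example_fb {
            dma_addr_t dma_addr[MAX_FB_BUFFER];
    };

    /* at fb creation: base address of each GEM buffer plus its fb offset */
    static void example_fb_init(struct example_fb *fb, const dma_addr_t base[],
                                const unsigned int offsets[], int count)
    {
            int i;

            for (i = 0; i < count && i < MAX_FB_BUFFER; i++)
                    fb->dma_addr[i] = base[i] + offsets[i];
    }

    /* scanout code then reads a plain dma_addr_t, no GEM lookup in the hot path */
    static dma_addr_t example_fb_dma_addr(struct example_fb *fb, int index)
    {
            if (index < 0 || index >= MAX_FB_BUFFER)
                    return DMA_ERROR_CODE;  /* arch error sentinel of this era */

            return fb->dma_addr[index];
    }
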
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index a8a75ac87e59..4aae9dd2b0d1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -22,8 +22,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct exynos_drm_gem **exynos_gem,
int count);
-/* get gem object of a drm framebuffer */
-struct exynos_drm_gem *exynos_drm_fb_gem(struct drm_framebuffer *fb, int index);
+dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index);
void exynos_drm_mode_config_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index bd75c1531cac..2e2247126581 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -29,6 +29,7 @@
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
+#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
@@ -87,7 +88,6 @@
/* FIMD has totally five hardware windows. */
#define WINDOWS_NR 5
-#define CURSOR_WIN 4
struct fimd_driver_data {
unsigned int timing_base;
@@ -150,6 +150,7 @@ struct fimd_context {
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
+ struct exynos_drm_plane_config configs[WINDOWS_NR];
struct clk *bus_clk;
struct clk *lcd_clk;
void __iomem *regs;
@@ -187,6 +188,14 @@ static const struct of_device_id fimd_driver_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+static const enum drm_plane_type fimd_win_types[WINDOWS_NR] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_OVERLAY,
+ DRM_PLANE_TYPE_OVERLAY,
+ DRM_PLANE_TYPE_OVERLAY,
+ DRM_PLANE_TYPE_CURSOR,
+};
+
static const uint32_t fimd_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_XRGB1555,
@@ -478,7 +487,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
- struct drm_framebuffer *fb)
+ uint32_t pixel_format, int width)
{
unsigned long val;
@@ -489,11 +498,11 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
* So the request format is ARGB8888 then change it to XRGB8888.
*/
if (ctx->driver_data->has_limited_fmt && !win) {
- if (fb->pixel_format == DRM_FORMAT_ARGB8888)
- fb->pixel_format = DRM_FORMAT_XRGB8888;
+ if (pixel_format == DRM_FORMAT_ARGB8888)
+ pixel_format = DRM_FORMAT_XRGB8888;
}
- switch (fb->pixel_format) {
+ switch (pixel_format) {
case DRM_FORMAT_C8:
val |= WINCON0_BPPMODE_8BPP_PALETTE;
val |= WINCONx_BURSTLEN_8WORD;
@@ -529,17 +538,15 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
break;
}
- DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
-
/*
- * In case of exynos, setting dma-burst to 16Word causes permanent
- * tearing for very small buffers, e.g. cursor buffer. Burst Mode
- * switching which is based on plane size is not recommended as
- * plane size varies alot towards the end of the screen and rapid
- * movement causes unstable DMA which results into iommu crash/tear.
+ * Setting dma-burst to 16Word causes permanent tearing for very small
+ * buffers, e.g. cursor buffer. Burst Mode switching which is based on
+ * plane size is not recommended, as plane size varies a lot towards the
+ * end of the screen and rapid movement causes unstable DMA; but it is
+ * still better to change the dma-burst than to display garbage.
*/
- if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+ if (width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_4WORD;
}
@@ -640,39 +647,41 @@ static void fimd_atomic_flush(struct exynos_drm_crtc *crtc,
static void fimd_update_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
+ struct exynos_drm_plane_state *state =
+ to_exynos_plane_state(plane->base.state);
struct fimd_context *ctx = crtc->ctx;
- struct drm_plane_state *state = plane->base.state;
+ struct drm_framebuffer *fb = state->base.fb;
dma_addr_t dma_addr;
unsigned long val, size, offset;
unsigned int last_x, last_y, buf_offsize, line_size;
unsigned int win = plane->zpos;
- unsigned int bpp = state->fb->bits_per_pixel >> 3;
- unsigned int pitch = state->fb->pitches[0];
+ unsigned int bpp = fb->bits_per_pixel >> 3;
+ unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
return;
- offset = plane->src_x * bpp;
- offset += plane->src_y * pitch;
+ offset = state->src.x * bpp;
+ offset += state->src.y * pitch;
/* buffer start address */
- dma_addr = plane->dma_addr[0] + offset;
+ dma_addr = exynos_drm_fb_dma_addr(fb, 0) + offset;
val = (unsigned long)dma_addr;
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
- size = pitch * plane->crtc_h;
+ size = pitch * state->crtc.h;
val = (unsigned long)(dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
(unsigned long)dma_addr, val, size);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- plane->crtc_w, plane->crtc_h);
+ state->crtc.w, state->crtc.h);
/* buffer size */
- buf_offsize = pitch - (plane->crtc_w * bpp);
- line_size = plane->crtc_w * bpp;
+ buf_offsize = pitch - (state->crtc.w * bpp);
+ line_size = state->crtc.w * bpp;
val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
@@ -680,16 +689,16 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
/* OSD position */
- val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
- VIDOSDxA_TOPLEFT_Y(plane->crtc_y) |
- VIDOSDxA_TOPLEFT_X_E(plane->crtc_x) |
- VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y);
+ val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
+ VIDOSDxA_TOPLEFT_Y(state->crtc.y) |
+ VIDOSDxA_TOPLEFT_X_E(state->crtc.x) |
+ VIDOSDxA_TOPLEFT_Y_E(state->crtc.y);
writel(val, ctx->regs + VIDOSD_A(win));
- last_x = plane->crtc_x + plane->crtc_w;
+ last_x = state->crtc.x + state->crtc.w;
if (last_x)
last_x--;
- last_y = plane->crtc_y + plane->crtc_h;
+ last_y = state->crtc.y + state->crtc.h;
if (last_y)
last_y--;
@@ -699,20 +708,20 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- plane->crtc_x, plane->crtc_y, last_x, last_y);
+ state->crtc.x, state->crtc.y, last_x, last_y);
/* OSD size */
if (win != 3 && win != 4) {
u32 offset = VIDOSD_D(win);
if (win == 0)
offset = VIDOSD_C(win);
- val = plane->crtc_w * plane->crtc_h;
+ val = state->crtc.w * state->crtc.h;
writel(val, ctx->regs + offset);
DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
}
- fimd_win_set_pixfmt(ctx, win, state->fb);
+ fimd_win_set_pixfmt(ctx, win, fb->pixel_format, state->src.w);
/* hardware window 0 doesn't support color key. */
if (win != 0)
@@ -745,7 +754,6 @@ static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
static void fimd_enable(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
- int ret;
if (!ctx->suspended)
return;
@@ -754,18 +762,6 @@ static void fimd_enable(struct exynos_drm_crtc *crtc)
pm_runtime_get_sync(ctx->dev);
- ret = clk_prepare_enable(ctx->bus_clk);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
- return;
- }
-
- ret = clk_prepare_enable(ctx->lcd_clk);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
- return;
- }
-
/* if vblank was enabled status, enable it again. */
if (test_and_clear_bit(0, &ctx->irq_flags))
fimd_enable_vblank(ctx->crtc);
@@ -795,11 +791,7 @@ static void fimd_disable(struct exynos_drm_crtc *crtc)
writel(0, ctx->regs + VIDCON0);
- clk_disable_unprepare(ctx->lcd_clk);
- clk_disable_unprepare(ctx->bus_clk);
-
pm_runtime_put_sync(ctx->dev);
-
ctx->suspended = true;
}
@@ -941,18 +933,19 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
struct exynos_drm_private *priv = drm_dev->dev_private;
struct exynos_drm_plane *exynos_plane;
- enum drm_plane_type type;
- unsigned int zpos;
+ unsigned int i;
int ret;
ctx->drm_dev = drm_dev;
ctx->pipe = priv->pipe++;
- for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
- type = exynos_plane_get_type(zpos, CURSOR_WIN);
- ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
- 1 << ctx->pipe, type, fimd_formats,
- ARRAY_SIZE(fimd_formats), zpos);
+ for (i = 0; i < WINDOWS_NR; i++) {
+ ctx->configs[i].pixel_formats = fimd_formats;
+ ctx->configs[i].num_pixel_formats = ARRAY_SIZE(fimd_formats);
+ ctx->configs[i].zpos = i;
+ ctx->configs[i].type = fimd_win_types[i];
+ ret = exynos_plane_init(drm_dev, &ctx->planes[i],
+ 1 << ctx->pipe, &ctx->configs[i]);
if (ret)
return ret;
}
@@ -1121,12 +1114,49 @@ static int fimd_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int exynos_fimd_suspend(struct device *dev)
+{
+ struct fimd_context *ctx = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ctx->lcd_clk);
+ clk_disable_unprepare(ctx->bus_clk);
+
+ return 0;
+}
+
+static int exynos_fimd_resume(struct device *dev)
+{
+ struct fimd_context *ctx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(ctx->bus_clk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctx->lcd_clk);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos_fimd_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos_fimd_suspend, exynos_fimd_resume, NULL)
+};
+
struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = fimd_remove,
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
+ .pm = &exynos_fimd_pm_ops,
.of_match_table = fimd_driver_dt_match,
},
};
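
fimd now describes its five hardware windows with a static type table plus one exynos_drm_plane_config per window, instead of deriving the plane type from the removed zpos/CURSOR_WIN heuristic. A hedged sketch of the same table-driven setup; the config fields mirror the hunks above, while the my_* names are invented:

    #include <linux/types.h>
    #include <drm/drm_crtc.h>

    #define MY_WINDOWS_NR 5

    struct my_plane_config {
            unsigned int zpos;
            enum drm_plane_type type;
            const uint32_t *pixel_formats;
            unsigned int num_pixel_formats;
    };

    static const enum drm_plane_type my_win_types[MY_WINDOWS_NR] = {
            DRM_PLANE_TYPE_PRIMARY, /* window 0 scans out the primary fb */
            DRM_PLANE_TYPE_OVERLAY,
            DRM_PLANE_TYPE_OVERLAY,
            DRM_PLANE_TYPE_OVERLAY,
            DRM_PLANE_TYPE_CURSOR,  /* topmost window doubles as the cursor */
    };

    static void my_fill_configs(struct my_plane_config cfg[MY_WINDOWS_NR],
                                const uint32_t *formats, unsigned int nformats)
    {
            unsigned int i;

            for (i = 0; i < MY_WINDOWS_NR; i++) {
                    cfg[i].pixel_formats = formats;
                    cfg[i].num_pixel_formats = nformats;
                    cfg[i].zpos = i;        /* hardware window index == z-position */
                    cfg[i].type = my_win_types[i];
            }
    }
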
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 37ab8b282db6..9ca5047959ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -55,8 +55,6 @@ struct exynos_drm_gem {
struct sg_table *sgt;
};
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
-
/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem);
@@ -91,10 +89,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *filp);
-/* map user space allocated by malloc to pages. */
-int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -123,28 +117,6 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
/* set vm_flags and we can change the vm attribute to other one at here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
- return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
-/* get a copy of a virtual memory region. */
-struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
-
-/* release a userspace virtual memory area. */
-void exynos_gem_put_vma(struct vm_area_struct *vma);
-
-/* get pages from user space. */
-int exynos_gem_get_pages_from_userptr(unsigned long start,
- unsigned int npages,
- struct page **pages,
- struct vm_area_struct *vma);
-
-/* drop the reference to pages. */
-void exynos_gem_put_pages_to_userptr(struct page **pages,
- unsigned int npages,
- struct vm_area_struct *vma);
-
/* map sgt with dma region. */
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 11b87d2a7913..7aecd23cfa11 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -15,7 +15,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
-#include <plat/map-base.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@@ -126,6 +127,7 @@ struct gsc_capability {
* @ippdrv: prepare initialization using ippdrv.
* @regs_res: register resources.
* @regs: memory mapped io registers.
+ * @sysreg: handle to SYSREG block regmap.
* @lock: locking of operations.
* @gsc_clk: gsc gate clock.
 * @sc: scaler information.
@@ -138,6 +140,7 @@ struct gsc_context {
struct exynos_drm_ippdrv ippdrv;
struct resource *regs_res;
void __iomem *regs;
+ struct regmap *sysreg;
struct mutex lock;
struct clk *gsc_clk;
struct gsc_scaler sc;
@@ -437,9 +440,12 @@ static int gsc_sw_reset(struct gsc_context *ctx)
static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
{
- u32 gscblk_cfg;
+ unsigned int gscblk_cfg;
- gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+ if (!ctx->sysreg)
+ return;
+
+ regmap_read(ctx->sysreg, SYSREG_GSCBLK_CFG1, &gscblk_cfg);
if (enable)
gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
@@ -448,7 +454,7 @@ static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
else
gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
- writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+ regmap_write(ctx->sysreg, SYSREG_GSCBLK_CFG1, gscblk_cfg);
}
static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
@@ -1215,10 +1221,10 @@ static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
DRM_DEBUG_KMS("enable[%d]\n", enable);
if (enable) {
- clk_enable(ctx->gsc_clk);
+ clk_prepare_enable(ctx->gsc_clk);
ctx->suspended = false;
} else {
- clk_disable(ctx->gsc_clk);
+ clk_disable_unprepare(ctx->gsc_clk);
ctx->suspended = true;
}
@@ -1663,6 +1669,15 @@ static int gsc_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
+ if (dev->of_node) {
+ ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,sysreg");
+ if (IS_ERR(ctx->sysreg)) {
+ dev_warn(dev, "failed to get system register.\n");
+ ctx->sysreg = NULL;
+ }
+ }
+
/* clock control */
ctx->gsc_clk = devm_clk_get(dev, "gscl");
if (IS_ERR(ctx->gsc_clk)) {
@@ -1713,7 +1728,6 @@ static int gsc_probe(struct platform_device *pdev)
mutex_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
- pm_runtime_set_active(dev);
pm_runtime_enable(dev);
ret = exynos_drm_ippdrv_register(ippdrv);
@@ -1797,6 +1811,12 @@ static const struct dev_pm_ops gsc_pm_ops = {
SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
};
+static const struct of_device_id exynos_drm_gsc_of_match[] = {
+ { .compatible = "samsung,exynos5-gsc" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match);
+
struct platform_driver gsc_driver = {
.probe = gsc_probe,
.remove = gsc_remove,
@@ -1804,6 +1824,7 @@ struct platform_driver gsc_driver = {
.name = "exynos-drm-gsc",
.owner = THIS_MODULE,
.pm = &gsc_pm_ops,
+ .of_match_table = of_match_ptr(exynos_drm_gsc_of_match),
},
};
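
The gsc change swaps a hard-coded SYSREG virtual address (S3C_VA_SYS + offset, see the regs-gsc.h hunk further down) for a regmap obtained through the "samsung,sysreg" phandle, so the driver no longer depends on a machine header. Roughly the following, minus the graceful fallback the probe keeps when the phandle is missing; the my_* names are illustrative:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/mfd/syscon.h>
    #include <linux/regmap.h>

    #define MY_SYSREG_CFG1  0x0224  /* offset within the syscon block */

    static int my_set_sysreg_bit(struct device *dev, unsigned int bit)
    {
            struct regmap *sysreg;
            unsigned int val;
            int ret;

            sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
                                                     "samsung,sysreg");
            if (IS_ERR(sysreg))
                    return PTR_ERR(sysreg); /* the patch warns and skips instead */

            ret = regmap_read(sysreg, MY_SYSREG_CFG1, &val);
            if (ret)
                    return ret;

            return regmap_write(sysreg, MY_SYSREG_CFG1, val | (1U << bit));
    }

regmap_update_bits() would fold the read-modify-write into one call; the patch keeps separate regmap_read()/regmap_write() calls, mirroring the old readl()/writel() flow.
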
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 8994eab56ba8..4eaef36aec5a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -389,7 +389,7 @@ already_disabled:
mutex_unlock(&mic_mutex);
}
-struct drm_bridge_funcs mic_bridge_funcs = {
+static const struct drm_bridge_funcs mic_bridge_funcs = {
.disable = mic_disable,
.post_disable = mic_post_disable,
.pre_enable = mic_pre_enable,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 179311760bb7..e668fcdbcafc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -56,93 +56,170 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
return size;
}
-static void exynos_plane_mode_set(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
{
- struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+ struct drm_plane_state *state = &exynos_state->base;
+ struct drm_crtc *crtc = exynos_state->base.crtc;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ unsigned int src_x, src_y;
+ unsigned int src_w, src_h;
unsigned int actual_w;
unsigned int actual_h;
+ /*
+ * The original src/dest coordinates are stored in exynos_state->base,
+ * but we want to keep another copy internal to our driver that we can
+ * clip/modify ourselves.
+ */
+
+ crtc_x = state->crtc_x;
+ crtc_y = state->crtc_y;
+ crtc_w = state->crtc_w;
+ crtc_h = state->crtc_h;
+
+ src_x = state->src_x >> 16;
+ src_y = state->src_y >> 16;
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+
+ /* set ratio */
+ exynos_state->h_ratio = (src_w << 16) / crtc_w;
+ exynos_state->v_ratio = (src_h << 16) / crtc_h;
+
+ /* clip to visible area */
actual_w = exynos_plane_get_size(crtc_x, crtc_w, mode->hdisplay);
actual_h = exynos_plane_get_size(crtc_y, crtc_h, mode->vdisplay);
if (crtc_x < 0) {
if (actual_w)
- src_x -= crtc_x;
+ src_x += ((-crtc_x) * exynos_state->h_ratio) >> 16;
crtc_x = 0;
}
if (crtc_y < 0) {
if (actual_h)
- src_y -= crtc_y;
+ src_y += ((-crtc_y) * exynos_state->v_ratio) >> 16;
crtc_y = 0;
}
- /* set ratio */
- exynos_plane->h_ratio = (src_w << 16) / crtc_w;
- exynos_plane->v_ratio = (src_h << 16) / crtc_h;
-
/* set drm framebuffer data. */
- exynos_plane->src_x = src_x;
- exynos_plane->src_y = src_y;
- exynos_plane->src_w = (actual_w * exynos_plane->h_ratio) >> 16;
- exynos_plane->src_h = (actual_h * exynos_plane->v_ratio) >> 16;
+ exynos_state->src.x = src_x;
+ exynos_state->src.y = src_y;
+ exynos_state->src.w = (actual_w * exynos_state->h_ratio) >> 16;
+ exynos_state->src.h = (actual_h * exynos_state->v_ratio) >> 16;
/* set plane range to be displayed. */
- exynos_plane->crtc_x = crtc_x;
- exynos_plane->crtc_y = crtc_y;
- exynos_plane->crtc_w = actual_w;
- exynos_plane->crtc_h = actual_h;
+ exynos_state->crtc.x = crtc_x;
+ exynos_state->crtc.y = crtc_y;
+ exynos_state->crtc.w = actual_w;
+ exynos_state->crtc.h = actual_h;
DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
- exynos_plane->crtc_x, exynos_plane->crtc_y,
- exynos_plane->crtc_w, exynos_plane->crtc_h);
+ exynos_state->crtc.x, exynos_state->crtc.y,
+ exynos_state->crtc.w, exynos_state->crtc.h);
+}
+
+static void exynos_drm_plane_reset(struct drm_plane *plane)
+{
+ struct exynos_drm_plane_state *exynos_state;
+
+ if (plane->state) {
+ exynos_state = to_exynos_plane_state(plane->state);
+ if (exynos_state->base.fb)
+ drm_framebuffer_unreference(exynos_state->base.fb);
+ kfree(exynos_state);
+ plane->state = NULL;
+ }
+
+ exynos_state = kzalloc(sizeof(*exynos_state), GFP_KERNEL);
+ if (exynos_state) {
+ plane->state = &exynos_state->base;
+ plane->state->plane = plane;
+ }
+}
+
+static struct drm_plane_state *
+exynos_drm_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct exynos_drm_plane_state *exynos_state;
+ struct exynos_drm_plane_state *copy;
+
+ exynos_state = to_exynos_plane_state(plane->state);
+ copy = kzalloc(sizeof(*exynos_state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
+ return &copy->base;
+}
- plane->crtc = crtc;
+static void exynos_drm_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct exynos_drm_plane_state *old_exynos_state =
+ to_exynos_plane_state(old_state);
+ __drm_atomic_helper_plane_destroy_state(plane, old_state);
+ kfree(old_exynos_state);
}
static struct drm_plane_funcs exynos_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .reset = exynos_drm_plane_reset,
+ .atomic_duplicate_state = exynos_drm_plane_duplicate_state,
+ .atomic_destroy_state = exynos_drm_plane_destroy_state,
};
+static int
+exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
+ struct exynos_drm_plane_state *state)
+{
+ bool width_ok = false, height_ok = false;
+
+ if (config->capabilities & EXYNOS_DRM_PLANE_CAP_SCALE)
+ return 0;
+
+ if (state->src.w == state->crtc.w)
+ width_ok = true;
+
+ if (state->src.h == state->crtc.h)
+ height_ok = true;
+
+ if ((config->capabilities & EXYNOS_DRM_PLANE_CAP_DOUBLE) &&
+ state->h_ratio == (1 << 15))
+ width_ok = true;
+
+ if ((config->capabilities & EXYNOS_DRM_PLANE_CAP_DOUBLE) &&
+ state->v_ratio == (1 << 15))
+ height_ok = true;
+
+	if (width_ok && height_ok)
+ return 0;
+
+ DRM_DEBUG_KMS("scaling mode is not supported");
+ return -ENOTSUPP;
+}
+
static int exynos_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
- int nr;
- int i;
+ struct exynos_drm_plane_state *exynos_state =
+ to_exynos_plane_state(state);
+ int ret = 0;
- if (!state->fb)
+ if (!state->crtc || !state->fb)
return 0;
- nr = drm_format_num_planes(state->fb->pixel_format);
- for (i = 0; i < nr; i++) {
- struct exynos_drm_gem *exynos_gem =
- exynos_drm_fb_gem(state->fb, i);
- if (!exynos_gem) {
- DRM_DEBUG_KMS("gem object is null\n");
- return -EFAULT;
- }
-
- exynos_plane->dma_addr[i] = exynos_gem->dma_addr +
- state->fb->offsets[i];
-
- DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
- i, (unsigned long)exynos_plane->dma_addr[i]);
- }
+ /* translate state into exynos_state */
+ exynos_plane_mode_set(exynos_state);
- return 0;
+ ret = exynos_drm_plane_check_size(exynos_plane->config, exynos_state);
+ return ret;
}
static void exynos_plane_atomic_update(struct drm_plane *plane,
@@ -155,12 +232,7 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
if (!state->crtc)
return;
- exynos_plane_mode_set(plane, state->crtc, state->fb,
- state->crtc_x, state->crtc_y,
- state->crtc_w, state->crtc_h,
- state->src_x >> 16, state->src_y >> 16,
- state->src_w >> 16, state->src_h >> 16);
-
+ plane->crtc = state->crtc;
exynos_plane->pending_fb = state->fb;
if (exynos_crtc->ops->update_plane)
@@ -177,8 +249,7 @@ static void exynos_plane_atomic_disable(struct drm_plane *plane,
return;
if (exynos_crtc->ops->disable_plane)
- exynos_crtc->ops->disable_plane(exynos_crtc,
- exynos_plane);
+ exynos_crtc->ops->disable_plane(exynos_crtc, exynos_plane);
}
static const struct drm_plane_helper_funcs plane_helper_funcs = {
@@ -207,28 +278,19 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
drm_object_attach_property(&plane->base, prop, zpos);
}
-enum drm_plane_type exynos_plane_get_type(unsigned int zpos,
- unsigned int cursor_win)
-{
- if (zpos == DEFAULT_WIN)
- return DRM_PLANE_TYPE_PRIMARY;
- else if (zpos == cursor_win)
- return DRM_PLANE_TYPE_CURSOR;
- else
- return DRM_PLANE_TYPE_OVERLAY;
-}
-
int exynos_plane_init(struct drm_device *dev,
struct exynos_drm_plane *exynos_plane,
- unsigned long possible_crtcs, enum drm_plane_type type,
- const uint32_t *formats, unsigned int fcount,
- unsigned int zpos)
+ unsigned long possible_crtcs,
+ const struct exynos_drm_plane_config *config)
{
int err;
- err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
- &exynos_plane_funcs, formats, fcount,
- type);
+ err = drm_universal_plane_init(dev, &exynos_plane->base,
+ possible_crtcs,
+ &exynos_plane_funcs,
+ config->pixel_formats,
+ config->num_pixel_formats,
+ config->type, NULL);
if (err) {
DRM_ERROR("failed to initialize plane\n");
return err;
@@ -236,10 +298,12 @@ int exynos_plane_init(struct drm_device *dev,
drm_plane_helper_add(&exynos_plane->base, &plane_helper_funcs);
- exynos_plane->zpos = zpos;
+ exynos_plane->zpos = config->zpos;
+ exynos_plane->config = config;
- if (type == DRM_PLANE_TYPE_OVERLAY)
- exynos_plane_attach_zpos_property(&exynos_plane->base, zpos);
+ if (config->type == DRM_PLANE_TYPE_OVERLAY)
+ exynos_plane_attach_zpos_property(&exynos_plane->base,
+ config->zpos);
return 0;
}
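
The new plane state keeps h_ratio/v_ratio in 16.16 fixed point, (src << 16) / crtc, so 0x10000 is 1:1 and 1 << 15 is an exact 2x upscale, which is what the EXYNOS_DRM_PLANE_CAP_DOUBLE test above compares against. The clipping code maps CRTC-space overhang back into source pixels by multiplying by the ratio and shifting down. A standalone arithmetic demo of both steps (userspace C, values chosen for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t src_w = 64, crtc_w = 128;      /* 2x horizontal upscale */
            int crtc_x = -10;                       /* plane hangs off the left edge */

            uint32_t h_ratio = (src_w << 16) / crtc_w;      /* 32768 == 1 << 15 */
            /* CRTC-space overhang translated back into source pixels */
            uint32_t src_skip = ((uint32_t)(-crtc_x) * h_ratio) >> 16;      /* 5 */

            printf("h_ratio=0x%x (1<<15 means exact 2x)\n", (unsigned)h_ratio);
            printf("skip %u source pixels for %d clipped crtc pixels\n",
                   (unsigned)src_skip, -crtc_x);
            return 0;
    }
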
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index abb641e64c23..0dd096548284 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -9,10 +9,7 @@
*
*/
-enum drm_plane_type exynos_plane_get_type(unsigned int zpos,
- unsigned int cursor_win);
int exynos_plane_init(struct drm_device *dev,
struct exynos_drm_plane *exynos_plane,
- unsigned long possible_crtcs, enum drm_plane_type type,
- const uint32_t *formats, unsigned int fcount,
- unsigned int zpos);
+ unsigned long possible_crtcs,
+ const struct exynos_drm_plane_config *config);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 2f5c118f4c8e..bea0f7826d30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -790,10 +790,10 @@ static int rotator_remove(struct platform_device *pdev)
static int rotator_clk_crtl(struct rot_context *rot, bool enable)
{
if (enable) {
- clk_enable(rot->clock);
+ clk_prepare_enable(rot->clock);
rot->suspended = false;
} else {
- clk_disable(rot->clock);
+ clk_disable_unprepare(rot->clock);
rot->suspended = true;
}
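
clk_enable() is only legal on a clock that has already been prepared; preparation is the part that may sleep. Drivers like the rotator that called bare clk_enable() were relying on someone else having prepared the clock, so the hunk switches to the combined helpers that keep the pair balanced. A sketch of the idiom:

    #include <linux/clk.h>
    #include <linux/device.h>

    static int my_start(struct device *dev, struct clk *clock)
    {
            int ret;

            ret = clk_prepare_enable(clock);        /* clk_prepare() + clk_enable() */
            if (ret)
                    dev_err(dev, "cannot enable clock: %d\n", ret);
            return ret;
    }

    static void my_stop(struct clk *clock)
    {
            clk_disable_unprepare(clock);   /* clk_disable() + clk_unprepare() */
    }
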
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 669362c53f49..0be29c1b2c05 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -24,12 +24,12 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
/* vidi has totally three virtual windows. */
#define WINDOWS_NR 3
-#define CURSOR_WIN 2
#define ctx_from_connector(c) container_of(c, struct vidi_context, \
connector)
@@ -89,6 +89,12 @@ static const uint32_t formats[] = {
DRM_FORMAT_NV12,
};
+static const enum drm_plane_type vidi_win_types[WINDOWS_NR] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_OVERLAY,
+ DRM_PLANE_TYPE_CURSOR,
+};
+
static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
@@ -125,12 +131,15 @@ static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
static void vidi_update_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
+ struct drm_plane_state *state = plane->base.state;
struct vidi_context *ctx = crtc->ctx;
+ dma_addr_t addr;
if (ctx->suspended)
return;
- DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr);
+ addr = exynos_drm_fb_dma_addr(state->fb, 0);
+ DRM_DEBUG_KMS("dma_addr = %pad\n", &addr);
if (ctx->vblank_on)
schedule_work(&ctx->work);
@@ -330,7 +339,7 @@ static void vidi_connector_destroy(struct drm_connector *connector)
{
}
-static struct drm_connector_funcs vidi_connector_funcs = {
+static const struct drm_connector_funcs vidi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = vidi_detect,
@@ -374,7 +383,7 @@ static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
return &ctx->encoder;
}
-static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
.get_modes = vidi_get_modes,
.best_encoder = vidi_best_encoder,
};
@@ -422,14 +431,14 @@ static void exynos_vidi_disable(struct drm_encoder *encoder)
{
}
-static struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
.mode_fixup = exynos_vidi_mode_fixup,
.mode_set = exynos_vidi_mode_set,
.enable = exynos_vidi_enable,
.disable = exynos_vidi_disable,
};
-static struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
+static const struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
@@ -439,17 +448,21 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
struct drm_encoder *encoder = &ctx->encoder;
struct exynos_drm_plane *exynos_plane;
- enum drm_plane_type type;
- unsigned int zpos;
+ struct exynos_drm_plane_config plane_config = { 0 };
+ unsigned int i;
int pipe, ret;
vidi_ctx_initialize(ctx, drm_dev);
- for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
- type = exynos_plane_get_type(zpos, CURSOR_WIN);
- ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
- 1 << ctx->pipe, type, formats,
- ARRAY_SIZE(formats), zpos);
+ plane_config.pixel_formats = formats;
+ plane_config.num_pixel_formats = ARRAY_SIZE(formats);
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ plane_config.zpos = i;
+ plane_config.type = vidi_win_types[i];
+
+ ret = exynos_plane_init(drm_dev, &ctx->planes[i],
+ 1 << ctx->pipe, &plane_config);
if (ret)
return ret;
}
@@ -473,7 +486,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 57b675563e94..21a29dbce18c 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -113,7 +113,7 @@ struct hdmi_context {
void __iomem *regs_hdmiphy;
struct i2c_client *hdmiphy_port;
struct i2c_adapter *ddc_adpt;
- struct gpio_desc *hpd_gpio;
+ struct gpio_desc *hpd_gpio;
int irq;
struct regmap *pmureg;
struct clk *hdmi;
@@ -956,7 +956,7 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static struct drm_connector_funcs hdmi_connector_funcs = {
+static const struct drm_connector_funcs hdmi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = hdmi_detect,
@@ -1030,7 +1030,7 @@ static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
return &hdata->encoder;
}
-static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
.get_modes = hdmi_get_modes,
.mode_valid = hdmi_mode_valid,
.best_encoder = hdmi_best_encoder,
@@ -1588,8 +1588,6 @@ static void hdmi_enable(struct drm_encoder *encoder)
if (hdata->powered)
return;
- hdata->powered = true;
-
pm_runtime_get_sync(hdata->dev);
if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk))
@@ -1599,10 +1597,9 @@ static void hdmi_enable(struct drm_encoder *encoder)
regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
PMU_HDMI_PHY_ENABLE_BIT, 1);
- clk_prepare_enable(hdata->hdmi);
- clk_prepare_enable(hdata->sclk_hdmi);
-
hdmi_conf_apply(hdata);
+
+ hdata->powered = true;
}
static void hdmi_disable(struct drm_encoder *encoder)
@@ -1633,9 +1630,6 @@ static void hdmi_disable(struct drm_encoder *encoder)
cancel_delayed_work(&hdata->hotplug_work);
- clk_disable_unprepare(hdata->sclk_hdmi);
- clk_disable_unprepare(hdata->hdmi);
-
/* reset pmu hdmiphy control bit to disable hdmiphy */
regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
PMU_HDMI_PHY_ENABLE_BIT, 0);
@@ -1647,14 +1641,14 @@ static void hdmi_disable(struct drm_encoder *encoder)
hdata->powered = false;
}
-static struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
.mode_fixup = hdmi_mode_fixup,
.mode_set = hdmi_mode_set,
.enable = hdmi_enable,
.disable = hdmi_disable,
};
-static struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
+static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
@@ -1793,7 +1787,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
@@ -1978,12 +1972,49 @@ static int hdmi_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int exynos_hdmi_suspend(struct device *dev)
+{
+ struct hdmi_context *hdata = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(hdata->sclk_hdmi);
+ clk_disable_unprepare(hdata->hdmi);
+
+ return 0;
+}
+
+static int exynos_hdmi_resume(struct device *dev)
+{
+ struct hdmi_context *hdata = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(hdata->hdmi);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(hdata->sclk_hdmi);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the sclk_mixer clk [%d]\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos_hdmi_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
+};
+
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
.remove = hdmi_remove,
.driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
+ .pm = &exynos_hdmi_pm_ops,
.of_match_table = hdmi_match_types,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index d09f8f9a8939..dfb35e2da4db 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,12 +37,12 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
#define MIXER_WIN_NR 3
#define VP_DEFAULT_WIN 2
-#define CURSOR_WIN 1
/* The pixelformats that are natively supported by the mixer. */
#define MXR_FORMAT_RGB565 4
@@ -111,6 +111,28 @@ struct mixer_drv_data {
bool has_sclk;
};
+static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
+ {
+ .zpos = 0,
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .pixel_formats = mixer_formats,
+ .num_pixel_formats = ARRAY_SIZE(mixer_formats),
+ .capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE,
+ }, {
+ .zpos = 1,
+ .type = DRM_PLANE_TYPE_CURSOR,
+ .pixel_formats = mixer_formats,
+ .num_pixel_formats = ARRAY_SIZE(mixer_formats),
+ .capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE,
+ }, {
+ .zpos = 2,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .pixel_formats = vp_formats,
+ .num_pixel_formats = ARRAY_SIZE(vp_formats),
+ .capabilities = EXYNOS_DRM_PLANE_CAP_SCALE,
+ },
+};
+
static const u8 filter_y_horiz_tap8[] = {
0, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, 0, 0, 0,
@@ -399,10 +421,11 @@ static void mixer_stop(struct mixer_context *ctx)
static void vp_video_buffer(struct mixer_context *ctx,
struct exynos_drm_plane *plane)
{
+ struct exynos_drm_plane_state *state =
+ to_exynos_plane_state(plane->base.state);
+ struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
struct mixer_resources *res = &ctx->mixer_res;
- struct drm_plane_state *state = plane->base.state;
- struct drm_framebuffer *fb = state->fb;
- struct drm_display_mode *mode = &state->crtc->mode;
+ struct drm_framebuffer *fb = state->base.fb;
unsigned long flags;
dma_addr_t luma_addr[2], chroma_addr[2];
bool tiled_mode = false;
@@ -422,8 +445,8 @@ static void vp_video_buffer(struct mixer_context *ctx,
return;
}
- luma_addr[0] = plane->dma_addr[0];
- chroma_addr[0] = plane->dma_addr[1];
+ luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0);
+ chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
ctx->interlace = true;
@@ -459,24 +482,24 @@ static void vp_video_buffer(struct mixer_context *ctx,
vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height / 2));
- vp_reg_write(res, VP_SRC_WIDTH, plane->src_w);
- vp_reg_write(res, VP_SRC_HEIGHT, plane->src_h);
+ vp_reg_write(res, VP_SRC_WIDTH, state->src.w);
+ vp_reg_write(res, VP_SRC_HEIGHT, state->src.h);
vp_reg_write(res, VP_SRC_H_POSITION,
- VP_SRC_H_POSITION_VAL(plane->src_x));
- vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y);
+ VP_SRC_H_POSITION_VAL(state->src.x));
+ vp_reg_write(res, VP_SRC_V_POSITION, state->src.y);
- vp_reg_write(res, VP_DST_WIDTH, plane->crtc_w);
- vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x);
+ vp_reg_write(res, VP_DST_WIDTH, state->crtc.w);
+ vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x);
if (ctx->interlace) {
- vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h / 2);
- vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2);
+ vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2);
+ vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2);
} else {
- vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h);
- vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y);
+ vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h);
+ vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y);
}
- vp_reg_write(res, VP_H_RATIO, plane->h_ratio);
- vp_reg_write(res, VP_V_RATIO, plane->v_ratio);
+ vp_reg_write(res, VP_H_RATIO, state->h_ratio);
+ vp_reg_write(res, VP_V_RATIO, state->v_ratio);
vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
@@ -505,37 +528,14 @@ static void mixer_layer_update(struct mixer_context *ctx)
mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
}
-static int mixer_setup_scale(const struct exynos_drm_plane *plane,
- unsigned int *x_ratio, unsigned int *y_ratio)
-{
- if (plane->crtc_w != plane->src_w) {
- if (plane->crtc_w == 2 * plane->src_w)
- *x_ratio = 1;
- else
- goto fail;
- }
-
- if (plane->crtc_h != plane->src_h) {
- if (plane->crtc_h == 2 * plane->src_h)
- *y_ratio = 1;
- else
- goto fail;
- }
-
- return 0;
-
-fail:
- DRM_DEBUG_KMS("only 2x width/height scaling of plane supported\n");
- return -ENOTSUPP;
-}
-
static void mixer_graph_buffer(struct mixer_context *ctx,
struct exynos_drm_plane *plane)
{
+ struct exynos_drm_plane_state *state =
+ to_exynos_plane_state(plane->base.state);
+ struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
struct mixer_resources *res = &ctx->mixer_res;
- struct drm_plane_state *state = plane->base.state;
- struct drm_framebuffer *fb = state->fb;
- struct drm_display_mode *mode = &state->crtc->mode;
+ struct drm_framebuffer *fb = state->base.fb;
unsigned long flags;
unsigned int win = plane->zpos;
unsigned int x_ratio = 0, y_ratio = 0;
@@ -567,17 +567,17 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
return;
}
- /* check if mixer supports requested scaling setup */
- if (mixer_setup_scale(plane, &x_ratio, &y_ratio))
- return;
+ /* ratio is already checked by common plane code */
+ x_ratio = state->h_ratio == (1 << 15);
+ y_ratio = state->v_ratio == (1 << 15);
- dst_x_offset = plane->crtc_x;
- dst_y_offset = plane->crtc_y;
+ dst_x_offset = state->crtc.x;
+ dst_y_offset = state->crtc.y;
/* converting dma address base and source offset */
- dma_addr = plane->dma_addr[0]
- + (plane->src_x * fb->bits_per_pixel >> 3)
- + (plane->src_y * fb->pitches[0]);
+ dma_addr = exynos_drm_fb_dma_addr(fb, 0)
+ + (state->src.x * fb->bits_per_pixel >> 3)
+ + (state->src.y * fb->pitches[0]);
src_x_offset = 0;
src_y_offset = 0;
@@ -605,8 +605,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
mixer_reg_write(res, MXR_RESOLUTION, val);
}
- val = MXR_GRP_WH_WIDTH(plane->src_w);
- val |= MXR_GRP_WH_HEIGHT(plane->src_h);
+ val = MXR_GRP_WH_WIDTH(state->src.w);
+ val |= MXR_GRP_WH_HEIGHT(state->src.h);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -1020,43 +1020,12 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
struct mixer_resources *res = &ctx->mixer_res;
- int ret;
if (test_bit(MXR_BIT_POWERED, &ctx->flags))
return;
pm_runtime_get_sync(ctx->dev);
- ret = clk_prepare_enable(res->mixer);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
- return;
- }
- ret = clk_prepare_enable(res->hdmi);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
- return;
- }
- if (ctx->vp_enabled) {
- ret = clk_prepare_enable(res->vp);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
- ret);
- return;
- }
- if (ctx->has_sclk) {
- ret = clk_prepare_enable(res->sclk_mixer);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the " \
- "sclk_mixer clk [%d]\n",
- ret);
- return;
- }
- }
- }
-
- set_bit(MXR_BIT_POWERED, &ctx->flags);
-
mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
@@ -1064,12 +1033,13 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
}
mixer_win_reset(ctx);
+
+ set_bit(MXR_BIT_POWERED, &ctx->flags);
}
static void mixer_disable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
- struct mixer_resources *res = &ctx->mixer_res;
int i;
if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
@@ -1081,17 +1051,9 @@ static void mixer_disable(struct exynos_drm_crtc *crtc)
for (i = 0; i < MIXER_WIN_NR; i++)
mixer_disable_plane(crtc, &ctx->planes[i]);
- clear_bit(MXR_BIT_POWERED, &ctx->flags);
+ pm_runtime_put(ctx->dev);
- clk_disable_unprepare(res->hdmi);
- clk_disable_unprepare(res->mixer);
- if (ctx->vp_enabled) {
- clk_disable_unprepare(res->vp);
- if (ctx->has_sclk)
- clk_disable_unprepare(res->sclk_mixer);
- }
-
- pm_runtime_put_sync(ctx->dev);
+ clear_bit(MXR_BIT_POWERED, &ctx->flags);
}
/* Only valid for Mixer version 16.0.33.0 */
@@ -1187,30 +1149,19 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
struct mixer_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_plane *exynos_plane;
- unsigned int zpos;
+ unsigned int i;
int ret;
ret = mixer_initialize(ctx, drm_dev);
if (ret)
return ret;
- for (zpos = 0; zpos < MIXER_WIN_NR; zpos++) {
- enum drm_plane_type type;
- const uint32_t *formats;
- unsigned int fcount;
-
- if (zpos < VP_DEFAULT_WIN) {
- formats = mixer_formats;
- fcount = ARRAY_SIZE(mixer_formats);
- } else {
- formats = vp_formats;
- fcount = ARRAY_SIZE(vp_formats);
- }
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ if (i == VP_DEFAULT_WIN && !ctx->vp_enabled)
+ continue;
- type = exynos_plane_get_type(zpos, CURSOR_WIN);
- ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
- 1 << ctx->pipe, type, formats, fcount,
- zpos);
+ ret = exynos_plane_init(drm_dev, &ctx->planes[i],
+ 1 << ctx->pipe, &plane_configs[i]);
if (ret)
return ret;
}
@@ -1293,10 +1244,70 @@ static int mixer_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int exynos_mixer_suspend(struct device *dev)
+{
+ struct mixer_context *ctx = dev_get_drvdata(dev);
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ clk_disable_unprepare(res->hdmi);
+ clk_disable_unprepare(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_disable_unprepare(res->vp);
+ if (ctx->has_sclk)
+ clk_disable_unprepare(res->sclk_mixer);
+ }
+
+ return 0;
+}
+
+static int exynos_mixer_resume(struct device *dev)
+{
+ struct mixer_context *ctx = dev_get_drvdata(dev);
+ struct mixer_resources *res = &ctx->mixer_res;
+ int ret;
+
+ ret = clk_prepare_enable(res->mixer);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(res->hdmi);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
+ return ret;
+ }
+ if (ctx->vp_enabled) {
+ ret = clk_prepare_enable(res->vp);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
+ ret);
+ return ret;
+ }
+ if (ctx->has_sclk) {
+ ret = clk_prepare_enable(res->sclk_mixer);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the " \
+ "sclk_mixer clk [%d]\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos_mixer_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
+};
+
struct platform_driver mixer_driver = {
.driver = {
.name = "exynos-mixer",
.owner = THIS_MODULE,
+ .pm = &exynos_mixer_pm_ops,
.of_match_table = mixer_match_types,
},
.probe = mixer_probe,
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
index 9ad592707aaf..4704a993cbb7 100644
--- a/drivers/gpu/drm/exynos/regs-gsc.h
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -273,12 +273,12 @@
#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
/* SYSCON. GSCBLK_CFG */
-#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224)
+#define SYSREG_GSCBLK_CFG1 0x0224
#define GSC_BLK_DISP1WB_DEST(x) (x << 10)
#define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + x))
#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + x))
#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * x))
-#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000)
+#define SYSREG_GSCBLK_CFG2 0x2000
#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
#endif /* EXYNOS_REGS_GSC_H_ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 82a3d311e164..d8ab8f0af10c 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -175,7 +175,7 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL,
- &fsl_dcu_drm_crtc_funcs);
+ &fsl_dcu_drm_crtc_funcs, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 1930234ba5f1..fca97d3fc846 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -363,7 +363,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
fsl_dev->np = dev->of_node;
drm->dev_private = fsl_dev;
dev_set_drvdata(dev, fsl_dev);
- drm_dev_set_unique(drm, dev_name(dev));
ret = drm_dev_register(drm, 0);
if (ret < 0)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 51daaea40b4d..4b13cf919575 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -249,7 +249,7 @@ struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
&fsl_dcu_drm_plane_funcs,
fsl_dcu_drm_plane_formats,
ARRAY_SIZE(fsl_dcu_drm_plane_formats),
- DRM_PLANE_TYPE_PRIMARY);
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
primary = NULL;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index fe8ab5da04fb..8780deba5e8a 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -57,7 +57,7 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
encoder->possible_crtcs = 1;
ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
if (ret < 0)
return ret;
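
The repeated trailing NULL added to drm_encoder_init(), drm_universal_plane_init() and drm_crtc_init_with_planes() throughout this series is the new printf-style name argument; NULL keeps the core-generated default name. A hedged sketch of passing an explicit name instead (the "my-lvds-%d" string is purely illustrative):

    #include <drm/drm_crtc.h>   /* drm_encoder_init() lives here in this era */

    static int my_encoder_create(struct drm_device *dev,
                                 struct drm_encoder *encoder,
                                 const struct drm_encoder_funcs *funcs)
    {
            /* NULL name (as the hunks above pass) keeps the core default;
             * a printf-style name may be given instead: */
            return drm_encoder_init(dev, encoder, funcs,
                                    DRM_MODE_ENCODER_LVDS, "my-lvds-%d", 0);
    }
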
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 3531f90e53d0..8745971a7680 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -619,6 +619,8 @@ const struct psb_ops cdv_chip_ops = {
.init_pm = cdv_init_pm,
.save_regs = cdv_save_display_registers,
.restore_regs = cdv_restore_display_registers,
+ .save_crtc = gma_crtc_save,
+ .restore_crtc = gma_crtc_restore,
.power_down = cdv_power_down,
.power_up = cdv_power_up,
.update_wm = cdv_update_wm,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 248c33a35ebf..d0717a85c7ec 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -273,7 +273,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
encoder = &gma_encoder->base;
drm_encoder_init(dev, encoder,
- &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
+ &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, NULL);
gma_connector_attach_encoder(gma_connector, gma_encoder);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 7d47b3d5cc0d..6126546295e9 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -983,8 +983,6 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
};
const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
- .save = gma_crtc_save,
- .restore = gma_crtc_restore,
.cursor_set = gma_crtc_cursor_set,
.cursor_move = gma_crtc_cursor_move,
.gamma_set = gma_crtc_gamma_set,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 17cea400ae32..7bb1f1aff932 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -2020,7 +2020,8 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
encoder = &gma_encoder->base;
drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
- drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
gma_connector_attach_encoder(gma_connector, gma_encoder);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 6b1d3340ba14..ddf2d7700759 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -270,8 +270,6 @@ static const struct drm_connector_helper_funcs
static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = cdv_hdmi_save,
- .restore = cdv_hdmi_restore,
.detect = cdv_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = cdv_hdmi_set_property,
@@ -306,13 +304,16 @@ void cdv_hdmi_init(struct drm_device *dev,
connector = &gma_connector->base;
connector->polled = DRM_CONNECTOR_POLL_HPD;
+ gma_connector->save = cdv_hdmi_save;
+ gma_connector->restore = cdv_hdmi_restore;
+
encoder = &gma_encoder->base;
drm_connector_init(dev, connector,
&cdv_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 211069b2b951..813ef23a8054 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -530,8 +530,6 @@ static const struct drm_connector_helper_funcs
static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = cdv_intel_lvds_save,
- .restore = cdv_intel_lvds_restore,
.detect = cdv_intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = cdv_intel_lvds_set_property,
@@ -643,6 +641,8 @@ void cdv_intel_lvds_init(struct drm_device *dev,
gma_encoder->dev_priv = lvds_priv;
connector = &gma_connector->base;
+ gma_connector->save = cdv_intel_lvds_save;
+ gma_connector->restore = cdv_intel_lvds_restore;
encoder = &gma_encoder->base;
@@ -652,7 +652,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
drm_encoder_init(dev, encoder,
&cdv_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
gma_connector_attach_encoder(gma_connector, gma_encoder);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index dc0508dca1d4..ee95c03a8c54 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -406,8 +406,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
memset(dev_priv->vram_addr + backing->offset, 0, size);
- mutex_lock(&dev->struct_mutex);
-
info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
@@ -463,17 +461,15 @@ static int psbfb_create(struct psb_fbdev *fbdev,
dev_dbg(dev->dev, "allocated %dx%d fb\n",
psbfb->base.width, psbfb->base.height);
- mutex_unlock(&dev->struct_mutex);
return 0;
out_unref:
if (backing->stolen)
psb_gtt_free_range(dev, backing);
else
- drm_gem_object_unreference(&backing->gem);
+ drm_gem_object_unreference_unlocked(&backing->gem);
drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
out_err1:
- mutex_unlock(&dev->struct_mutex);
psb_gtt_free_range(dev, backing);
return ret;
}
@@ -569,7 +565,7 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
drm_framebuffer_cleanup(&psbfb->base);
if (psbfb->gtt)
- drm_gem_object_unreference(&psbfb->gtt->gem);
+ drm_gem_object_unreference_unlocked(&psbfb->gtt->gem);
return 0;
}
@@ -784,12 +780,8 @@ void psb_modeset_cleanup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
if (dev_priv->modeset) {
- mutex_lock(&dev->struct_mutex);
-
drm_kms_helper_poll_fini(dev);
psb_fbdev_fini(dev);
drm_mode_config_cleanup(dev);
-
- mutex_unlock(&dev->struct_mutex);
}
}
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index c707fa6fca85..506224b3a0ad 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -62,15 +62,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
int ret = 0;
struct drm_gem_object *obj;
- mutex_lock(&dev->struct_mutex);
-
/* GEM does all our handle to object mapping */
obj = drm_gem_object_lookup(dev, file, handle);
- if (obj == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
- /* What validation is needed here ? */
+ if (obj == NULL)
+ return -ENOENT;
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
@@ -78,9 +73,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
goto out;
*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
- drm_gem_object_unreference(obj);
-unlock:
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -130,7 +123,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
return ret;
}
/* We have the initial and handle reference but need only one now */
- drm_gem_object_unreference(&r->gem);
+ drm_gem_object_unreference_unlocked(&r->gem);
*handlep = handle;
return 0;
}
@@ -189,7 +182,7 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Make sure we don't parallel update on a fault, nor move or remove
something from beneath our feet */
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->mmap_mutex);
/* For now the mmap pins the object and it stays pinned. As things
stand that will do us no harm */
@@ -215,7 +208,7 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
fail:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->mmap_mutex);
switch (ret) {
case 0:
case -ERESTARTSYS:
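
The hunks above drop dev->struct_mutex around GEM reference drops and move the fault path to a new driver-private mmap_mutex (initialized in gtt.c below). The conversion follows one locking rule, sketched here under the assumed semantics of the two unreference variants in this era of DRM:

	/* old style: the plain variant requires struct_mutex held */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	/* new style: the _unlocked variant must be called without the
	 * lock; it only takes struct_mutex internally when the refcount
	 * hits zero and the object must actually be freed */
	drm_gem_object_unreference_unlocked(obj);

With no caller left needing struct_mutex for reference counting, the fault handler can serialize against itself with the narrower mmap_mutex instead.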
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 001b450b27b3..ff17af4cfc64 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -349,8 +349,6 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
/* If we didn't get a handle then turn the cursor off */
if (!handle) {
temp = CURSOR_MODE_DISABLE;
- mutex_lock(&dev->struct_mutex);
-
if (gma_power_begin(dev, false)) {
REG_WRITE(control, temp);
REG_WRITE(base, 0);
@@ -362,11 +360,9 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
gt = container_of(gma_crtc->cursor_obj,
struct gtt_range, gem);
psb_gtt_unpin(gt);
- drm_gem_object_unreference(gma_crtc->cursor_obj);
+ drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
gma_crtc->cursor_obj = NULL;
}
-
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -376,7 +372,6 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj) {
ret = -ENOENT;
@@ -441,17 +436,15 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
if (gma_crtc->cursor_obj) {
gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
psb_gtt_unpin(gt);
- drm_gem_object_unreference(gma_crtc->cursor_obj);
+ drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
}
gma_crtc->cursor_obj = obj;
unlock:
- mutex_unlock(&dev->struct_mutex);
return ret;
unref_cursor:
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index ce015db59dc6..8f69225ce2b4 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -425,6 +425,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
if (!resume) {
mutex_init(&dev_priv->gtt_mutex);
+ mutex_init(&dev_priv->mmap_mutex);
psb_gtt_alloc(dev);
}
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
index 265ad0de44a6..e2ab858122f9 100644
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -546,6 +546,8 @@ const struct psb_ops mdfld_chip_ops = {
.save_regs = mdfld_save_registers,
.restore_regs = mdfld_restore_registers,
+ .save_crtc = gma_crtc_save,
+ .restore_crtc = gma_crtc_restore,
.power_down = mdfld_power_down,
.power_up = mdfld_power_up,
};
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d4813e03f5ee..7cd87a0c2385 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -821,14 +821,18 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = dsi_config->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
-
u32 pipeconf_reg = PIPEACONF;
u32 dspcntr_reg = DSPACNTR;
+ u32 pipeconf, dspcntr;
- u32 pipeconf = dev_priv->pipeconf[pipe];
- u32 dspcntr = dev_priv->dspcntr[pipe];
u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+ if (WARN_ON(pipe < 0))
+ return;
+
+ pipeconf = dev_priv->pipeconf[pipe];
+ dspcntr = dev_priv->dspcntr[pipe];
+
if (pipe) {
pipeconf_reg = PIPECCONF;
dspcntr_reg = DSPCCNTR;
@@ -994,7 +998,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
drm_encoder_init(dev,
encoder,
p_funcs->encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
drm_encoder_helper_add(encoder,
p_funcs->encoder_helper_funcs);
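
The reordering in mdfld_dsi_dpi_mode_set() above matters because mdfld_dsi_encoder_get_pipe() can fail: indexing dev_priv->pipeconf[pipe] in the declarations would read out of bounds before any check could run. The shape of the fix, with get_pipe() as a hypothetical stand-in:

	int pipe = get_pipe(encoder);	/* may be negative on error */
	u32 pipeconf;

	if (WARN_ON(pipe < 0))
		return;			/* bail before any array use */

	pipeconf = dev_priv->pipeconf[pipe];	/* index now known valid */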
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 89f705c3a5eb..d758f4cc6805 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -405,8 +405,6 @@ static struct drm_encoder *mdfld_dsi_connector_best_encoder(
/*DSI connector funcs*/
static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
.dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
- .save = mdfld_dsi_connector_save,
- .restore = mdfld_dsi_connector_restore,
.detect = mdfld_dsi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = mdfld_dsi_connector_set_property,
@@ -563,6 +561,9 @@ void mdfld_dsi_output_init(struct drm_device *dev,
connector = &dsi_connector->base.base;
+ dsi_connector->base.save = mdfld_dsi_connector_save;
+ dsi_connector->base.restore = mdfld_dsi_connector_restore;
+
drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 368a03ae3010..ba30b43a3412 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -568,6 +568,8 @@ const struct psb_ops oaktrail_chip_ops = {
.save_regs = oaktrail_save_display_registers,
.restore_regs = oaktrail_restore_display_registers,
+ .save_crtc = gma_crtc_save,
+ .restore_crtc = gma_crtc_restore,
.power_down = oaktrail_power_down,
.power_up = oaktrail_power_up,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 2310d879cdc2..2d18499d6060 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -654,7 +654,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
drm_encoder_init(dev, encoder,
&oaktrail_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
gma_connector_attach_encoder(gma_connector, gma_encoder);
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 83bbc271bcfb..f7038f12ac76 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -323,7 +323,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
DRM_MODE_CONNECTOR_LVDS);
drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 07df7d4eea72..dc0f8527570c 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -181,7 +181,7 @@ static int psb_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- struct drm_connector *connector;
+ struct gma_connector *connector;
struct psb_state *regs = &dev_priv->regs.psb;
/* Display arbitration control + watermarks */
@@ -198,12 +198,12 @@ static int psb_save_display_registers(struct drm_device *dev)
drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (drm_helper_crtc_in_use(crtc))
- crtc->funcs->save(crtc);
+ dev_priv->ops->save_crtc(crtc);
}
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->funcs->save)
- connector->funcs->save(connector);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+ if (connector->save)
+ connector->save(&connector->base);
drm_modeset_unlock_all(dev);
return 0;
@@ -219,7 +219,7 @@ static int psb_restore_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- struct drm_connector *connector;
+ struct gma_connector *connector;
struct psb_state *regs = &dev_priv->regs.psb;
/* Display arbitration + watermarks */
@@ -238,11 +238,11 @@ static int psb_restore_display_registers(struct drm_device *dev)
drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (drm_helper_crtc_in_use(crtc))
- crtc->funcs->restore(crtc);
+ dev_priv->ops->restore_crtc(crtc);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->funcs->restore)
- connector->funcs->restore(connector);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
+ if (connector->restore)
+ connector->restore(&connector->base);
drm_modeset_unlock_all(dev);
return 0;
@@ -354,6 +354,8 @@ const struct psb_ops psb_chip_ops = {
.init_pm = psb_init_pm,
.save_regs = psb_save_display_registers,
.restore_regs = psb_restore_display_registers,
+ .save_crtc = gma_crtc_save,
+ .restore_crtc = gma_crtc_restore,
.power_down = psb_power_down,
.power_up = psb_power_up,
};
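
The save/restore loops above rely on a detail of list_for_each_entry(): the macro only needs the offset of the named list_head member within the iterated type, so naming the embedded path base.head lets the loop walk the core's connector list while handing back the driver's wrapper struct directly. A reduced sketch with hypothetical types:

	struct outer {
		struct inner {
			struct list_head head;
		} base;
		void (*save)(struct inner *);
	};

	struct outer *o;

	/* iterate a list linked through inner.head, but get outer back,
	 * because offsetof(struct outer, base.head) is a compile-time
	 * constant the macro can subtract from each node pointer */
	list_for_each_entry(o, &some_list, base.head)
		if (o->save)
			o->save(&o->base);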
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index e21726ecac32..b74372760d7f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -465,6 +465,8 @@ struct drm_psb_private {
struct mutex gtt_mutex;
struct resource *gtt_mem; /* Our PCI resource */
+ struct mutex mmap_mutex;
+
struct psb_mmu_driver *mmu;
struct psb_mmu_pd *pf_pd;
@@ -651,6 +653,8 @@ struct psb_ops {
void (*init_pm)(struct drm_device *dev);
int (*save_regs)(struct drm_device *dev);
int (*restore_regs)(struct drm_device *dev);
+ void (*save_crtc)(struct drm_crtc *crtc);
+ void (*restore_crtc)(struct drm_crtc *crtc);
int (*power_up)(struct drm_device *dev);
int (*power_down)(struct drm_device *dev);
void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6659da88fe5b..dcdbc37e55e1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -439,8 +439,6 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
};
const struct drm_crtc_funcs psb_intel_crtc_funcs = {
- .save = gma_crtc_save,
- .restore = gma_crtc_restore,
.cursor_set = gma_crtc_cursor_set,
.cursor_move = gma_crtc_cursor_move,
.gamma_set = gma_crtc_gamma_set,
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 860dd2177ca1..2a3b7c684db2 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -140,6 +140,9 @@ struct gma_encoder {
struct gma_connector {
struct drm_connector base;
struct gma_encoder *encoder;
+
+ void (*save)(struct drm_connector *connector);
+ void (*restore)(struct drm_connector *connector);
};
struct psb_intel_crtc_state {
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index ce0645d0c1e5..b1b93317d054 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -653,8 +653,6 @@ const struct drm_connector_helper_funcs
const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = psb_intel_lvds_save,
- .restore = psb_intel_lvds_restore,
.detect = psb_intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = psb_intel_lvds_set_property,
@@ -715,6 +713,9 @@ void psb_intel_lvds_init(struct drm_device *dev,
gma_encoder->dev_priv = lvds_priv;
connector = &gma_connector->base;
+ gma_connector->save = psb_intel_lvds_save;
+ gma_connector->restore = psb_intel_lvds_restore;
+
encoder = &gma_encoder->base;
drm_connector_init(dev, connector,
&psb_intel_lvds_connector_funcs,
@@ -722,7 +723,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
drm_encoder_init(dev, encoder,
&psb_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 58529cea575d..e787d376ba67 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1837,8 +1837,6 @@ static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = psb_intel_sdvo_save,
- .restore = psb_intel_sdvo_restore,
.detect = psb_intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = psb_intel_sdvo_set_property,
@@ -2021,6 +2019,9 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->base.save = psb_intel_sdvo_save;
+ connector->base.restore = psb_intel_sdvo_restore;
+
gma_connector_attach_encoder(&connector->base, &encoder->base);
drm_connector_register(&connector->base.base);
}
@@ -2525,7 +2526,8 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
/* encoder type will be decided later */
gma_encoder = &psb_intel_sdvo->base;
gma_encoder->type = INTEL_OUTPUT_SDVO;
- drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
+ drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs,
+ 0, NULL);
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 00416f23b5cb..533d1e3d4a99 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -752,7 +752,7 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
adv7511->f_tmds = mode->clock;
}
-static struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
+static const struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
.dpms = adv7511_encoder_dpms,
.mode_valid = adv7511_encoder_mode_valid,
.mode_set = adv7511_encoder_mode_set,
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index d9a72c96e56c..90db5f4dcce5 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -371,7 +371,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
return 0;
}
-static struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
+static const struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
.set_config = ch7006_encoder_set_config,
.destroy = ch7006_encoder_destroy,
.dpms = ch7006_encoder_dpms,
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 002ce7874332..c400428f6c8c 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -341,7 +341,7 @@ sil164_encoder_destroy(struct drm_encoder *encoder)
drm_i2c_encoder_destroy(encoder);
}
-static struct drm_encoder_slave_funcs sil164_encoder_funcs = {
+static const struct drm_encoder_slave_funcs sil164_encoder_funcs = {
.set_config = sil164_encoder_set_config,
.destroy = sil164_encoder_destroy,
.dpms = sil164_encoder_dpms,
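
The three one-line changes above (and the adv7511 one before them) mark the slave-encoder ops tables const, letting them live in read-only data. This is safe because the consumer already stores them through a const pointer; a sketch, assuming the drm_encoder_slave layout of this era and with my_* as hypothetical callbacks:

	static const struct drm_encoder_slave_funcs funcs = {
		.dpms = my_dpms,
		.mode_set = my_mode_set,
	};

	/* struct drm_encoder_slave holds
	 *     const struct drm_encoder_slave_funcs *slave_funcs;
	 * so a const table assigns without a cast */
	encoder_slave->slave_funcs = &funcs;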
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 896b6aaf8c4d..012d36d9a75b 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -22,6 +22,7 @@
#include <sound/asoundef.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
@@ -855,18 +856,6 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
priv->dpms = mode;
}
-static void
-tda998x_encoder_save(struct drm_encoder *encoder)
-{
- DBG("");
-}
-
-static void
-tda998x_encoder_restore(struct drm_encoder *encoder)
-{
- DBG("");
-}
-
static bool
tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
@@ -878,7 +867,10 @@ tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
static int tda998x_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- if (mode->clock > 150000)
+ /* TDA19988 dotclock can go up to 165MHz */
+ struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+
+ if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000))
return MODE_CLOCK_HIGH;
if (mode->htotal >= BIT(13))
return MODE_BAD_HVALUE;
@@ -1351,8 +1343,6 @@ static void tda998x_encoder_commit(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
.dpms = tda998x_encoder_dpms,
- .save = tda998x_encoder_save,
- .restore = tda998x_encoder_restore,
.mode_fixup = tda998x_encoder_mode_fixup,
.prepare = tda998x_encoder_prepare,
.commit = tda998x_encoder_commit,
@@ -1393,10 +1383,13 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs tda998x_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = tda998x_connector_detect,
.destroy = tda998x_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int tda998x_bind(struct device *dev, struct device *master, void *data)
@@ -1437,7 +1430,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
if (ret)
goto err_encoder;
@@ -1472,6 +1465,7 @@ static void tda998x_unbind(struct device *dev, struct device *master,
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
+ drm_connector_unregister(&priv->connector);
drm_connector_cleanup(&priv->connector);
drm_encoder_cleanup(&priv->encoder);
tda998x_destroy(priv);
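
The connector-funcs swap above is the minimal set of hooks needed to run a connector under the atomic helpers: .dpms routes through the atomic commit path, .reset allocates the initial drm_connector_state, and the duplicate/destroy pair manages state copies across commits. A sketch of the resulting table for a generic driver, with example_* as hypothetical driver callbacks:

	static const struct drm_connector_funcs example_connector_funcs = {
		.dpms = drm_atomic_helper_connector_dpms,
		.reset = drm_atomic_helper_connector_reset,
		.fill_modes = drm_helper_probe_single_connector_modes,
		.detect = example_detect,
		.destroy = example_destroy,
		.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	};

Without .reset the first atomic operation would see a NULL connector state, so the conversion has to add all three atomic hooks together.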
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 411a9c68b4ee..a8721fccd8a0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1639,7 +1639,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->fbc.lock);
- if (intel_fbc_enabled(dev_priv))
+ if (intel_fbc_is_active(dev_priv))
seq_puts(m, "FBC enabled\n");
else
seq_printf(m, "FBC disabled: %s\n",
@@ -1869,33 +1869,29 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct intel_fbdev *ifbdev = NULL;
- struct intel_framebuffer *fb;
+ struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- ifbdev = dev_priv->fbdev;
- if (ifbdev) {
- fb = to_intel_framebuffer(ifbdev->helper.fb);
-
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
- fb->base.width,
- fb->base.height,
- fb->base.depth,
- fb->base.bits_per_pixel,
- fb->base.modifier[0],
- atomic_read(&fb->base.refcount.refcount));
- describe_obj(m, fb->obj);
- seq_putc(m, '\n');
- }
+ if (to_i915(dev)->fbdev) {
+ fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
+
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fbdev_fb->base.width,
+ fbdev_fb->base.height,
+ fbdev_fb->base.depth,
+ fbdev_fb->base.bits_per_pixel,
+ fbdev_fb->base.modifier[0],
+ atomic_read(&fbdev_fb->base.refcount.refcount));
+ describe_obj(m, fbdev_fb->obj);
+ seq_putc(m, '\n');
+ }
#endif
mutex_lock(&dev->mode_config.fb_lock);
drm_for_each_fb(drm_fb, dev) {
- fb = to_intel_framebuffer(drm_fb);
- if (ifbdev && &fb->base == ifbdev->helper.fb)
+ struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+ if (fb == fbdev_fb)
continue;
seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
@@ -2473,15 +2469,15 @@ static int i915_guc_info(struct seq_file *m, void *data)
if (!HAS_GUC_SCHED(dev_priv->dev))
return 0;
+ if (mutex_lock_interruptible(&dev->struct_mutex))
+ return 0;
+
/* Take a local copy of the GuC data, so we can dump it at leisure */
- spin_lock(&dev_priv->guc.host2guc_lock);
guc = dev_priv->guc;
- if (guc.execbuf_client) {
- spin_lock(&guc.execbuf_client->wq_lock);
+ if (guc.execbuf_client)
client = *guc.execbuf_client;
- spin_unlock(&guc.execbuf_client->wq_lock);
- }
- spin_unlock(&dev_priv->guc.host2guc_lock);
+
+ mutex_unlock(&dev->struct_mutex);
seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
@@ -2582,8 +2578,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
}
seq_puts(m, "\n");
- /* CHV PSR has no kind of performance counter */
- if (HAS_DDI(dev)) {
+ /*
+ * VLV/CHV PSR has no kind of performance counter
+ * SKL+ Perf counter is reset to 0 every time DC state is entered
+ */
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT) &
EDP_PSR_PERF_CNT_MASK;
@@ -2685,71 +2684,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
return 0;
}
-static const char *power_domain_str(enum intel_display_power_domain domain)
-{
- switch (domain) {
- case POWER_DOMAIN_PIPE_A:
- return "PIPE_A";
- case POWER_DOMAIN_PIPE_B:
- return "PIPE_B";
- case POWER_DOMAIN_PIPE_C:
- return "PIPE_C";
- case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
- return "PIPE_A_PANEL_FITTER";
- case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
- return "PIPE_B_PANEL_FITTER";
- case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
- return "PIPE_C_PANEL_FITTER";
- case POWER_DOMAIN_TRANSCODER_A:
- return "TRANSCODER_A";
- case POWER_DOMAIN_TRANSCODER_B:
- return "TRANSCODER_B";
- case POWER_DOMAIN_TRANSCODER_C:
- return "TRANSCODER_C";
- case POWER_DOMAIN_TRANSCODER_EDP:
- return "TRANSCODER_EDP";
- case POWER_DOMAIN_PORT_DDI_A_LANES:
- return "PORT_DDI_A_LANES";
- case POWER_DOMAIN_PORT_DDI_B_LANES:
- return "PORT_DDI_B_LANES";
- case POWER_DOMAIN_PORT_DDI_C_LANES:
- return "PORT_DDI_C_LANES";
- case POWER_DOMAIN_PORT_DDI_D_LANES:
- return "PORT_DDI_D_LANES";
- case POWER_DOMAIN_PORT_DDI_E_LANES:
- return "PORT_DDI_E_LANES";
- case POWER_DOMAIN_PORT_DSI:
- return "PORT_DSI";
- case POWER_DOMAIN_PORT_CRT:
- return "PORT_CRT";
- case POWER_DOMAIN_PORT_OTHER:
- return "PORT_OTHER";
- case POWER_DOMAIN_VGA:
- return "VGA";
- case POWER_DOMAIN_AUDIO:
- return "AUDIO";
- case POWER_DOMAIN_PLLS:
- return "PLLS";
- case POWER_DOMAIN_AUX_A:
- return "AUX_A";
- case POWER_DOMAIN_AUX_B:
- return "AUX_B";
- case POWER_DOMAIN_AUX_C:
- return "AUX_C";
- case POWER_DOMAIN_AUX_D:
- return "AUX_D";
- case POWER_DOMAIN_GMBUS:
- return "GMBUS";
- case POWER_DOMAIN_MODESET:
- return "MODESET";
- case POWER_DOMAIN_INIT:
- return "INIT";
- default:
- MISSING_CASE(domain);
- return "?";
- }
-}
-
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -2775,7 +2709,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
continue;
seq_printf(m, " %-23s %d\n",
- power_domain_str(power_domain),
+ intel_display_power_domain_str(power_domain),
power_domains->domain_use_count[power_domain]);
}
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6344dfb72177..e6935f1cb689 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -228,121 +228,83 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.need_gfx_hws = 1, .has_hotplug = 1, \
.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
- .has_llc = 1
+ .has_llc = 1, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ IVB_CURSOR_OFFSETS
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.num_pipes = 0, /* legal, last one wins */
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
};
+#define VLV_FEATURES \
+ .gen = 7, .num_pipes = 2, \
+ .need_gfx_hws = 1, .has_hotplug = 1, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .display_mmio_offset = VLV_DISPLAY_BASE, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ CURSOR_OFFSETS
+
static const struct intel_device_info intel_valleyview_m_info = {
- GEN7_FEATURES,
- .is_mobile = 1,
- .num_pipes = 2,
+ VLV_FEATURES,
.is_valleyview = 1,
- .display_mmio_offset = VLV_DISPLAY_BASE,
- .has_fbc = 0, /* legal, last one wins */
- .has_llc = 0, /* legal, last one wins */
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ .is_mobile = 1,
};
static const struct intel_device_info intel_valleyview_d_info = {
- GEN7_FEATURES,
- .num_pipes = 2,
+ VLV_FEATURES,
.is_valleyview = 1,
- .display_mmio_offset = VLV_DISPLAY_BASE,
- .has_fbc = 0, /* legal, last one wins */
- .has_llc = 0, /* legal, last one wins */
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
};
+#define HSW_FEATURES \
+ GEN7_FEATURES, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+ .has_ddi = 1, \
+ .has_fpga_dbg = 1
+
static const struct intel_device_info intel_haswell_d_info = {
- GEN7_FEATURES,
+ HSW_FEATURES,
.is_haswell = 1,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_haswell_m_info = {
- GEN7_FEATURES,
+ HSW_FEATURES,
.is_haswell = 1,
.is_mobile = 1,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_broadwell_d_info = {
- .gen = 8, .num_pipes = 3,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .has_llc = 1,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .has_fbc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
+ HSW_FEATURES,
+ .gen = 8,
};
static const struct intel_device_info intel_broadwell_m_info = {
- .gen = 8, .is_mobile = 1, .num_pipes = 3,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .has_llc = 1,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .has_fbc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
+ HSW_FEATURES,
+ .gen = 8, .is_mobile = 1,
};
static const struct intel_device_info intel_broadwell_gt3d_info = {
- .gen = 8, .num_pipes = 3,
- .need_gfx_hws = 1, .has_hotplug = 1,
+ HSW_FEATURES,
+ .gen = 8,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
- .has_llc = 1,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .has_fbc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_broadwell_gt3m_info = {
- .gen = 8, .is_mobile = 1, .num_pipes = 3,
- .need_gfx_hws = 1, .has_hotplug = 1,
+ HSW_FEATURES,
+ .gen = 8, .is_mobile = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
- .has_llc = 1,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .has_fbc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_cherryview_info = {
@@ -356,29 +318,16 @@ static const struct intel_device_info intel_cherryview_info = {
};
static const struct intel_device_info intel_skylake_info = {
+ HSW_FEATURES,
.is_skylake = 1,
- .gen = 9, .num_pipes = 3,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .has_llc = 1,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .has_fbc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
+ .gen = 9,
};
static const struct intel_device_info intel_skylake_gt3_info = {
+ HSW_FEATURES,
.is_skylake = 1,
- .gen = 9, .num_pipes = 3,
- .need_gfx_hws = 1, .has_hotplug = 1,
+ .gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
- .has_llc = 1,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .has_fbc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_broxton_info = {
@@ -396,33 +345,18 @@ static const struct intel_device_info intel_broxton_info = {
};
static const struct intel_device_info intel_kabylake_info = {
+ HSW_FEATURES,
.is_preliminary = 1,
.is_kabylake = 1,
.gen = 9,
- .num_pipes = 3,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .has_llc = 1,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .has_fbc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
+ HSW_FEATURES,
.is_preliminary = 1,
.is_kabylake = 1,
.gen = 9,
- .num_pipes = 3,
- .need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
- .has_llc = 1,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .has_fbc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
};
/*
@@ -465,6 +399,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_SKL_GT1_IDS(&intel_skylake_info),
INTEL_SKL_GT2_IDS(&intel_skylake_info),
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
+ INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
INTEL_BXT_IDS(&intel_broxton_info),
INTEL_KBL_GT1_IDS(&intel_kabylake_info),
INTEL_KBL_GT2_IDS(&intel_kabylake_info),
@@ -565,7 +500,8 @@ void intel_detect_pch(struct drm_device *dev)
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
- } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
+ } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) {
dev_priv->pch_type = intel_virt_detect_pch(dev);
} else
continue;
@@ -624,6 +560,14 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
+static bool suspend_to_idle(struct drm_i915_private *dev_priv)
+{
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+ if (acpi_target_system_state() < ACPI_STATE_S3)
+ return true;
+#endif
+ return false;
+}
static int i915_drm_suspend(struct drm_device *dev)
{
@@ -676,11 +620,7 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_save_state(dev);
- opregion_target_state = PCI_D3cold;
-#if IS_ENABLED(CONFIG_ACPI_SLEEP)
- if (acpi_target_system_state() < ACPI_STATE_S3)
- opregion_target_state = PCI_D1;
-#endif
+ opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev, opregion_target_state);
intel_uncore_forcewake_reset(dev, false);
@@ -701,15 +641,26 @@ static int i915_drm_suspend(struct drm_device *dev)
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ bool fw_csr;
int ret;
- intel_power_domains_suspend(dev_priv);
+ fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
+ /*
+ * In case of firmware assisted context save/restore don't manually
+ * deinit the power domains. This also means the CSR/DMC firmware will
+ * stay active, it will power down any HW resources as required and
+ * also enable deeper system power states that would be blocked if the
+ * firmware was inactive.
+ */
+ if (!fw_csr)
+ intel_power_domains_suspend(dev_priv);
ret = intel_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret);
- intel_power_domains_init_hw(dev_priv, true);
+ if (!fw_csr)
+ intel_power_domains_init_hw(dev_priv, true);
return ret;
}
@@ -730,6 +681,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+ dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
+
return 0;
}
@@ -842,8 +795,10 @@ static int i915_drm_resume_early(struct drm_device *dev)
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
- if (pci_enable_device(dev->pdev))
- return -EIO;
+ if (pci_enable_device(dev->pdev)) {
+ ret = -EIO;
+ goto out;
+ }
pci_set_master(dev->pdev);
@@ -861,7 +816,12 @@ static int i915_drm_resume_early(struct drm_device *dev)
hsw_disable_pc8(dev_priv);
intel_uncore_sanitize(dev);
- intel_power_domains_init_hw(dev_priv, true);
+
+ if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+ intel_power_domains_init_hw(dev_priv, true);
+
+out:
+ dev_priv->suspended_to_idle = false;
return ret;
}
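
The device-info consolidation above leans on a C99 rule for designated initializers: when the same field is initialized twice, the last initializer wins, which is what the diff's own "/* legal, last one wins */" comments refer to. That rule is what lets a feature macro supply defaults that individual entries then override. A standalone sketch:

	#define BASE_FEATURES \
		.gen = 7, .has_llc = 1

	struct info { int gen; int has_llc; };

	static const struct info example = {
		BASE_FEATURES,
		.has_llc = 0,	/* overrides the macro: last one wins */
	};

GCC only diagnoses such overrides under -Wextra/-Woverride-init, which the normal kernel build does not enable, so the pattern compiles silently.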
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 15c6dc0b4f37..f1a8a53e9e30 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -57,7 +57,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20151120"
+#define DRIVER_DATE "20151204"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -902,7 +902,6 @@ struct i915_fbc {
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
struct mutex lock;
- unsigned long uncompressed_size;
unsigned threshold;
unsigned int fb_id;
unsigned int possible_framebuffer_bits;
@@ -915,21 +914,21 @@ struct i915_fbc {
bool false_color;
- /* Tracks whether the HW is actually enabled, not whether the feature is
- * possible. */
bool enabled;
+ bool active;
struct intel_fbc_work {
- struct delayed_work work;
- struct intel_crtc *crtc;
+ bool scheduled;
+ struct work_struct work;
struct drm_framebuffer *fb;
- } *fbc_work;
+ unsigned long enable_jiffies;
+ } work;
const char *no_fbc_reason;
- bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
- void (*enable_fbc)(struct intel_crtc *crtc);
- void (*disable_fbc)(struct drm_i915_private *dev_priv);
+ bool (*is_active)(struct drm_i915_private *dev_priv);
+ void (*activate)(struct intel_crtc *crtc);
+ void (*deactivate)(struct drm_i915_private *dev_priv);
};
/**
@@ -1885,6 +1884,7 @@ struct drm_i915_private {
u32 chv_phy_control;
u32 suspend_count;
+ bool suspended_to_idle;
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state;
@@ -2608,11 +2608,13 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
@@ -2749,17 +2751,47 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t mask,
uint32_t bits);
-void
-ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
-void
-ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
+void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask);
+static inline void
+ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+{
+ ilk_update_display_irq(dev_priv, bits, bits);
+}
+static inline void
+ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+{
+ ilk_update_display_irq(dev_priv, bits, 0);
+}
+void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask);
+static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
+ enum pipe pipe, uint32_t bits)
+{
+ bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
+}
+static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
+ enum pipe pipe, uint32_t bits)
+{
+ bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
+}
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask);
-#define ibx_enable_display_interrupt(dev_priv, bits) \
- ibx_display_interrupt_update((dev_priv), (bits), (bits))
-#define ibx_disable_display_interrupt(dev_priv, bits) \
- ibx_display_interrupt_update((dev_priv), (bits), 0)
+static inline void
+ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+{
+ ibx_display_interrupt_update(dev_priv, bits, bits);
+}
+static inline void
+ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+{
+ ibx_display_interrupt_update(dev_priv, bits, 0);
+}
+
/* i915_gem.c */
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
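
The header changes above convert the ibx_* macros into static inlines and add matching ilk_/bdw_ wrappers. The call sites stay identical, but the inline form type-checks its arguments and evaluates them exactly once. The general shape, with update_irq() as a hypothetical worker:

	/* macro form: textual substitution, no type checking */
	#define enable_irq_bits(dev_priv, bits) \
		update_irq((dev_priv), (bits), (bits))

	/* inline form: same codegen, but dev_priv and bits are checked
	 * against the declared parameter types */
	static inline void enable_irq_bits(struct drm_i915_private *dev_priv,
					   uint32_t bits)
	{
		update_irq(dev_priv, bits, bits);
	}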
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 33adc8f8ab20..b7d7cecdddf6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1210,8 +1210,16 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
if (i915_gem_request_completed(req, true))
return 0;
- timeout_expire = timeout ?
- jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
+ timeout_expire = 0;
+ if (timeout) {
+ if (WARN_ON(*timeout < 0))
+ return -EINVAL;
+
+ if (*timeout == 0)
+ return -ETIME;
+
+ timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+ }
if (INTEL_INFO(dev_priv)->gen >= 6)
gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
@@ -2941,6 +2949,10 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (!list_empty(&ring->request_list))
return;
+ /* we probably should sync with hangcheck here, using cancel_work_sync.
+ * Also locking seems to be fubar here, ring->request_list is protected
+ * by dev->struct_mutex. */
+
intel_mark_idle(dev);
if (mutex_trylock(&dev->struct_mutex)) {
@@ -3065,7 +3077,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (ret == 0)
ret = __i915_wait_request(req[i], reset_counter, true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
- file->driver_priv);
+ to_rps_client(file));
i915_gem_request_unreference__unlocked(req[i]);
}
return ret;
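
The timeout rework in __i915_wait_request() above gives the pointer three distinct meanings. A sketch of the caller-visible contract, as assumed from this hunk:

	s64 timeout_ns = args->timeout_ns;	/* from the wait ioctl */

	/*   timeout == NULL -> wait without bound
	 *   *timeout <  0   -> -EINVAL (plus a WARN); previously the
	 *                      negative value wrapped into a huge wait
	 *   *timeout == 0   -> -ETIME immediately: a pure completion poll
	 *   *timeout >  0   -> bounded wait, remaining time written back
	 */
	ret = __i915_wait_request(req, reset_counter, true,
				  timeout_ns > 0 ? &timeout_ns : NULL,
				  to_rps_client(file));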
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 4b9400402aa3..43761c5bcaca 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -141,8 +141,6 @@ static void i915_gem_context_clean(struct intel_context *ctx)
if (!ppgtt)
return;
- WARN_ON(!list_empty(&ppgtt->base.active_list));
-
list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
mm_list) {
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index b80d0456fe03..598198543dcd 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -642,11 +642,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
/* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN4(dev)) {
- uint32_t ddc2 = I915_READ(DCC2);
-
- if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
- dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ if (IS_GEN4(dev) &&
+ !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
if (dcc == 0xffffffff) {
@@ -675,16 +674,35 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
* matching, which was the case for the swizzling required in
* the table above, or from the 1-ch value being less than
* the minimum size of a rank.
+ *
+ * Reports indicate that the swizzling actually
+ * varies depending upon page placement inside the
+ * channels, i.e. we see swizzled pages where the
+ * banks of memory are paired and unswizzled on the
+ * uneven portion, so leave that as unknown.
*/
- if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else {
+ if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
}
+ if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
+ swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
+ /* Userspace likes to explode if it sees unknown swizzling,
+ * so lie. We will finish the lie when reporting through
+ * the get-tiling-ioctl by reporting the physical swizzle
+ * mode as unknown instead.
+ *
+ * As we don't strictly know what the swizzling is, it may be
+ * bit17 dependent, and so we need to also prevent the pages
+ * from being moved.
+ */
+ dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+
dev_priv->mm.bit_6_swizzle_x = swizzle_x;
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
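
For background on the hunk above: on the affected platforms the memory controller XORs address bit 6 with bit 9 (and, in the 9_10 mode, also bit 10) when mapping pages, which is what "swizzling" refers to. A reduced, userspace-style illustration of the address rule, not kernel code:

	#include <stdint.h>

	/* bit 9 shifted right by 3 lands at bit position 6 */
	static uint32_t swizzle_9(uint32_t offset)
	{
		return offset ^ ((offset >> 3) & (1u << 6));
	}

	/* 9_10 mode additionally folds in bit 10 (>> 4 lands at bit 6) */
	static uint32_t swizzle_9_10(uint32_t offset)
	{
		return offset ^ (((offset >> 3) ^ (offset >> 4)) & (1u << 6));
	}

When the detected mode cannot be expressed this way, the new UNKNOWN handling pins pages via QUIRK_PIN_SWIZZLED_PAGES and reports NONE as the logical swizzle, finishing the lie through the get-tiling ioctl as the added comment explains.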
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index ed9f1002ab36..0d23785ba818 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -86,7 +86,6 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
return -EINVAL;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- spin_lock(&dev_priv->guc.host2guc_lock);
dev_priv->guc.action_count += 1;
dev_priv->guc.action_cmd = data[0];
@@ -119,7 +118,6 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
}
dev_priv->guc.action_status = status;
- spin_unlock(&dev_priv->guc.host2guc_lock);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
@@ -292,16 +290,12 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
const uint32_t cacheline_size = cache_line_size();
uint32_t offset;
- spin_lock(&guc->host2guc_lock);
-
/* Doorbell uses a single cache line within a page */
offset = offset_in_page(guc->db_cacheline);
/* Moving to next cache line to reduce contention */
guc->db_cacheline += cacheline_size;
- spin_unlock(&guc->host2guc_lock);
-
DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
offset, guc->db_cacheline, cacheline_size);
@@ -322,13 +316,11 @@ static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
const uint16_t end = start + half;
uint16_t id;
- spin_lock(&guc->host2guc_lock);
id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
if (id == end)
id = GUC_INVALID_DOORBELL_ID;
else
bitmap_set(guc->doorbell_bitmap, id, 1);
- spin_unlock(&guc->host2guc_lock);
DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
hi_pri ? "high" : "normal", id);
@@ -338,9 +330,7 @@ static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
static void release_doorbell(struct intel_guc *guc, uint16_t id)
{
- spin_lock(&guc->host2guc_lock);
bitmap_clear(guc->doorbell_bitmap, id, 1);
- spin_unlock(&guc->host2guc_lock);
}
/*
@@ -487,16 +477,13 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
struct guc_process_desc *desc;
void *base;
u32 size = sizeof(struct guc_wq_item);
- int ret = 0, timeout_counter = 200;
+ int ret = -ETIMEDOUT, timeout_counter = 200;
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
while (timeout_counter-- > 0) {
- ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head,
- gc->wq_size) >= size, 1);
-
- if (!ret) {
+ if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
*offset = gc->wq_tail;
/* advance the tail for next workqueue item */
@@ -505,7 +492,11 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
/* this will break the loop */
timeout_counter = 0;
+ ret = 0;
}
+
+ if (timeout_counter)
+ usleep_range(1000, 2000);
};
kunmap_atomic(base);
@@ -597,15 +588,12 @@ int i915_guc_submit(struct i915_guc_client *client,
{
struct intel_guc *guc = client->guc;
enum intel_ring_id ring_id = rq->ring->id;
- unsigned long flags;
int q_ret, b_ret;
/* Need this because of the deferred pin ctx and ring */
/* Shall we move this right after ring is pinned? */
lr_context_update(rq);
- spin_lock_irqsave(&client->wq_lock, flags);
-
q_ret = guc_add_workqueue_item(client, rq);
if (q_ret == 0)
b_ret = guc_ring_doorbell(client);
@@ -620,12 +608,8 @@ int i915_guc_submit(struct i915_guc_client *client,
} else {
client->retcode = 0;
}
- spin_unlock_irqrestore(&client->wq_lock, flags);
-
- spin_lock(&guc->host2guc_lock);
guc->submissions[ring_id] += 1;
guc->last_seqno[ring_id] = rq->seqno;
- spin_unlock(&guc->host2guc_lock);
return q_ret;
}
@@ -677,7 +661,7 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
/**
* gem_release_guc_obj() - Release gem object allocated for GuC usage
* @obj: gem obj to be released
- */
+ */
static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
{
if (!obj)
@@ -768,7 +752,6 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
client->client_obj = obj;
client->wq_offset = GUC_DB_SIZE;
client->wq_size = GUC_WQ_SIZE;
- spin_lock_init(&client->wq_lock);
client->doorbell_offset = select_doorbell_cacheline(guc);
@@ -871,8 +854,6 @@ int i915_guc_submission_init(struct drm_device *dev)
if (!guc->ctx_pool_obj)
return -ENOMEM;
- spin_lock_init(&dev_priv->guc.host2guc_lock);
-
ida_init(&guc->ctx_ids);
guc_create_log(guc);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c8ba94968aaf..e88d692583a5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -215,9 +215,9 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
{
uint32_t new_val;
@@ -239,18 +239,6 @@ static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
}
}
-void
-ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- ilk_update_display_irq(dev_priv, mask, mask);
-}
-
-void
-ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- ilk_update_display_irq(dev_priv, mask, 0);
-}
-
/**
* ilk_update_gt_irq - update GTIMR
* @dev_priv: driver private
@@ -300,11 +288,11 @@ static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
}
/**
- * snb_update_pm_irq - update GEN6_PMIMR
- * @dev_priv: driver private
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
+ * snb_update_pm_irq - update GEN6_PMIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
@@ -418,11 +406,11 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
}
/**
- * bdw_update_port_irq - update DE port interrupt
- * @dev_priv: driver private
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
+ * bdw_update_port_irq - update DE port interrupt
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
@@ -450,6 +438,38 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
}
/**
+ * bdw_update_pipe_irq - update DE pipe interrupt
+ * @dev_priv: driver private
+ * @pipe: pipe whose interrupt to update
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
+{
+ uint32_t new_val;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ return;
+
+ new_val = dev_priv->de_irq_mask[pipe];
+ new_val &= ~interrupt_mask;
+ new_val |= (~enabled_irq_mask & interrupt_mask);
+
+ if (new_val != dev_priv->de_irq_mask[pipe]) {
+ dev_priv->de_irq_mask[pipe] = new_val;
+ I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ }
+}
+
+/**
* ibx_display_interrupt_update - update SDEIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
@@ -1824,8 +1844,24 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
+ /*
+ * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
+ * unless we touch the hotplug register, even if hotplug_trigger is
+ * zero. Not acking leads to "The master control interrupt lied (SDE)!"
+ * errors.
+ */
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ if (!hotplug_trigger) {
+ u32 mask = PORTA_HOTPLUG_STATUS_MASK |
+ PORTD_HOTPLUG_STATUS_MASK |
+ PORTC_HOTPLUG_STATUS_MASK |
+ PORTB_HOTPLUG_STATUS_MASK;
+ dig_hotplug_reg &= ~mask;
+ }
+
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ if (!hotplug_trigger)
+ return;
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
dig_hotplug_reg, hpd,
@@ -1840,8 +1876,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- if (hotplug_trigger)
- ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+ ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1934,8 +1969,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- if (hotplug_trigger)
- ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+ ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2351,13 +2385,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
spt_irq_handler(dev, pch_iir);
else
cpt_irq_handler(dev, pch_iir);
- } else {
- /*
- * Like on previous PCH there seems to be something
- * fishy going on with forwarding PCH interrupts.
- */
- DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
- }
+ } else
+ DRM_ERROR("The master control interrupt lied (SDE)!\n");
+
}
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -2645,7 +2675,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv, bit);
+ ilk_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -2670,10 +2700,9 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
- I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
- POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
return 0;
}
@@ -2700,7 +2729,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_disable_display_irq(dev_priv, bit);
+ ilk_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -2721,9 +2750,7 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
- I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
- POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -3452,7 +3479,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
* setup is guaranteed to run in single-threaded context. But we
* need it to make the assert_spin_locked happy. */
spin_lock_irq(&dev_priv->irq_lock);
- ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+ ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
spin_unlock_irq(&dev_priv->irq_lock);
}
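
A note on the mask arithmetic that the new bdw_update_pipe_irq() shares with ilk_update_display_irq() and the other *_update_*_irq helpers above: IMR bits are mask bits, so a set bit disables the interrupt. interrupt_mask selects which bits may be touched; enabled_irq_mask says which of those should end up unmasked. Worked through on one illustrative bit:

	uint32_t imr = 0xffffffff;		/* everything masked */
	const uint32_t BIT_VBLANK = 1u << 0;	/* illustrative */

	/* enable: interrupt_mask = BIT_VBLANK, enabled_irq_mask = BIT_VBLANK */
	imr &= ~BIT_VBLANK;			/* forget old state of the bit */
	imr |= ~BIT_VBLANK & BIT_VBLANK;	/* == 0: leave it unmasked */
	/* imr now has BIT_VBLANK clear -> vblank interrupt fires */

	/* disable: interrupt_mask = BIT_VBLANK, enabled_irq_mask = 0 */
	imr &= ~BIT_VBLANK;
	imr |= ~0u & BIT_VBLANK;		/* sets the bit -> masked */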
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1a12d44b9710..1dae5ac3e0b1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -855,31 +855,31 @@ enum skl_disp_power_wells {
*
* Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
* digital port D (CHV) or port A (BXT).
- */
-/*
- * Dual channel PHY (VLV/CHV/BXT)
- * ---------------------------------
- * | CH0 | CH1 |
- * | CMN/PLL/REF | CMN/PLL/REF |
- * |---------------|---------------| Display PHY
- * | PCS01 | PCS23 | PCS01 | PCS23 |
- * |-------|-------|-------|-------|
- * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
- * ---------------------------------
- * | DDI0 | DDI1 | DP/HDMI ports
- * ---------------------------------
*
- * Single channel PHY (CHV/BXT)
- * -----------------
- * | CH0 |
- * | CMN/PLL/REF |
- * |---------------| Display PHY
- * | PCS01 | PCS23 |
- * |-------|-------|
- * |TX0|TX1|TX2|TX3|
- * -----------------
- * | DDI2 | DP/HDMI port
- * -----------------
+ *
+ * Dual channel PHY (VLV/CHV/BXT)
+ * ---------------------------------
+ * | CH0 | CH1 |
+ * | CMN/PLL/REF | CMN/PLL/REF |
+ * |---------------|---------------| Display PHY
+ * | PCS01 | PCS23 | PCS01 | PCS23 |
+ * |-------|-------|-------|-------|
+ * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ * ---------------------------------
+ * | DDI0 | DDI1 | DP/HDMI ports
+ * ---------------------------------
+ *
+ * Single channel PHY (CHV/BXT)
+ * -----------------
+ * | CH0 |
+ * | CMN/PLL/REF |
+ * |---------------| Display PHY
+ * | PCS01 | PCS23 |
+ * |-------|-------|
+ * |TX0|TX1|TX2|TX3|
+ * -----------------
+ * | DDI2 | DP/HDMI port
+ * -----------------
*/
#define DPIO_DEVFN 0
@@ -2973,6 +2973,13 @@ enum skl_disp_power_wells {
#define OGAMC0 _MMIO(0x30024)
/*
+ * GEN9 clock gating regs
+ */
+#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
+#define PWM2_GATING_DIS (1 << 14)
+#define PWM1_GATING_DIS (1 << 13)
+
+/*
* Display engine regs
*/
@@ -7549,6 +7556,7 @@ enum skl_disp_power_wells {
#define SFUSE_STRAP _MMIO(0xc2014)
#define SFUSE_STRAP_FUSE_LOCK (1<<13)
#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
+#define SFUSE_STRAP_CRT_DISABLED (1<<6)
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
@@ -7706,7 +7714,7 @@ enum skl_disp_power_wells {
#define BXT_DSI_PLL_RATIO_MAX 0x7D
#define BXT_DSI_PLL_RATIO_MIN 0x22
#define BXT_DSI_PLL_RATIO_MASK 0xFF
-#define BXT_REF_CLOCK_KHZ 19500
+#define BXT_REF_CLOCK_KHZ 19200
#define BXT_DSI_PLL_ENABLE _MMIO(0x46080)
#define BXT_DSI_PLL_DO_ENABLE (1 << 31)
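
[Editor's note] GEN9_CLKGATE_DIS_0 and the PWM gating bits added above are consumed with the usual read-modify-write pattern; a hedged sketch (the call site is illustrative, not part of this patch):

	u32 val = I915_READ(GEN9_CLKGATE_DIS_0);

	/* keep the PWM units clocked even when the hardware thinks they are idle */
	I915_WRITE(GEN9_CLKGATE_DIS_0, val | PWM1_GATING_DIS | PWM2_GATING_DIS);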
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index ce82f9c7df24..070470fe9a91 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -356,7 +356,10 @@ parse_general_features(struct drm_i915_private *dev_priv,
general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) {
dev_priv->vbt.int_tv_support = general->int_tv_support;
- dev_priv->vbt.int_crt_support = general->int_crt_support;
+ /* int_crt_support can't be trusted on earlier platforms */
+ if (bdb->version >= 155 &&
+ (HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv)))
+ dev_priv->vbt.int_crt_support = general->int_crt_support;
dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
dev_priv->vbt.lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 27b3e610e8f0..9285fc1e64ee 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -777,11 +777,37 @@ void intel_crt_init(struct drm_device *dev)
struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ i915_reg_t adpa_reg;
+ u32 adpa;
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
return;
+ if (HAS_PCH_SPLIT(dev))
+ adpa_reg = PCH_ADPA;
+ else if (IS_VALLEYVIEW(dev))
+ adpa_reg = VLV_ADPA;
+ else
+ adpa_reg = ADPA;
+
+ adpa = I915_READ(adpa_reg);
+ if ((adpa & ADPA_DAC_ENABLE) == 0) {
+ /*
+ * On some machines (some IVB at least) CRT can be
+ * fused off, but there's no known fuse bit to
+ * indicate that. On these machine the ADPA register
+ * works normally, except the DAC enable bit won't
+ * take. So the only way to tell is attempt to enable
+ * it and see what happens.
+ */
+ I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE |
+ ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0)
+ return;
+ I915_WRITE(adpa_reg, adpa);
+ }
+
crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
if (!crt)
return;
@@ -798,7 +824,7 @@ void intel_crt_init(struct drm_device *dev)
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
intel_connector_attach_encoder(intel_connector, &crt->base);
@@ -815,12 +841,7 @@ void intel_crt_init(struct drm_device *dev)
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- if (HAS_PCH_SPLIT(dev))
- crt->adpa_reg = PCH_ADPA;
- else if (IS_VALLEYVIEW(dev))
- crt->adpa_reg = VLV_ADPA;
- else
- crt->adpa_reg = ADPA;
+ crt->adpa_reg = adpa_reg;
crt->base.compute_config = intel_crt_compute_config;
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) {
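
[Editor's note] The ADPA probe above is an instance of a general technique for detecting fused-off hardware: try to set an enable bit and check whether it sticks. A self-contained sketch of the pattern with hypothetical names (only the structure mirrors the hunk):

	static bool reg_bit_sticks(struct drm_i915_private *dev_priv,
				   i915_reg_t reg, u32 bit)
	{
		u32 saved = I915_READ(reg);
		bool sticks;

		I915_WRITE(reg, saved | bit);
		sticks = (I915_READ(reg) & bit) != 0;
		I915_WRITE(reg, saved);		/* restore the original value */

		return sticks;
	}

The real check additionally sets ADPA_HSYNC_CNTL_DISABLE and ADPA_VSYNC_CNTL_DISABLE during the probe so it never starts driving sync signals to a connected monitor.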
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 76ce7c2960b6..4afb3103eb96 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3151,7 +3151,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = true;
intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- if (intel_hdmi->infoframe_enabled(&encoder->base))
+ if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
break;
case TRANS_DDI_MODE_SELECT_DVI:
@@ -3284,7 +3284,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
encoder = &intel_encoder->base;
drm_encoder_init(dev, encoder, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
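
[Editor's note] Every drm_encoder_init(), drm_universal_plane_init() and drm_crtc_init_with_planes() call site in this patch gains a trailing argument: the DRM core grew an optional printf-style object name, and passing NULL keeps the autogenerated one. A hedged example of naming an encoder explicitly (hypothetical call site):

	drm_encoder_init(dev, encoder, &intel_ddi_funcs,
			 DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));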
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9228ec018e98..bda6b9c82e66 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -44,6 +44,8 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/dma-buf.h>
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
@@ -2130,7 +2132,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* need the check.
*/
if (HAS_GMCH_DISPLAY(dev_priv->dev))
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+ if (crtc->config->has_dsi_encoder)
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -3174,8 +3176,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->fbc.disable_fbc)
- dev_priv->fbc.disable_fbc(dev_priv);
+ if (dev_priv->fbc.deactivate)
+ dev_priv->fbc.deactivate(dev_priv);
dev_priv->display.update_primary_plane(crtc, fb, x, y);
@@ -4137,6 +4139,12 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_TUSIZE1(pipe),
I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+ /*
+ * Sometimes spurious CPU pipe underruns happen during FDI
+ * training, at least with VGA+HDMI cloning. Suppress them.
+ */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
@@ -4170,6 +4178,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
const struct drm_display_mode *adjusted_mode =
@@ -4628,7 +4638,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
return;
if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
+ if (intel_crtc->config->has_dsi_encoder)
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -4784,7 +4794,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
{
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
if (atomic->wait_vblank)
intel_wait_for_vblank(dev, crtc->pipe);
@@ -4798,7 +4807,7 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
intel_update_watermarks(&crtc->base);
if (atomic->update_fbc)
- intel_fbc_update(dev_priv);
+ intel_fbc_update(crtc);
if (atomic->post_enable_primary)
intel_post_enable_primary(&crtc->base);
@@ -4813,7 +4822,7 @@ static void intel_pre_plane_update(struct intel_crtc *crtc)
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
if (atomic->disable_fbc)
- intel_fbc_disable_crtc(crtc);
+ intel_fbc_deactivate(crtc);
if (crtc->atomic.disable_ips)
hsw_disable_ips(crtc);
@@ -4921,6 +4930,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config->has_pch_encoder)
intel_wait_for_vblank(dev, pipe);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+
+ intel_fbc_enable(intel_crtc);
}
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4938,7 +4949,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->state);
- bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
if (WARN_ON(intel_crtc->active))
return;
@@ -4971,10 +4981,12 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ if (intel_crtc->config->has_pch_encoder)
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ else
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
for_each_encoder_on_crtc(dev, crtc, encoder) {
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
if (encoder->pre_enable)
encoder->pre_enable(encoder);
}
@@ -4982,7 +4994,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config->has_pch_encoder)
dev_priv->display.fdi_link_train(crtc);
- if (!is_dsi)
+ if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_enable_pipe_clock(intel_crtc);
if (INTEL_INFO(dev)->gen >= 9)
@@ -4997,7 +5009,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc_load_lut(crtc);
intel_ddi_set_pipe_settings(crtc);
- if (!is_dsi)
+ if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_enable_transcoder_func(crtc);
intel_update_watermarks(crtc);
@@ -5006,7 +5018,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
- if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
+ if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, true);
assert_vblank_disabled(crtc);
@@ -5017,9 +5029,13 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_opregion_notify_encoder(encoder, true);
}
- if (intel_crtc->config->has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder) {
+ intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev, pipe);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
+ }
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
@@ -5028,6 +5044,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_wait_for_vblank(dev, hsw_workaround_pipe);
intel_wait_for_vblank(dev, hsw_workaround_pipe);
}
+
+ intel_fbc_enable(intel_crtc);
}
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
@@ -5062,12 +5080,22 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
+ /*
+ * Sometimes spurious CPU pipe underruns happen when the
+ * pipe is already disabled, but FDI RX/TX is still enabled.
+ * Happens at least with VGA+HDMI cloning. Suppress them.
+ */
+ if (intel_crtc->config->has_pch_encoder)
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
intel_disable_pipe(intel_crtc);
ironlake_pfit_disable(intel_crtc, false);
- if (intel_crtc->config->has_pch_encoder)
+ if (intel_crtc->config->has_pch_encoder) {
ironlake_fdi_disable(crtc);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ }
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
@@ -5098,6 +5126,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
}
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+
+ intel_fbc_disable_crtc(intel_crtc);
}
static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5107,7 +5137,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
@@ -5126,7 +5155,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
- if (!is_dsi)
+ if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
if (INTEL_INFO(dev)->gen >= 9)
@@ -5134,7 +5163,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
else
ironlake_pfit_disable(intel_crtc, false);
- if (!is_dsi)
+ if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_disable_pipe_clock(intel_crtc);
if (intel_crtc->config->has_pch_encoder) {
@@ -5149,6 +5178,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
+
+ intel_fbc_disable_crtc(intel_crtc);
}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5214,10 +5245,6 @@ static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
}
}
-#define for_each_power_domain(domain, mask) \
- for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- if ((1 << (domain)) & (mask))
-
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
@@ -6140,13 +6167,10 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- bool is_dsi;
if (WARN_ON(intel_crtc->active))
return;
- is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
-
if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc, M1_N1);
@@ -6169,7 +6193,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
- if (!is_dsi) {
+ if (!intel_crtc->config->has_dsi_encoder) {
if (IS_CHERRYVIEW(dev)) {
chv_prepare_pll(intel_crtc, intel_crtc->config);
chv_enable_pll(intel_crtc, intel_crtc->config);
@@ -6248,6 +6272,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
+
+ intel_fbc_enable(intel_crtc);
}
static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -6295,7 +6321,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
+ if (!intel_crtc->config->has_dsi_encoder) {
if (IS_CHERRYVIEW(dev))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev))
@@ -6310,6 +6336,8 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+ intel_fbc_disable_crtc(intel_crtc);
}
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
@@ -7908,8 +7936,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
int refclk, num_connectors = 0;
intel_clock_t clock;
bool ok;
- bool is_dsi = false;
- struct intel_encoder *encoder;
const intel_limit_t *limit;
struct drm_atomic_state *state = crtc_state->base.state;
struct drm_connector *connector;
@@ -7919,26 +7945,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != &crtc->base)
- continue;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
-
- switch (encoder->type) {
- case INTEL_OUTPUT_DSI:
- is_dsi = true;
- break;
- default:
- break;
- }
+ if (crtc_state->has_dsi_encoder)
+ return 0;
- num_connectors++;
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc == &crtc->base)
+ num_connectors++;
}
- if (is_dsi)
- return 0;
-
if (!crtc_state->clock_set) {
refclk = i9xx_get_refclk(crtc_state, num_connectors);
@@ -8931,7 +8945,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
+ is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
@@ -9705,14 +9719,10 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
else
cdclk = 337500;
- /*
-	 * FIXME move the cdclk calculation to
-	 * compute_config() so we can fail gracefully.
- */
if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- cdclk = dev_priv->max_cdclk_freq;
+ DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+ cdclk, dev_priv->max_cdclk_freq);
+ return -EINVAL;
}
to_intel_atomic_state(state)->cdclk = cdclk;
@@ -9807,6 +9817,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
break;
case PORT_CLK_SEL_SPLL:
pipe_config->shared_dpll = DPLL_ID_SPLL;
+ break;
}
}
@@ -11191,6 +11202,10 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
return true;
else if (i915.enable_execlists)
return true;
+ else if (obj->base.dma_buf &&
+ !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
+ false))
+ return true;
else
return ring != i915_gem_request_get_ring(obj->last_write_req);
}
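
[Editor's note] use_mmio_flip() now also forces the MMIO path when the framebuffer's dma-buf still carries an unsignaled exclusive fence, since a ring-based flip cannot wait on a foreign fence. The predicate in isolation (a sketch matching the call in the hunk; <linux/reservation.h> is pulled in by a later hunk in this file):

	/* true when no writer is still pending on the dma-buf: the second
	 * argument (test_all = false) checks only the exclusive fence */
	static bool dmabuf_write_idle(struct dma_buf *dma_buf)
	{
		return reservation_object_test_signaled_rcu(dma_buf->resv, false);
	}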
@@ -11305,6 +11320,9 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
{
struct intel_mmio_flip *mmio_flip =
container_of(work, struct intel_mmio_flip, work);
+ struct intel_framebuffer *intel_fb =
+ to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
if (mmio_flip->req) {
WARN_ON(__i915_wait_request(mmio_flip->req,
@@ -11314,6 +11332,12 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
i915_gem_request_unreference__unlocked(mmio_flip->req);
}
+ /* For framebuffer backed by dmabuf, wait for fence */
+ if (obj->base.dma_buf)
+ WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
+ false, false,
+ MAX_SCHEDULE_TIMEOUT) < 0);
+
intel_do_mmio_flip(mmio_flip);
kfree(mmio_flip);
}
@@ -11584,7 +11608,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
- intel_fbc_disable_crtc(intel_crtc);
+ intel_fbc_deactivate(intel_crtc);
intel_frontbuffer_flip_prepare(dev,
to_intel_plane(primary)->frontbuffer_bit);
@@ -12582,12 +12606,13 @@ intel_pipe_config_compare(struct drm_device *dev,
if (INTEL_INFO(dev)->gen < 8) {
PIPE_CONF_CHECK_M_N(dp_m_n);
- PIPE_CONF_CHECK_I(has_drrs);
if (current_config->has_drrs)
PIPE_CONF_CHECK_M_N(dp_m2_n2);
} else
PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
+ PIPE_CONF_CHECK_I(has_dsi_encoder);
+
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
@@ -13384,6 +13409,13 @@ static int intel_atomic_commit(struct drm_device *dev,
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_disable_shared_dpll(intel_crtc);
+
+ /*
+ * Underruns don't always raise
+ * interrupts, so check manually.
+ */
+ intel_check_cpu_fifo_underruns(dev_priv);
+ intel_check_pch_fifo_underruns(dev_priv);
}
}
@@ -13653,6 +13685,17 @@ intel_prepare_plane_fb(struct drm_plane *plane,
return ret;
}
+ /* For framebuffer backed by dmabuf, wait for fence */
+ if (obj && obj->base.dma_buf) {
+ ret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
+ false, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret == -ERESTARTSYS)
+ return ret;
+
+ WARN_ON(ret < 0);
+ }
+
if (!obj) {
ret = 0;
} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
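
[Editor's note] The return-value handling above relies on the reservation wait semantics; a sketch annotating the same call as in the hunk:

	long ret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
						       false,	/* exclusive fence only */
						       true,	/* interruptible */
						       MAX_SCHEDULE_TIMEOUT);
	if (ret == -ERESTARTSYS)	/* signal: let the caller be restarted */
		return ret;
	WARN_ON(ret < 0);		/* 0 (timeout) can't happen with an infinite timeout */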
@@ -13905,7 +13948,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
drm_universal_plane_init(dev, &primary->base, 0,
&intel_plane_funcs,
intel_primary_formats, num_formats,
- DRM_PLANE_TYPE_PRIMARY);
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (INTEL_INFO(dev)->gen >= 4)
intel_create_rotation_property(dev, primary);
@@ -14044,7 +14087,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
&intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
- DRM_PLANE_TYPE_CURSOR);
+ DRM_PLANE_TYPE_CURSOR, NULL);
if (INTEL_INFO(dev)->gen >= 4) {
if (!dev->mode_config.rotation_property)
@@ -14121,7 +14164,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
goto fail;
ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
- cursor, &intel_crtc_funcs);
+ cursor, &intel_crtc_funcs, NULL);
if (ret)
goto fail;
@@ -14247,7 +14290,14 @@ static bool intel_crt_present(struct drm_device *dev)
if (IS_CHERRYVIEW(dev))
return false;
- if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
+ if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+ return false;
+
+ /* DDI E can't be used if DDI A requires 4 lanes */
+ if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ return false;
+
+ if (!dev_priv->vbt.int_crt_support)
return false;
return true;
@@ -14790,9 +14840,6 @@ static void intel_init_display(struct drm_device *dev)
else if (IS_I945GM(dev) || IS_845G(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
- else if (IS_PINEVIEW(dev))
- dev_priv->display.get_display_clock_speed =
- pnv_get_display_clock_speed;
else if (IS_I915GM(dev))
dev_priv->display.get_display_clock_speed =
i915gm_get_display_clock_speed;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index bec443a629da..0f0573aa1b0d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -681,7 +681,7 @@ static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 * The clock divider is based on the hrawclk, and would like to run at
 * 2 MHz. So, take the hrawclk value, divide by 2, and use that.
*/
- return index ? 0 : intel_hrawclk(dev) / 2;
+ return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
}
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
@@ -694,10 +694,10 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return 0;
if (intel_dig_port->port == PORT_A) {
- return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
+ return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
} else {
- return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+ return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
}
}
@@ -711,7 +711,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
if (index)
return 0;
return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
- } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+ } else if (HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -719,7 +719,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
default: return 0;
}
} else {
- return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+ return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
}
}
@@ -2697,6 +2697,15 @@ static void intel_enable_dp(struct intel_encoder *encoder)
if (IS_VALLEYVIEW(dev))
vlv_init_panel_power_sequencer(intel_dp);
+ /*
+ * We get an occasional spurious underrun between the port
+ * enable and vdd enable, when enabling port A eDP.
+ *
+ * FIXME: Not sure if this applies to (PCH) port D eDP as well
+ */
+ if (port == PORT_A)
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
intel_dp_enable_port(intel_dp);
if (port == PORT_A && IS_GEN5(dev_priv)) {
@@ -2714,6 +2723,9 @@ static void intel_enable_dp(struct intel_encoder *encoder)
edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
+ if (port == PORT_A)
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
pps_unlock(intel_dp);
if (IS_VALLEYVIEW(dev)) {
@@ -4962,7 +4974,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
enum intel_display_power_domain power_domain;
enum irqreturn ret = IRQ_NONE;
- if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+ if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
+ intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
@@ -5976,7 +5989,7 @@ intel_dp_init(struct drm_device *dev,
encoder = &intel_encoder->base;
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
intel_encoder->compute_config = intel_dp_compute_config;
intel_encoder->disable = intel_disable_dp;
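
[Editor's note] The DIV_ROUND_UP to DIV_ROUND_CLOSEST conversions above pick the divider whose resulting AUX clock is nearest the 2 MHz target instead of always erring to the slow side. A hedged illustration with a made-up source clock:

	/* 4.1 MHz source, 2 MHz target (hypothetical numbers) */
	DIV_ROUND_UP(4100, 2000);	/* == 3 -> ~1.37 MHz, always errs low   */
	DIV_ROUND_CLOSEST(4100, 2000);	/* == 2 -> 2.05 MHz, nearest to target */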
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 8c4e7dfe304c..e8d369d0a713 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -536,7 +536,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_mst->primary = intel_dig_port;
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
- DRM_MODE_ENCODER_DPMST);
+ DRM_MODE_ENCODER_DPMST, NULL);
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->crtc_mask = 0x7;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ab5c147fa9e9..50f83d220249 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -393,6 +393,9 @@ struct intel_crtc_state {
* accordingly. */
bool has_dp_encoder;
+ /* DSI has special cases */
+ bool has_dsi_encoder;
+
/* Whether we should send NULL infoframes. Required for audio. */
bool has_hdmi_sink;
@@ -710,7 +713,8 @@ struct intel_hdmi {
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode);
- bool (*infoframe_enabled)(struct drm_encoder *encoder);
+ bool (*infoframe_enabled)(struct drm_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
};
struct intel_dp_mst_encoder;
@@ -1316,9 +1320,11 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
#endif
/* intel_fbc.c */
-bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
-void intel_fbc_update(struct drm_i915_private *dev_priv);
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
+void intel_fbc_deactivate(struct intel_crtc *crtc);
+void intel_fbc_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_enable(struct intel_crtc *crtc);
void intel_fbc_disable(struct drm_i915_private *dev_priv);
void intel_fbc_disable_crtc(struct intel_crtc *crtc);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
@@ -1410,6 +1416,8 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index efb5a27dd49c..fff9a66c32a1 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -266,16 +266,18 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
}
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *config)
+ struct intel_crtc_state *pipe_config)
{
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- struct drm_display_mode *adjusted_mode = &config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
DRM_DEBUG_KMS("\n");
+ pipe_config->has_dsi_encoder = true;
+
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -462,6 +464,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
intel_panel_enable_backlight(intel_dsi->attached_connector);
}
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
+
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
@@ -474,6 +478,9 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
+ intel_dsi_prepare(encoder);
+ intel_enable_dsi_pll(encoder);
+
/* Panel Enable over CRC PMIC */
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
@@ -699,6 +706,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
u32 pclk = 0;
DRM_DEBUG_KMS("\n");
+ pipe_config->has_dsi_encoder = true;
+
/*
* DPLL_MD is not used in case of DSI, reading will get some default value
* set dpll_md = 0
@@ -1026,15 +1035,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
}
}
-static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
-{
- DRM_DEBUG_KMS("\n");
-
- intel_dsi_prepare(encoder);
- intel_enable_dsi_pll(encoder);
-
-}
-
static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
@@ -1152,11 +1152,10 @@ void intel_dsi_init(struct drm_device *dev)
connector = &intel_connector->base;
- drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
+ drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
+ NULL);
- /* XXX: very likely not all of these are needed */
intel_encoder->compute_config = intel_dsi_compute_config;
- intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
intel_encoder->enable = intel_dsi_enable_nop;
intel_encoder->disable = intel_dsi_pre_disable;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7161deb2aed8..286baec979c8 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -429,7 +429,7 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder = &intel_dvo->base;
drm_encoder_init(dev, &intel_encoder->base,
- &intel_dvo_enc_funcs, encoder_type);
+ &intel_dvo_enc_funcs, encoder_type, NULL);
intel_encoder->disable = intel_disable_dvo;
intel_encoder->enable = intel_enable_dvo;
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 11fc5281e8ef..a1988a486b92 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -43,7 +43,7 @@
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
- return dev_priv->fbc.enable_fbc != NULL;
+ return dev_priv->fbc.activate != NULL;
}
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
@@ -51,6 +51,11 @@ static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
}
+static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
+{
+ return INTEL_INFO(dev_priv)->gen < 4;
+}
+
/*
 * On platforms where the CRTC's x:0/y:0 coordinates don't match the
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -64,11 +69,51 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
return crtc->base.y - crtc->adjusted_y;
}
-static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
+/*
+ * For SKL+, the plane source size used by the hardware is based on the value we
+ * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
+ * we wrote to PIPESRC.
+ */
+static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
+ int *width, int *height)
+{
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(crtc->base.primary->state);
+ int w, h;
+
+ if (intel_rotation_90_or_270(plane_state->base.rotation)) {
+ w = drm_rect_height(&plane_state->src) >> 16;
+ h = drm_rect_width(&plane_state->src) >> 16;
+ } else {
+ w = drm_rect_width(&plane_state->src) >> 16;
+ h = drm_rect_height(&plane_state->src) >> 16;
+ }
+
+ if (width)
+ *width = w;
+ if (height)
+ *height = h;
+}
+
+static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
+ struct drm_framebuffer *fb)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ int lines;
+
+ intel_fbc_get_plane_source_size(crtc, NULL, &lines);
+ if (INTEL_INFO(dev_priv)->gen >= 7)
+ lines = min(lines, 2048);
+
+ /* Hardware needs the full buffer stride, not just the active area. */
+ return lines * fb->pitches[0];
+}
+
+static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
u32 fbc_ctl;
- dev_priv->fbc.enabled = false;
+ dev_priv->fbc.active = false;
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
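
[Editor's note] intel_fbc_calculate_cfb_size() above is simply the visible line count times the full stride, with a 2048-line clamp on gen7+. A worked example under assumed numbers:

	/* hypothetical 1920x1080 framebuffer with an 8192-byte stride:
	 * 1080 visible lines * 8192 bytes = 8847360 bytes of CFB;
	 * the gen7+ clamp (min(1080, 2048)) doesn't bite here */
	int cfb_bytes = intel_fbc_calculate_cfb_size(crtc, fb);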
@@ -83,11 +128,9 @@ static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
-
- DRM_DEBUG_KMS("disabled FBC\n");
}
-static void i8xx_fbc_enable(struct intel_crtc *crtc)
+static void i8xx_fbc_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
@@ -96,10 +139,10 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc)
int i;
u32 fbc_ctl;
- dev_priv->fbc.enabled = true;
+ dev_priv->fbc.active = true;
/* Note: fbc.threshold == 1 for i8xx */
- cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
+ cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
@@ -132,24 +175,21 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc)
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
- cfb_pitch, crtc->base.y, plane_name(crtc->plane));
}
-static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
-static void g4x_fbc_enable(struct intel_crtc *crtc)
+static void g4x_fbc_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 dpfc_ctl;
- dev_priv->fbc.enabled = true;
+ dev_priv->fbc.active = true;
dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
@@ -162,27 +202,23 @@ static void g4x_fbc_enable(struct intel_crtc *crtc)
/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
-static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
+static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
u32 dpfc_ctl;
- dev_priv->fbc.enabled = false;
+ dev_priv->fbc.active = false;
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
}
}
-static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
@@ -194,7 +230,7 @@ static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
POSTING_READ(MSG_FBC_REND_STATE);
}
-static void ilk_fbc_enable(struct intel_crtc *crtc)
+static void ilk_fbc_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
@@ -203,7 +239,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
int threshold = dev_priv->fbc.threshold;
unsigned int y_offset;
- dev_priv->fbc.enabled = true;
+ dev_priv->fbc.active = true;
dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
@@ -238,32 +274,28 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
}
intel_fbc_recompress(dev_priv);
-
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
-static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
+static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
u32 dpfc_ctl;
- dev_priv->fbc.enabled = false;
+ dev_priv->fbc.active = false;
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
}
}
-static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
+static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void gen7_fbc_enable(struct intel_crtc *crtc)
+static void gen7_fbc_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
@@ -271,7 +303,7 @@ static void gen7_fbc_enable(struct intel_crtc *crtc)
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
- dev_priv->fbc.enabled = true;
+ dev_priv->fbc.active = true;
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
@@ -317,153 +349,119 @@ static void gen7_fbc_enable(struct intel_crtc *crtc)
I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
intel_fbc_recompress(dev_priv);
-
- DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
/**
- * intel_fbc_enabled - Is FBC enabled?
+ * intel_fbc_is_active - Is FBC active?
* @dev_priv: i915 device instance
*
* This function is used to verify the current state of FBC.
* FIXME: This should be tracked in the plane config eventually
* instead of queried at runtime for most callers.
*/
-bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
- return dev_priv->fbc.enabled;
+ return dev_priv->fbc.active;
}
-static void intel_fbc_enable(struct intel_crtc *crtc,
- const struct drm_framebuffer *fb)
+static void intel_fbc_activate(const struct drm_framebuffer *fb)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = fb->dev->dev_private;
+ struct intel_crtc *crtc = dev_priv->fbc.crtc;
- dev_priv->fbc.enable_fbc(crtc);
+ dev_priv->fbc.activate(crtc);
- dev_priv->fbc.crtc = crtc;
dev_priv->fbc.fb_id = fb->base.id;
dev_priv->fbc.y = crtc->base.y;
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
- struct intel_fbc_work *work =
- container_of(to_delayed_work(__work),
- struct intel_fbc_work, work);
- struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
- struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
+ struct drm_i915_private *dev_priv =
+ container_of(__work, struct drm_i915_private, fbc.work.work);
+ struct intel_fbc_work *work = &dev_priv->fbc.work;
+ struct intel_crtc *crtc = dev_priv->fbc.crtc;
+ int delay_ms = 50;
+
+retry:
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display to settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ *
+ * WaFbcWaitForVBlankBeforeEnable:ilk,snb
+ */
+ wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
mutex_lock(&dev_priv->fbc.lock);
- if (work == dev_priv->fbc.fbc_work) {
- /* Double check that we haven't switched fb without cancelling
- * the prior work.
- */
- if (crtc_fb == work->fb)
- intel_fbc_enable(work->crtc, work->fb);
- dev_priv->fbc.fbc_work = NULL;
+ /* Were we cancelled? */
+ if (!work->scheduled)
+ goto out;
+
+ /* Were we delayed again while this function was sleeping? */
+ if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
+ jiffies)) {
+ mutex_unlock(&dev_priv->fbc.lock);
+ goto retry;
}
- mutex_unlock(&dev_priv->fbc.lock);
- kfree(work);
+ if (crtc->base.primary->fb == work->fb)
+ intel_fbc_activate(work->fb);
+
+ work->scheduled = false;
+
+out:
+ mutex_unlock(&dev_priv->fbc.lock);
}
static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-
- if (dev_priv->fbc.fbc_work == NULL)
- return;
-
- /* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc.fbc_work, so we can perform the cancellation
- * entirely asynchronously.
- */
- if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
- /* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc.fbc_work);
-
- /* Mark the work as no longer wanted so that if it does
- * wake-up (because the work was already running and waiting
- * for our mutex), it will discover that is no longer
- * necessary to run.
- */
- dev_priv->fbc.fbc_work = NULL;
+ dev_priv->fbc.work.scheduled = false;
}
-static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
+static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
- struct intel_fbc_work *work;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_fbc_work *work = &dev_priv->fbc.work;
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
- intel_fbc_cancel_work(dev_priv);
-
- work = kzalloc(sizeof(*work), GFP_KERNEL);
- if (work == NULL) {
- DRM_ERROR("Failed to allocate FBC work structure\n");
- intel_fbc_enable(crtc, crtc->base.primary->fb);
- return;
- }
-
- work->crtc = crtc;
+ /* It is useless to call intel_fbc_cancel_work() in this function since
+ * we're not releasing fbc.lock, so it won't have an opportunity to grab
+ * it to discover that it was cancelled. So we just update the expected
+ * jiffy count. */
work->fb = crtc->base.primary->fb;
- INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
- dev_priv->fbc.fbc_work = work;
+ work->scheduled = true;
+ work->enable_jiffies = jiffies;
- /* Delay the actual enabling to let pageflipping cease and the
- * display to settle before starting the compression. Note that
- * this delay also serves a second purpose: it allows for a
- * vblank to pass after disabling the FBC before we attempt
- * to modify the control registers.
- *
- * A more complicated solution would involve tracking vblanks
- * following the termination of the page-flipping sequence
- * and indeed performing the enable as a co-routine and not
- * waiting synchronously upon the vblank.
- *
- * WaFbcWaitForVBlankBeforeEnable:ilk,snb
- */
- schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+ schedule_work(&work->work);
}
-static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
intel_fbc_cancel_work(dev_priv);
- if (dev_priv->fbc.enabled)
- dev_priv->fbc.disable_fbc(dev_priv);
- dev_priv->fbc.crtc = NULL;
-}
-
-/**
- * intel_fbc_disable - disable FBC
- * @dev_priv: i915 device instance
- *
- * This function disables FBC.
- */
-void intel_fbc_disable(struct drm_i915_private *dev_priv)
-{
- if (!fbc_supported(dev_priv))
- return;
-
- mutex_lock(&dev_priv->fbc.lock);
- __intel_fbc_disable(dev_priv);
- mutex_unlock(&dev_priv->fbc.lock);
+ if (dev_priv->fbc.active)
+ dev_priv->fbc.deactivate(dev_priv);
}
/*
- * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * intel_fbc_deactivate - deactivate FBC if it's associated with crtc
* @crtc: the CRTC
*
- * This function disables FBC if it's associated with the provided CRTC.
+ * This function deactivates FBC if it's associated with the provided CRTC.
*/
-void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+void intel_fbc_deactivate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
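
[Editor's note] The re-arm check in the reworked worker above compares the deadline derived from the most recent scheduling against the current jiffies; the same test in isolation (a sketch):

	/* true if less than delay_ms elapsed since the last (re)schedule,
	 * i.e. the worker was re-armed while sleeping and must wait again */
	bool rearmed = time_after(work->enable_jiffies + msecs_to_jiffies(50),
				  jiffies);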
@@ -472,7 +470,7 @@ void intel_fbc_disable_crtc(struct intel_crtc *crtc)
mutex_lock(&dev_priv->fbc.lock);
if (dev_priv->fbc.crtc == crtc)
- __intel_fbc_disable(dev_priv);
+ __intel_fbc_deactivate(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}
@@ -486,38 +484,28 @@ static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
}
-static bool crtc_is_valid(struct intel_crtc *crtc)
+static bool crtc_can_fbc(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
return false;
- if (!intel_crtc_active(&crtc->base))
- return false;
-
- if (!to_intel_plane_state(crtc->base.primary->state)->visible)
+ if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
return false;
return true;
}
-static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
+static bool crtc_is_valid(struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = NULL, *tmp_crtc;
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
- if (crtc_is_valid(to_intel_crtc(tmp_crtc)))
- crtc = tmp_crtc;
- }
+ if (!intel_crtc_active(&crtc->base))
+ return false;
- if (!crtc)
- return NULL;
+ if (!to_intel_plane_state(crtc->base.primary->state)->visible)
+ return false;
- return crtc;
+ return true;
}
static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
@@ -590,11 +578,17 @@ again:
}
}
-static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
- int fb_cpp)
+static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_framebuffer *fb = crtc->base.primary->state->fb;
struct drm_mm_node *uninitialized_var(compressed_llb);
- int ret;
+ int size, fb_cpp, ret;
+
+ WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));
+
+ size = intel_fbc_calculate_cfb_size(crtc, fb);
+ fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);
ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
size, fb_cpp);
@@ -629,8 +623,6 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
dev_priv->mm.stolen_base + compressed_llb->start);
}
- dev_priv->fbc.uncompressed_size = size;
-
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
dev_priv->fbc.compressed_fb.size,
dev_priv->fbc.threshold);
@@ -647,18 +639,15 @@ err_llb:
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
- if (dev_priv->fbc.uncompressed_size == 0)
- return;
-
- i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+ if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb))
+ i915_gem_stolen_remove_node(dev_priv,
+ &dev_priv->fbc.compressed_fb);
if (dev_priv->fbc.compressed_llb) {
i915_gem_stolen_remove_node(dev_priv,
dev_priv->fbc.compressed_llb);
kfree(dev_priv->fbc.compressed_llb);
}
-
- dev_priv->fbc.uncompressed_size = 0;
}
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
@@ -671,64 +660,6 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->fbc.lock);
}
-/*
- * For SKL+, the plane source size used by the hardware is based on the value we
- * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
- * we wrote to PIPESRC.
- */
-static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
- int *width, int *height)
-{
- struct intel_plane_state *plane_state =
- to_intel_plane_state(crtc->base.primary->state);
- int w, h;
-
- if (intel_rotation_90_or_270(plane_state->base.rotation)) {
- w = drm_rect_height(&plane_state->src) >> 16;
- h = drm_rect_width(&plane_state->src) >> 16;
- } else {
- w = drm_rect_width(&plane_state->src) >> 16;
- h = drm_rect_height(&plane_state->src) >> 16;
- }
-
- if (width)
- *width = w;
- if (height)
- *height = h;
-}
-
-static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct drm_framebuffer *fb = crtc->base.primary->fb;
- int lines;
-
- intel_fbc_get_plane_source_size(crtc, NULL, &lines);
- if (INTEL_INFO(dev_priv)->gen >= 7)
- lines = min(lines, 2048);
-
- /* Hardware needs the full buffer stride, not just the active area. */
- return lines * fb->pitches[0];
-}
-
-static int intel_fbc_setup_cfb(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct drm_framebuffer *fb = crtc->base.primary->fb;
- int size, cpp;
-
- size = intel_fbc_calculate_cfb_size(crtc);
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-
- if (size <= dev_priv->fbc.uncompressed_size)
- return 0;
-
- /* Release any current block */
- __intel_fbc_cleanup_cfb(dev_priv);
-
- return intel_fbc_alloc_cfb(dev_priv, size, cpp);
-}
-
static bool stride_is_valid(struct drm_i915_private *dev_priv,
unsigned int stride)
{
@@ -803,47 +734,34 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
}
/**
- * __intel_fbc_update - enable/disable FBC as needed, unlocked
- * @dev_priv: i915 device instance
+ * __intel_fbc_update - activate/deactivate FBC as needed, unlocked
+ * @crtc: the CRTC that triggered the update
*
- * This function completely reevaluates the status of FBC, then enables,
- * disables or maintains it on the same state.
+ * This function completely reevaluates the status of FBC, then activates,
+ * deactivates or maintains it on the same state.
*/
-static void __intel_fbc_update(struct drm_i915_private *dev_priv)
+static void __intel_fbc_update(struct intel_crtc *crtc)
{
- struct drm_crtc *drm_crtc = NULL;
- struct intel_crtc *crtc;
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
- if (intel_vgpu_active(dev_priv->dev))
- i915.enable_fbc = 0;
-
- if (i915.enable_fbc < 0) {
- set_no_fbc_reason(dev_priv, "disabled per chip default");
+ if (!multiple_pipes_ok(dev_priv)) {
+ set_no_fbc_reason(dev_priv, "more than one pipe active");
goto out_disable;
}
- if (!i915.enable_fbc) {
- set_no_fbc_reason(dev_priv, "disabled per module param");
- goto out_disable;
- }
+ if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
+ return;
- drm_crtc = intel_fbc_find_crtc(dev_priv);
- if (!drm_crtc) {
+ if (!crtc_is_valid(crtc)) {
set_no_fbc_reason(dev_priv, "no output");
goto out_disable;
}
- if (!multiple_pipes_ok(dev_priv)) {
- set_no_fbc_reason(dev_priv, "more than one pipe active");
- goto out_disable;
- }
-
- crtc = to_intel_crtc(drm_crtc);
fb = crtc->base.primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -859,12 +777,6 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
goto out_disable;
}
- if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
- crtc->plane != PLANE_A) {
- set_no_fbc_reason(dev_priv, "FBC unsupported on plane");
- goto out_disable;
- }
-
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
*/
@@ -897,8 +809,19 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
goto out_disable;
}
- if (intel_fbc_setup_cfb(crtc)) {
- set_no_fbc_reason(dev_priv, "not enough stolen memory");
+	/* It is possible for the required CFB size to change without a
+ * crtc->disable + crtc->enable since it is possible to change the
+ * stride without triggering a full modeset. Since we try to
+ * over-allocate the CFB, there's a chance we may keep FBC enabled even
+ * if this happens, but if we exceed the current CFB size we'll have to
+ * disable FBC. Notice that it would be possible to disable FBC, wait
+ * for a frame, free the stolen node, then try to reenable FBC in case
+ * we didn't get any invalidate/deactivate calls, but this would require
+ * a lot of tracking just for a specific case. If we conclude it's an
+ * important case, we can implement it later. */
+ if (intel_fbc_calculate_cfb_size(crtc, fb) >
+ dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
+ set_no_fbc_reason(dev_priv, "CFB requirements changed");
goto out_disable;
}
@@ -909,10 +832,11 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
*/
if (dev_priv->fbc.crtc == crtc &&
dev_priv->fbc.fb_id == fb->base.id &&
- dev_priv->fbc.y == crtc->base.y)
+ dev_priv->fbc.y == crtc->base.y &&
+ dev_priv->fbc.active)
return;
- if (intel_fbc_enabled(dev_priv)) {
+ if (intel_fbc_is_active(dev_priv)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did
@@ -936,36 +860,37 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
* disabling paths we do need to wait for a vblank at
* some point. And we wait before enabling FBC anyway.
*/
- DRM_DEBUG_KMS("disabling active FBC for update\n");
- __intel_fbc_disable(dev_priv);
+ DRM_DEBUG_KMS("deactivating FBC for update\n");
+ __intel_fbc_deactivate(dev_priv);
}
- intel_fbc_schedule_enable(crtc);
+ intel_fbc_schedule_activation(crtc);
dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
return;
out_disable:
/* Multiple disables should be harmless */
- if (intel_fbc_enabled(dev_priv)) {
- DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
- __intel_fbc_disable(dev_priv);
+ if (intel_fbc_is_active(dev_priv)) {
+ DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
+ __intel_fbc_deactivate(dev_priv);
}
- __intel_fbc_cleanup_cfb(dev_priv);
}
/*
- * intel_fbc_update - enable/disable FBC as needed
- * @dev_priv: i915 device instance
+ * intel_fbc_update - activate/deactivate FBC as needed
+ * @crtc: the CRTC that triggered the update
*
- * This function reevaluates the overall state and enables or disables FBC.
+ * This function reevaluates the overall state and activates or deactivates FBC.
*/
-void intel_fbc_update(struct drm_i915_private *dev_priv)
+void intel_fbc_update(struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
if (!fbc_supported(dev_priv))
return;
mutex_lock(&dev_priv->fbc.lock);
- __intel_fbc_update(dev_priv);
+ __intel_fbc_update(crtc);
mutex_unlock(&dev_priv->fbc.lock);
}
@@ -985,16 +910,13 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
if (dev_priv->fbc.enabled)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
- else if (dev_priv->fbc.fbc_work)
- fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
- dev_priv->fbc.fbc_work->crtc->pipe);
else
fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
if (dev_priv->fbc.busy_bits)
- __intel_fbc_disable(dev_priv);
+ __intel_fbc_deactivate(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}
@@ -1012,11 +934,136 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
- if (!dev_priv->fbc.busy_bits) {
+ if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) {
+ if (origin != ORIGIN_FLIP && dev_priv->fbc.active) {
+ intel_fbc_recompress(dev_priv);
+ } else {
+ __intel_fbc_deactivate(dev_priv);
+ __intel_fbc_update(dev_priv->fbc.crtc);
+ }
+ }
+
+ mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/**
+ * intel_fbc_enable - tries to enable FBC on the CRTC
+ * @crtc: the CRTC
+ *
+ * This function checks if it's possible to enable FBC on the given CRTC,
+ * then enables it. Note that it doesn't activate FBC.
+ */
+void intel_fbc_enable(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+ if (!fbc_supported(dev_priv))
+ return;
+
+ mutex_lock(&dev_priv->fbc.lock);
+
+ if (dev_priv->fbc.enabled) {
+ WARN_ON(dev_priv->fbc.crtc == crtc);
+ goto out;
+ }
+
+ WARN_ON(dev_priv->fbc.active);
+ WARN_ON(dev_priv->fbc.crtc != NULL);
+
+ if (intel_vgpu_active(dev_priv->dev)) {
+ set_no_fbc_reason(dev_priv, "VGPU is active");
+ goto out;
+ }
+
+ if (i915.enable_fbc < 0) {
+ set_no_fbc_reason(dev_priv, "disabled per chip default");
+ goto out;
+ }
+
+ if (!i915.enable_fbc) {
+ set_no_fbc_reason(dev_priv, "disabled per module param");
+ goto out;
+ }
+
+ if (!crtc_can_fbc(crtc)) {
+ set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC");
+ goto out;
+ }
+
+ if (intel_fbc_alloc_cfb(crtc)) {
+ set_no_fbc_reason(dev_priv, "not enough stolen memory");
+ goto out;
+ }
+
+ DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+ dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet\n";
+
+ dev_priv->fbc.enabled = true;
+ dev_priv->fbc.crtc = crtc;
+out:
+ mutex_unlock(&dev_priv->fbc.lock);
+}
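For orientation, a minimal sketch of how the new split is meant to be driven
from a modeset path; the example_* wrappers are hypothetical, only the
intel_fbc_* calls and the enabled/active distinction come from this patch:

	/* hypothetical modeset path; illustrates that enable != activate */
	static void example_pipe_enable(struct intel_crtc *crtc)
	{
		intel_fbc_enable(crtc);		/* reserve CFB, set fbc.enabled */
		/* ... pipe gets fully programmed and turned on ... */
		intel_fbc_update(crtc);		/* may set fbc.active on the HW */
	}

	static void example_pipe_disable(struct intel_crtc *crtc)
	{
		/* FBC must already be inactive here (see the WARNs above) */
		intel_fbc_disable_crtc(crtc);	/* free CFB, clear fbc.enabled */
	}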
+
+/**
+ * __intel_fbc_disable - disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This is the low-level function that actually disables FBC. Callers must
+ * hold the FBC lock.
+ */
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc = dev_priv->fbc.crtc;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+ WARN_ON(!dev_priv->fbc.enabled);
+ WARN_ON(dev_priv->fbc.active);
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+
+ __intel_fbc_cleanup_cfb(dev_priv);
+
+ dev_priv->fbc.enabled = false;
+ dev_priv->fbc.crtc = NULL;
+}
+
+/**
+ * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+ if (!fbc_supported(dev_priv))
+ return;
+
+ mutex_lock(&dev_priv->fbc.lock);
+ if (dev_priv->fbc.crtc == crtc) {
+ WARN_ON(!dev_priv->fbc.enabled);
+ WARN_ON(dev_priv->fbc.active);
__intel_fbc_disable(dev_priv);
- __intel_fbc_update(dev_priv);
}
+ mutex_unlock(&dev_priv->fbc.lock);
+}
+/**
+ * intel_fbc_disable - globally disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This function disables FBC regardless of which CRTC is associated with it.
+ */
+void intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+ if (!fbc_supported(dev_priv))
+ return;
+
+ mutex_lock(&dev_priv->fbc.lock);
+ if (dev_priv->fbc.enabled)
+ __intel_fbc_disable(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}
@@ -1030,8 +1077,11 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
+ INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn);
mutex_init(&dev_priv->fbc.lock);
dev_priv->fbc.enabled = false;
+ dev_priv->fbc.active = false;
+ dev_priv->fbc.work.scheduled = false;
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
@@ -1047,29 +1097,29 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
}
if (INTEL_INFO(dev_priv)->gen >= 7) {
- dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
- dev_priv->fbc.enable_fbc = gen7_fbc_enable;
- dev_priv->fbc.disable_fbc = ilk_fbc_disable;
+ dev_priv->fbc.is_active = ilk_fbc_is_active;
+ dev_priv->fbc.activate = gen7_fbc_activate;
+ dev_priv->fbc.deactivate = ilk_fbc_deactivate;
} else if (INTEL_INFO(dev_priv)->gen >= 5) {
- dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
- dev_priv->fbc.enable_fbc = ilk_fbc_enable;
- dev_priv->fbc.disable_fbc = ilk_fbc_disable;
+ dev_priv->fbc.is_active = ilk_fbc_is_active;
+ dev_priv->fbc.activate = ilk_fbc_activate;
+ dev_priv->fbc.deactivate = ilk_fbc_deactivate;
} else if (IS_GM45(dev_priv)) {
- dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
- dev_priv->fbc.enable_fbc = g4x_fbc_enable;
- dev_priv->fbc.disable_fbc = g4x_fbc_disable;
+ dev_priv->fbc.is_active = g4x_fbc_is_active;
+ dev_priv->fbc.activate = g4x_fbc_activate;
+ dev_priv->fbc.deactivate = g4x_fbc_deactivate;
} else {
- dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
- dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
+ dev_priv->fbc.is_active = i8xx_fbc_is_active;
+ dev_priv->fbc.activate = i8xx_fbc_activate;
+ dev_priv->fbc.deactivate = i8xx_fbc_deactivate;
/* This value was pulled out of someone's hat */
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
/* We still don't have any sort of hardware state readout for FBC, so
- * disable it in case the BIOS enabled it to make sure software matches
- * the hardware state. */
- if (dev_priv->fbc.fbc_enabled(dev_priv))
- dev_priv->fbc.disable_fbc(dev_priv);
+ * deactivate it in case the BIOS activated it to make sure software
+ * matches the hardware state. */
+ if (dev_priv->fbc.is_active(dev_priv))
+ dev_priv->fbc.deactivate(dev_priv);
}
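After init, everything reaches the hardware through the per-platform hooks
chosen above; a hedged sketch of the dispatch (the helper name here is an
assumption, not part of this patch):

	static void fbc_hw_activate_sketch(struct drm_i915_private *dev_priv)
	{
		if (!dev_priv->fbc.is_active(dev_priv))
			dev_priv->fbc.activate(dev_priv);
	}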
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 7ae182d0594b..bda526660e20 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -128,9 +128,9 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
DE_PIPEB_FIFO_UNDERRUN;
if (enable)
- ironlake_enable_display_irq(dev_priv, bit);
+ ilk_enable_display_irq(dev_priv, bit);
else
- ironlake_disable_display_irq(dev_priv, bit);
+ ilk_disable_display_irq(dev_priv, bit);
}
static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
@@ -161,9 +161,9 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
if (!ivb_can_enable_err_int(dev))
return;
- ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else {
- ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
if (old &&
I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
@@ -178,14 +178,10 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
- assert_spin_locked(&dev_priv->irq_lock);
-
if (enable)
- dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+ bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
else
- dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
- I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
- POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
}
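The new bdw_{enable,disable}_pipe_irq() helpers absorb the open-coded IMR
update deleted here; their likely shape, reconstructed from the removed
lines (a sketch, not the helpers' actual body):

	static void bdw_update_pipe_irq_sketch(struct drm_i915_private *dev_priv,
					       enum pipe pipe, u32 bits, bool enable)
	{
		assert_spin_locked(&dev_priv->irq_lock);

		if (enable)
			dev_priv->de_irq_mask[pipe] &= ~bits;
		else
			dev_priv->de_irq_mask[pipe] |= bits;

		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}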
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 5ba586683c87..822952235dcf 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -42,8 +42,6 @@ struct i915_guc_client {
uint32_t wq_offset;
uint32_t wq_size;
-
- spinlock_t wq_lock; /* Protects all data below */
uint32_t wq_tail;
/* GuC submission statistics & status */
@@ -95,8 +93,6 @@ struct intel_guc {
struct i915_guc_client *execbuf_client;
- spinlock_t host2guc_lock; /* Protects all data below */
-
DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
uint32_t db_cacheline; /* Cyclic counter mod pagesize */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index bdd462e7c690..00d065fee506 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -169,10 +169,10 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(VIDEO_DIP_CTL);
}
-static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
+static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u32 val = I915_READ(VIDEO_DIP_CTL);
@@ -225,13 +225,13 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
-static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
+static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+ i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
u32 val = I915_READ(reg);
if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -287,12 +287,12 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
-static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
+static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 val = I915_READ(TVIDEO_DIP_CTL(intel_crtc->pipe));
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+ u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
@@ -341,13 +341,13 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
-static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
+static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(intel_crtc->pipe));
+ enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+ u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
@@ -398,12 +398,11 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(ctl_reg);
}
-static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
+static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder));
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
@@ -927,7 +926,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
- if (intel_hdmi->infoframe_enabled(&encoder->base))
+ if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
if (tmp & SDVO_AUDIO_ENABLE)
@@ -2165,7 +2164,7 @@ void intel_hdmi_init(struct drm_device *dev,
intel_encoder = &intel_dig_port->base;
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev)) {
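The ", NULL" sprinkled through these drm_encoder_init() calls is the new
trailing name argument added to the function's signature; NULL keeps the
core-generated default name. A call with an explicit, printf-style name
would look like this (a hypothetical variant of the hunk above):

	drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
			 DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));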
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1110c83953cf..e26e22a72e3b 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -472,9 +472,7 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
}
static int
-gmbus_xfer(struct i2c_adapter *adapter,
- struct i2c_msg *msgs,
- int num)
+do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
@@ -483,14 +481,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
int i = 0, inc, try = 0;
int ret = 0;
- intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- mutex_lock(&dev_priv->gmbus_mutex);
-
- if (bus->force_bit) {
- ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
- goto out;
- }
-
retry:
I915_WRITE(GMBUS0, bus->reg0);
@@ -505,17 +495,13 @@ retry:
ret = gmbus_xfer_write(dev_priv, &msgs[i]);
}
+ if (!ret)
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
+ GMBUS_HW_WAIT_EN);
if (ret == -ETIMEDOUT)
goto timeout;
- if (ret == -ENXIO)
- goto clear_err;
-
- ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
- GMBUS_HW_WAIT_EN);
- if (ret == -ENXIO)
+ else if (ret)
goto clear_err;
- if (ret)
- goto timeout;
}
/* Generate a STOP condition on the bus. Note that gmbus can't generate
@@ -589,13 +575,34 @@ timeout:
bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0, 0);
- /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+ /*
+ * Hardware may not support GMBUS over these pins? Try GPIO bitbanging
+ * instead. Use EAGAIN to have i2c core retry.
+ */
bus->force_bit = 1;
- ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+ ret = -EAGAIN;
out:
- mutex_unlock(&dev_priv->gmbus_mutex);
+ return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
+{
+ struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ int ret;
+ intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ mutex_lock(&dev_priv->gmbus_mutex);
+
+ if (bus->force_bit)
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+ else
+ ret = do_gmbus_xfer(adapter, msgs, num);
+
+ mutex_unlock(&dev_priv->gmbus_mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
return ret;
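The split also gives a clean retry contract: do_gmbus_xfer() returns
-EAGAIN after setting force_bit, the i2c core retries the transfer, and
the retry re-enters gmbus_xfer(), where it now takes the bitbang branch
under the same lock and power reference. From the caller's side nothing
changes:

	/* the i2c core retries on -EAGAIN, so a transfer that falls back
	 * to bitbanging still completes within a single i2c_transfer() */
	ret = i2c_transfer(&bus->adapter, msgs, num);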
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 61f1145f6579..0da0240caf81 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -1025,7 +1025,7 @@ void intel_lvds_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_LVDS);
drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a24df35e11e7..ae808b68a44f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1264,6 +1264,14 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
/*
+ * BXT: PWM clock frequency = 19.2 MHz.
+ */
+static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ return KHz(19200) / pwm_freq_hz;
+}
+
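Worked example: with the 200 Hz fallback introduced below in
get_backlight_max_vbt(), the BXT conversion yields
KHz(19200) / 200 = 19200000 / 200 = 96000 PWM clock periods per cycle.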
+/*
* SPT: This value represents the period of the PWM stream in clock periods
* multiplied by 16 (default increment) or 128 (alternate increment selected in
* SCHICKEN_1 bit 0). PWM clock is 24 MHz.
@@ -1300,7 +1308,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
else
mul = 128;
- if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+ if (HAS_PCH_LPT_H(dev_priv))
clock = MHz(135); /* LPT:H */
else
clock = MHz(24); /* LPT:LP */
@@ -1335,22 +1343,28 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_PINEVIEW(dev))
- clock = intel_hrawclk(dev);
+ clock = MHz(intel_hrawclk(dev));
else
- clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
+ clock = 1000 * dev_priv->cdclk_freq;
return clock / (pwm_freq_hz * 32);
}
/*
* Gen4: This value represents the period of the PWM stream in display core
- * clocks multiplied by 128.
+ * clocks ([DevCTG] HRAW clocks) multiplied by 128.
*/
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
+ int clock;
+
+ if (IS_G4X(dev_priv))
+ clock = MHz(intel_hrawclk(dev));
+ else
+ clock = 1000 * dev_priv->cdclk_freq;
return clock / (pwm_freq_hz * 128);
}
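Worked example for the Gen4 path: assuming a 200 MHz core display clock
(cdclk_freq = 200000, in kHz) and the 200 Hz default, clock becomes
1000 * 200000 = 200000000, so the maximum PWM value is
200000000 / (200 * 128) = 7812.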
@@ -1385,14 +1399,18 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
u32 pwm;
- if (!pwm_freq_hz) {
- DRM_DEBUG_KMS("backlight frequency not specified in VBT\n");
+ if (!panel->backlight.hz_to_pwm) {
+ DRM_DEBUG_KMS("backlight frequency conversion not supported\n");
return 0;
}
- if (!panel->backlight.hz_to_pwm) {
- DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n");
- return 0;
+ if (pwm_freq_hz) {
+ DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n",
+ pwm_freq_hz);
+ } else {
+ pwm_freq_hz = 200;
+ DRM_DEBUG_KMS("default backlight frequency %u Hz\n",
+ pwm_freq_hz);
}
pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
@@ -1401,8 +1419,6 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
return 0;
}
- DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz);
-
return pwm;
}
@@ -1750,6 +1766,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.disable = bxt_disable_backlight;
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
+ panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
} else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 96f45d7b3e4b..ee05ce8bf79a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -66,6 +66,14 @@ static void bxt_init_clock_gating(struct drm_device *dev)
*/
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
+
+ /*
+ * Wa: Backlight PWM may stop in the asserted state, causing backlight
+ * to stay fully on.
+ */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -2422,7 +2430,7 @@ static void ilk_wm_merge(struct drm_device *dev,
* enabled sometime later.
*/
if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
- intel_fbc_enabled(dev_priv)) {
+ intel_fbc_is_active(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index bc5ea2a6cf4c..b6609e648f75 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -191,9 +191,6 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
-
/* Enable AUX frame sync at sink */
if (dev_priv->psr.aux_frame_sync)
drm_dp_dpcd_writeb(&intel_dp->aux,
@@ -414,9 +411,14 @@ void intel_psr_enable(struct intel_dp *intel_dp)
skl_psr_setup_su_vsc(intel_dp);
}
- /* Avoid continuous PSR exit by masking memup and hpd */
+ /*
+ * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
+ * Also mask LPSP to avoid a dependency on other drivers that
+ * might block runtime_pm, and to prevent other hw tracking
+ * issues now that we can rely on frontbuffer tracking.
+ */
I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD);
+ EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
@@ -522,11 +524,15 @@ void intel_psr_disable(struct intel_dp *intel_dp)
return;
}
+ /* Disable PSR on Source */
if (HAS_DDI(dev))
hsw_psr_disable(intel_dp);
else
vlv_psr_disable(intel_dp);
+ /* Disable PSR on Sink */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
@@ -737,25 +743,9 @@ void intel_psr_flush(struct drm_device *dev,
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
- if (HAS_DDI(dev)) {
- /*
- * By definition every flush should mean invalidate + flush,
- * however on core platforms let's minimize the
- * disable/re-enable so we can avoid the invalidate when flip
- * originated the flush.
- */
- if (frontbuffer_bits && origin != ORIGIN_FLIP)
- intel_psr_exit(dev);
- } else {
- /*
- * On Valleyview and Cherryview we don't use hardware tracking
- * so any plane updates or cursor moves don't result in a PSR
- * invalidating. Which means we need to manually fake this in
- * software for all flushes.
- */
- if (frontbuffer_bits)
- intel_psr_exit(dev);
- }
+ /* By definition flush = invalidate + flush */
+ if (frontbuffer_bits)
+ intel_psr_exit(dev);
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
if (!work_busy(&dev_priv->psr.work.work))
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index afca6c940b9a..2c2151f1c47e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -65,6 +65,72 @@
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id);
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain)
+{
+ switch (domain) {
+ case POWER_DOMAIN_PIPE_A:
+ return "PIPE_A";
+ case POWER_DOMAIN_PIPE_B:
+ return "PIPE_B";
+ case POWER_DOMAIN_PIPE_C:
+ return "PIPE_C";
+ case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+ return "PIPE_A_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+ return "PIPE_B_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+ return "PIPE_C_PANEL_FITTER";
+ case POWER_DOMAIN_TRANSCODER_A:
+ return "TRANSCODER_A";
+ case POWER_DOMAIN_TRANSCODER_B:
+ return "TRANSCODER_B";
+ case POWER_DOMAIN_TRANSCODER_C:
+ return "TRANSCODER_C";
+ case POWER_DOMAIN_TRANSCODER_EDP:
+ return "TRANSCODER_EDP";
+ case POWER_DOMAIN_PORT_DDI_A_LANES:
+ return "PORT_DDI_A_LANES";
+ case POWER_DOMAIN_PORT_DDI_B_LANES:
+ return "PORT_DDI_B_LANES";
+ case POWER_DOMAIN_PORT_DDI_C_LANES:
+ return "PORT_DDI_C_LANES";
+ case POWER_DOMAIN_PORT_DDI_D_LANES:
+ return "PORT_DDI_D_LANES";
+ case POWER_DOMAIN_PORT_DDI_E_LANES:
+ return "PORT_DDI_E_LANES";
+ case POWER_DOMAIN_PORT_DSI:
+ return "PORT_DSI";
+ case POWER_DOMAIN_PORT_CRT:
+ return "PORT_CRT";
+ case POWER_DOMAIN_PORT_OTHER:
+ return "PORT_OTHER";
+ case POWER_DOMAIN_VGA:
+ return "VGA";
+ case POWER_DOMAIN_AUDIO:
+ return "AUDIO";
+ case POWER_DOMAIN_PLLS:
+ return "PLLS";
+ case POWER_DOMAIN_AUX_A:
+ return "AUX_A";
+ case POWER_DOMAIN_AUX_B:
+ return "AUX_B";
+ case POWER_DOMAIN_AUX_C:
+ return "AUX_C";
+ case POWER_DOMAIN_AUX_D:
+ return "AUX_D";
+ case POWER_DOMAIN_GMBUS:
+ return "GMBUS";
+ case POWER_DOMAIN_INIT:
+ return "INIT";
+ case POWER_DOMAIN_MODESET:
+ return "MODESET";
+ default:
+ MISSING_CASE(domain);
+ return "?";
+ }
+}
+
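Besides the sharper WARNs below, the helper is useful anywhere a power
domain needs to be logged; a hypothetical call site:

	DRM_DEBUG_KMS("power domain %s, use count %d\n",
		      intel_display_power_domain_str(domain),
		      power_domains->domain_use_count[domain]);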
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -1433,11 +1499,15 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
mutex_lock(&power_domains->lock);
- WARN_ON(!power_domains->domain_use_count[domain]);
+ WARN(!power_domains->domain_use_count[domain],
+ "Use count on domain %s is already zero\n",
+ intel_display_power_domain_str(domain));
power_domains->domain_use_count[domain]--;
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
- WARN_ON(!power_well->count);
+ WARN(!power_well->count,
+ "Use count on power well %s is already zero",
+ power_well->name);
if (!--power_well->count)
intel_power_well_disable(dev_priv, power_well);
@@ -1841,7 +1911,7 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
if (disable_power_well >= 0)
return !!disable_power_well;
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("Disabling display power well support\n");
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 06679f164b3e..2e1da060b0e1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2978,7 +2978,8 @@ bool intel_sdvo_init(struct drm_device *dev,
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
- drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
+ drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
+ NULL);
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 2b96f336589e..dbf421351b5c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1123,7 +1123,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
plane_formats, num_plane_formats,
- DRM_PLANE_TYPE_OVERLAY);
+ DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
kfree(intel_plane);
goto out;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6bea78944cd6..948cbff6c62e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1645,7 +1645,7 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_SVIDEO);
drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
- DRM_MODE_ENCODER_TVDAC);
+ DRM_MODE_ENCODER_TVDAC, NULL);
intel_encoder->compute_config = intel_tv_compute_config;
intel_encoder->get_config = intel_tv_get_config;
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 98605ea2ad9d..063825fecbe2 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -137,7 +137,7 @@ static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder)
imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24);
}
-static struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
.mode_fixup = dw_hdmi_imx_encoder_mode_fixup,
.mode_set = dw_hdmi_imx_encoder_mode_set,
.prepare = dw_hdmi_imx_encoder_prepare,
@@ -145,7 +145,7 @@ static struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
.disable = dw_hdmi_imx_encoder_disable,
};
-static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
+static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
@@ -251,7 +251,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
}
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 6faa735376ec..7be7ac808304 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -39,13 +39,12 @@ struct imx_drm_component {
struct imx_drm_device {
struct drm_device *drm;
struct imx_drm_crtc *crtc[MAX_CRTC];
- int pipes;
+ unsigned int pipes;
struct drm_fbdev_cma *fbhelper;
};
struct imx_drm_crtc {
struct drm_crtc *crtc;
- int pipe;
struct imx_drm_crtc_helper_funcs imx_drm_helper_funcs;
};
@@ -54,9 +53,9 @@ static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
#endif
-int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
+unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
{
- return crtc->pipe;
+ return drm_crtc_index(crtc->crtc);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_id);
@@ -64,8 +63,7 @@ static void imx_drm_driver_lastclose(struct drm_device *drm)
{
struct imx_drm_device *imxdrm = drm->dev_private;
- if (imxdrm->fbhelper)
- drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
+ drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
}
static int imx_drm_driver_unload(struct drm_device *drm)
@@ -125,19 +123,19 @@ EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
- return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
+ return drm_crtc_vblank_get(imx_drm_crtc->crtc);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
+ drm_crtc_vblank_put(imx_drm_crtc->crtc);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
+ drm_crtc_handle_vblank(imx_drm_crtc->crtc);
}
EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
@@ -216,7 +214,7 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
}
-static struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
+static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = imx_drm_output_poll_changed,
};
@@ -334,7 +332,7 @@ err_kms:
* imx_drm_add_crtc - add a new crtc
*/
int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
- struct imx_drm_crtc **new_crtc,
+ struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs,
struct device_node *port)
{
@@ -357,12 +355,11 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
return -ENOMEM;
imx_drm_crtc->imx_drm_helper_funcs = *imx_drm_helper_funcs;
- imx_drm_crtc->pipe = imxdrm->pipes++;
imx_drm_crtc->crtc = crtc;
crtc->port = port;
- imxdrm->crtc[imx_drm_crtc->pipe] = imx_drm_crtc;
+ imxdrm->crtc[imxdrm->pipes++] = imx_drm_crtc;
*new_crtc = imx_drm_crtc;
@@ -373,13 +370,13 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc,
imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
- drm_crtc_init(drm, crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
+ drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL);
return 0;
err_register:
- imxdrm->crtc[imx_drm_crtc->pipe] = NULL;
+ imxdrm->crtc[--imxdrm->pipes] = NULL;
kfree(imx_drm_crtc);
return ret;
}
@@ -391,10 +388,11 @@ EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
int imx_drm_remove_crtc(struct imx_drm_crtc *imx_drm_crtc)
{
struct imx_drm_device *imxdrm = imx_drm_crtc->crtc->dev->dev_private;
+ unsigned int pipe = drm_crtc_index(imx_drm_crtc->crtc);
drm_crtc_cleanup(imx_drm_crtc->crtc);
- imxdrm->crtc[imx_drm_crtc->pipe] = NULL;
+ imxdrm->crtc[pipe] = NULL;
kfree(imx_drm_crtc);
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 28e776d8d9d2..71cf6d9c714f 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -9,10 +9,11 @@ struct drm_display_mode;
struct drm_encoder;
struct drm_fbdev_cma;
struct drm_framebuffer;
+struct drm_plane;
struct imx_drm_crtc;
struct platform_device;
-int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
+unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
struct imx_drm_crtc_helper_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
@@ -24,7 +25,7 @@ struct imx_drm_crtc_helper_funcs {
};
int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
- struct imx_drm_crtc **new_crtc,
+ struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
const struct imx_drm_crtc_helper_funcs *imx_helper_funcs,
struct device_node *port);
int imx_drm_remove_crtc(struct imx_drm_crtc *);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index abacc8f67469..22ac482231ed 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -358,23 +358,23 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
drm_panel_unprepare(imx_ldb_ch->panel);
}
-static struct drm_connector_funcs imx_ldb_connector_funcs = {
+static const struct drm_connector_funcs imx_ldb_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_ldb_connector_detect,
.destroy = imx_drm_connector_destroy,
};
-static struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
.get_modes = imx_ldb_connector_get_modes,
.best_encoder = imx_ldb_connector_best_encoder,
};
-static struct drm_encoder_funcs imx_ldb_encoder_funcs = {
+static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
.destroy = imx_drm_encoder_destroy,
};
-static struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
.dpms = imx_ldb_encoder_dpms,
.mode_fixup = imx_ldb_encoder_mode_fixup,
.prepare = imx_ldb_encoder_prepare,
@@ -422,7 +422,7 @@ static int imx_ldb_register(struct drm_device *drm,
drm_encoder_helper_add(&imx_ldb_ch->encoder,
&imx_ldb_encoder_helper_funcs);
drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
drm_connector_helper_add(&imx_ldb_ch->connector,
&imx_ldb_connector_helper_funcs);
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index e671ad369416..292349f0b132 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -360,24 +360,24 @@ static void imx_tve_encoder_disable(struct drm_encoder *encoder)
tve_disable(tve);
}
-static struct drm_connector_funcs imx_tve_connector_funcs = {
+static const struct drm_connector_funcs imx_tve_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_tve_connector_detect,
.destroy = imx_drm_connector_destroy,
};
-static struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
.get_modes = imx_tve_connector_get_modes,
.best_encoder = imx_tve_connector_best_encoder,
.mode_valid = imx_tve_connector_mode_valid,
};
-static struct drm_encoder_funcs imx_tve_encoder_funcs = {
+static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
.destroy = imx_drm_encoder_destroy,
};
-static struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
.dpms = imx_tve_encoder_dpms,
.mode_fixup = imx_tve_encoder_mode_fixup,
.prepare = imx_tve_encoder_prepare,
@@ -508,7 +508,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs,
- encoder_type);
+ encoder_type, NULL);
drm_connector_helper_add(&tve->connector,
&imx_tve_connector_helper_funcs);
@@ -721,6 +721,7 @@ static const struct of_device_id imx_tve_dt_ids[] = {
{ .compatible = "fsl,imx53-tve", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx_tve_dt_ids);
static struct platform_driver imx_tve_driver = {
.probe = imx_tve_probe,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7bc8301fafff..30a57185bdb4 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -212,7 +212,8 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
spin_lock_irqsave(&drm->event_lock, flags);
if (ipu_crtc->page_flip_event)
- drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event);
+ drm_crtc_send_vblank_event(&ipu_crtc->base,
+ ipu_crtc->page_flip_event);
ipu_crtc->page_flip_event = NULL;
imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
spin_unlock_irqrestore(&drm->event_lock, flags);
@@ -269,7 +270,7 @@ static void ipu_crtc_commit(struct drm_crtc *crtc)
ipu_fb_enable(ipu_crtc);
}
-static struct drm_crtc_helper_funcs ipu_helper_funcs = {
+static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
.dpms = ipu_crtc_dpms,
.mode_fixup = ipu_crtc_mode_fixup,
.mode_set = ipu_crtc_mode_set,
@@ -349,7 +350,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
int dp = -EINVAL;
int ret;
- int id;
ret = ipu_get_resources(ipu_crtc, pdata);
if (ret) {
@@ -358,18 +358,23 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
return ret;
}
+ if (pdata->dp >= 0)
+ dp = IPU_DP_FLOW_SYNC_BG;
+ ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
+ DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(ipu_crtc->plane[0])) {
+ ret = PTR_ERR(ipu_crtc->plane[0]);
+ goto err_put_resources;
+ }
+
ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
- &ipu_crtc_helper_funcs, ipu_crtc->dev->of_node);
+ &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
+ ipu_crtc->dev->of_node);
if (ret) {
dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
goto err_put_resources;
}
- if (pdata->dp >= 0)
- dp = IPU_DP_FLOW_SYNC_BG;
- id = imx_drm_crtc_id(ipu_crtc->imx_crtc);
- ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu,
- pdata->dma[0], dp, BIT(id), true);
ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
if (ret) {
dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
@@ -379,10 +384,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
/* If this crtc is using the DP, add an overlay plane */
if (pdata->dp >= 0 && pdata->dma[1] > 0) {
- ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu,
- pdata->dma[1],
- IPU_DP_FLOW_SYNC_FG,
- BIT(id), false);
+ ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1],
+ IPU_DP_FLOW_SYNC_FG,
+ drm_crtc_mask(&ipu_crtc->base),
+ DRM_PLANE_TYPE_OVERLAY);
if (IS_ERR(ipu_crtc->plane[1]))
ipu_crtc->plane[1] = NULL;
}
@@ -407,28 +412,6 @@ err_put_resources:
return ret;
}
-static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent,
- int port_id)
-{
- struct device_node *port;
- int id, ret;
-
- port = of_get_child_by_name(parent, "port");
- while (port) {
- ret = of_property_read_u32(port, "reg", &id);
- if (!ret && id == port_id)
- return port;
-
- do {
- port = of_get_next_child(parent, port);
- if (!port)
- return NULL;
- } while (of_node_cmp(port->name, "port"));
- }
-
- return NULL;
-}
-
static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
{
struct ipu_client_platformdata *pdata = dev->platform_data;
@@ -470,23 +453,11 @@ static const struct component_ops ipu_crtc_ops = {
static int ipu_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ipu_client_platformdata *pdata = dev->platform_data;
int ret;
if (!dev->platform_data)
return -EINVAL;
- if (!dev->of_node) {
- /* Associate crtc device with the corresponding DI port node */
- dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node,
- pdata->di + 2);
- if (!dev->of_node) {
- dev_err(dev, "missing port@%d node in %s\n",
- pdata->di + 2, dev->parent->of_node->full_name);
- return -ENODEV;
- }
- }
-
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 575f4c84388f..591ba2f1ae03 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -381,7 +381,7 @@ static struct drm_plane_funcs ipu_plane_funcs = {
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
int dma, int dp, unsigned int possible_crtcs,
- bool priv)
+ enum drm_plane_type type)
{
struct ipu_plane *ipu_plane;
int ret;
@@ -399,10 +399,10 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
ipu_plane->dma = dma;
ipu_plane->dp_flow = dp;
- ret = drm_plane_init(dev, &ipu_plane->base, possible_crtcs,
- &ipu_plane_funcs, ipu_plane_formats,
- ARRAY_SIZE(ipu_plane_formats),
- priv);
+ ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
+ &ipu_plane_funcs, ipu_plane_formats,
+ ARRAY_SIZE(ipu_plane_formats), type,
+ NULL);
if (ret) {
DRM_ERROR("failed to initialize plane\n");
kfree(ipu_plane);
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 9b5eff18f5b8..3a443b413c60 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -34,7 +34,7 @@ struct ipu_plane {
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
int dma, int dp, unsigned int possible_crtcs,
- bool priv);
+ enum drm_plane_type type);
/* Init IDMAC, DMFC, DP */
int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index b4deb9cf9d71..b74bf8e334f5 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -54,7 +54,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (imxpd->panel && imxpd->panel->funcs &&
imxpd->panel->funcs->get_modes) {
+ struct drm_display_info *di = &connector->display_info;
+
num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
+ if (!imxpd->bus_format && di->num_bus_formats)
+ imxpd->bus_format = di->bus_formats[0];
if (num_modes > 0)
return num_modes;
}
@@ -144,23 +148,23 @@ static void imx_pd_encoder_disable(struct drm_encoder *encoder)
drm_panel_unprepare(imxpd->panel);
}
-static struct drm_connector_funcs imx_pd_connector_funcs = {
+static const struct drm_connector_funcs imx_pd_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_pd_connector_detect,
.destroy = imx_drm_connector_destroy,
};
-static struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
.get_modes = imx_pd_connector_get_modes,
.best_encoder = imx_pd_connector_best_encoder,
};
-static struct drm_encoder_funcs imx_pd_encoder_funcs = {
+static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
.destroy = imx_drm_encoder_destroy,
};
-static struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
.dpms = imx_pd_encoder_dpms,
.mode_fixup = imx_pd_encoder_mode_fixup,
.prepare = imx_pd_encoder_prepare,
@@ -188,7 +192,7 @@ static int imx_pd_register(struct drm_device *drm,
drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
- DRM_MODE_ENCODER_NONE);
+ DRM_MODE_ENCODER_NONE, NULL);
drm_connector_helper_add(&imxpd->connector,
&imx_pd_connector_helper_funcs);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index c99d3fe12881..19c18b7af28a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1538,7 +1538,7 @@ static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
encoder->possible_crtcs = 0x1;
drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
+ DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
return encoder;
@@ -1684,13 +1684,13 @@ static void mga_connector_destroy(struct drm_connector *connector)
kfree(connector);
}
-struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
.get_modes = mga_vga_get_modes,
.mode_valid = mga_vga_mode_valid,
.best_encoder = mga_connector_best_encoder,
};
-struct drm_connector_funcs mga_vga_connector_funcs = {
+static const struct drm_connector_funcs mga_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = mga_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 84d3ec98e6b9..215495c2780c 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -54,3 +54,11 @@ config DRM_MSM_DSI_20NM_PHY
default y
help
Choose this option if the 20nm DSI PHY is used on the platform.
+
+config DRM_MSM_DSI_28NM_8960_PHY
+ bool "Enable DSI 28nm 8960 PHY driver in MSM DRM"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if the 28nm DSI PHY 8960 variant is used on the
+ platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 1c90290be716..065ad4138799 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -54,6 +54,7 @@ msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+ mdp/mdp4/mdp4_dsi_encoder.o \
dsi/dsi_cfg.o \
dsi/dsi_host.o \
dsi/dsi_manager.o \
@@ -62,10 +63,12 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
msm-y += dsi/pll/dsi_pll.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
endif
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 1ea2df524fac..950d27d26b30 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -19,10 +19,6 @@
#include "adreno_gpu.h"
-#if defined(DOWNSTREAM_CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
-# include <mach/kgsl.h>
-#endif
-
#define ANY_ID 0xff
bool hang_debug = false;
@@ -168,7 +164,6 @@ static void set_gpu_pdev(struct drm_device *dev,
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
-#ifdef CONFIG_OF
struct device_node *child, *node = dev->of_node;
u32 val;
int ret;
@@ -205,53 +200,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return -ENXIO;
}
-#else
- struct kgsl_device_platform_data *pdata = dev->platform_data;
- uint32_t version = socinfo_get_version();
- if (cpu_is_apq8064ab()) {
- config.fast_rate = 450000000;
- config.slow_rate = 27000000;
- config.bus_freq = 4;
- config.rev = ADRENO_REV(3, 2, 1, 0);
- } else if (cpu_is_apq8064()) {
- config.fast_rate = 400000000;
- config.slow_rate = 27000000;
- config.bus_freq = 4;
-
- if (SOCINFO_VERSION_MAJOR(version) == 2)
- config.rev = ADRENO_REV(3, 2, 0, 2);
- else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
- (SOCINFO_VERSION_MINOR(version) == 1))
- config.rev = ADRENO_REV(3, 2, 0, 1);
- else
- config.rev = ADRENO_REV(3, 2, 0, 0);
-
- } else if (cpu_is_msm8960ab()) {
- config.fast_rate = 400000000;
- config.slow_rate = 320000000;
- config.bus_freq = 4;
-
- if (SOCINFO_VERSION_MINOR(version) == 0)
- config.rev = ADRENO_REV(3, 2, 1, 0);
- else
- config.rev = ADRENO_REV(3, 2, 1, 1);
-
- } else if (cpu_is_msm8930()) {
- config.fast_rate = 400000000;
- config.slow_rate = 27000000;
- config.bus_freq = 3;
-
- if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
- (SOCINFO_VERSION_MINOR(version) == 2))
- config.rev = ADRENO_REV(3, 0, 5, 2);
- else
- config.rev = ADRENO_REV(3, 0, 5, 0);
-
- }
-# ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- config.bus_scale_table = pdata->bus_scale_table;
-# endif
-#endif
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
return 0;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 5f5a3732cdf6..749fbb28ec3d 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -31,10 +31,12 @@ enum msm_dsi_phy_type {
MSM_DSI_PHY_28NM_HPM,
MSM_DSI_PHY_28NM_LP,
MSM_DSI_PHY_20NM,
+ MSM_DSI_PHY_28NM_8960,
MSM_DSI_PHY_MAX
};
#define DSI_DEV_REGULATOR_MAX 8
+#define DSI_BUS_CLK_MAX 4
/* Regulators for DSI devices */
struct dsi_reg_entry {
@@ -89,7 +91,7 @@ int msm_dsi_manager_phy_enable(int id,
u32 *clk_pre, u32 *clk_post);
void msm_dsi_manager_phy_disable(int id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
@@ -143,7 +145,7 @@ int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
- u32 iova, u32 len);
+ u32 dma_base, u32 len);
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 5872d5e5934f..2a827d8093a2 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -13,9 +13,26 @@
#include "dsi_cfg.h"
-/* DSI v2 has not been supported by now */
-static const struct msm_dsi_config dsi_v2_cfg = {
+static const char * const dsi_v2_bus_clk_names[] = {
+ "core_mmss_clk", "iface_clk", "bus_clk",
+};
+
+static const struct msm_dsi_config apq8064_dsi_cfg = {
.io_offset = 0,
+ .reg_cfg = {
+ .num = 3,
+ .regs = {
+ {"vdda", 1200000, 1200000, 100000, 100},
+ {"avdd", 3000000, 3000000, 110000, 100},
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ .bus_clk_names = dsi_v2_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
+};
+
+static const char * const dsi_6g_bus_clk_names[] = {
+ "mdp_core_clk", "iface_clk", "bus_clk", "core_mmss_clk",
};
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
@@ -29,6 +46,12 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
{"vddio", 1800000, 1800000, 100000, 100},
},
},
+ .bus_clk_names = dsi_6g_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+};
+
+static const char * const dsi_8916_bus_clk_names[] = {
+ "mdp_core_clk", "iface_clk", "bus_clk",
};
static const struct msm_dsi_config msm8916_dsi_cfg = {
@@ -42,6 +65,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
{"vddio", 1800000, 1800000, 100000, 100},
},
},
+ .bus_clk_names = dsi_8916_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
};
static const struct msm_dsi_config msm8994_dsi_cfg = {
@@ -57,11 +82,13 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
{"lab_reg", -1, -1, -1, -1},
{"ibb_reg", -1, -1, -1, -1},
},
- }
+ },
+ .bus_clk_names = dsi_6g_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
};
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
- {MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg},
+ {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
&msm8974_apq8084_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 4cf887240177..a68c836744a3 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -25,11 +25,15 @@
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
+#define MSM_DSI_V2_VER_MINOR_8064 0x0
+
#define DSI_6G_REG_SHIFT 4
struct msm_dsi_config {
u32 io_offset;
struct dsi_reg_config reg_cfg;
+ const char * const *bus_clk_names;
+ const int num_bus_clks;
};
struct msm_dsi_cfg_handler {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4c49868efcda..48f9967b4a1b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -24,26 +24,36 @@
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <video/mipi_display.h>
#include "dsi.h"
#include "dsi.xml.h"
+#include "sfpb.xml.h"
#include "dsi_cfg.h"
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
u32 ver;
- u32 ver_6g;
if (!major || !minor)
return -EINVAL;
- /* From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
+ /*
+ * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
* makes all other registers 4-byte shifted down.
+ *
+ * In order to distinguish between DSI6G(v3) and beyond, and DSIv2 and
+ * older, we read the DSI_VERSION register without any shift (offset
+ * 0x1f0). In the case of DSIv2, this has to be a non-zero value. In
+ * the case of DSI6G, this has to be zero (the offset points to a
+ * scratch register which we never touch)
*/
- ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);
- if (ver_6g == 0) {
- ver = msm_readl(base + REG_DSI_VERSION);
+
+ ver = msm_readl(base + REG_DSI_VERSION);
+ if (ver) {
+ /* older dsi host, there is no register shift */
ver = FIELD(ver, DSI_VERSION_MAJOR);
if (ver <= MSM_DSI_VER_MAJOR_V2) {
/* old versions */
@@ -54,12 +64,17 @@ static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
return -EINVAL;
}
} else {
+ /*
+ * newer host, offset 0 has 6G_HW_VERSION, the rest of the
+ * registers are shifted down, read DSI_VERSION again with
+ * the shifted offset
+ */
ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
ver = FIELD(ver, DSI_VERSION_MAJOR);
if (ver == MSM_DSI_VER_MAJOR_6G) {
/* 6G version */
*major = ver;
- *minor = ver_6g;
+ *minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
return 0;
} else {
return -EINVAL;
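A condensed restatement of the detection rule described in the comment
above (the helper name is assumed, not part of this patch):

	/* DSIv2 and older expose a real version at the unshifted offset;
	 * on DSI6G(v3) and later that offset is a scratch register == 0 */
	static bool dsi_host_is_6g(const void __iomem *base)
	{
		return msm_readl(base + REG_DSI_VERSION) == 0;
	}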
@@ -91,10 +106,9 @@ struct msm_dsi_host {
void __iomem *ctrl_base;
struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
- struct clk *mdp_core_clk;
- struct clk *ahb_clk;
- struct clk *axi_clk;
- struct clk *mmss_misc_ahb_clk;
+
+ struct clk *bus_clks[DSI_BUS_CLK_MAX];
+
struct clk *byte_clk;
struct clk *esc_clk;
struct clk *pixel_clk;
@@ -102,6 +116,14 @@ struct msm_dsi_host {
struct clk *pixel_clk_src;
u32 byte_clk_rate;
+ u32 esc_clk_rate;
+
+ /* DSI v2 specific clocks */
+ struct clk *src_clk;
+ struct clk *esc_clk_src;
+ struct clk *dsi_clk_src;
+
+ u32 src_clk_rate;
struct gpio_desc *disp_en_gpio;
struct gpio_desc *te_gpio;
@@ -119,9 +141,19 @@ struct msm_dsi_host {
struct work_struct err_work;
struct workqueue_struct *workqueue;
+ /* DSI 6G TX buffer */
struct drm_gem_object *tx_gem_obj;
+
+ /* DSI v2 TX buffer */
+ void *tx_buf;
+ dma_addr_t tx_buf_paddr;
+
+ int tx_size;
+
u8 *rx_buf;
+ struct regmap *sfpb;
+
struct drm_display_mode *mode;
/* connected device info */
@@ -165,21 +197,31 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
struct msm_dsi_host *msm_host)
{
const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
+ struct device *dev = &msm_host->pdev->dev;
struct regulator *gdsc_reg;
+ struct clk *ahb_clk;
int ret;
u32 major = 0, minor = 0;
- gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
+ gdsc_reg = regulator_get(dev, "gdsc");
if (IS_ERR(gdsc_reg)) {
pr_err("%s: cannot get gdsc\n", __func__);
goto exit;
}
+
+ ahb_clk = clk_get(dev, "iface_clk");
+ if (IS_ERR(ahb_clk)) {
+ pr_err("%s: cannot get interface clock\n", __func__);
+ goto put_gdsc;
+ }
+
ret = regulator_enable(gdsc_reg);
if (ret) {
pr_err("%s: unable to enable gdsc\n", __func__);
- goto put_gdsc;
+ goto put_clk;
}
- ret = clk_prepare_enable(msm_host->ahb_clk);
+
+ ret = clk_prepare_enable(ahb_clk);
if (ret) {
pr_err("%s: unable to enable ahb_clk\n", __func__);
goto disable_gdsc;
@@ -196,9 +238,11 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
DBG("%s: Version %x:%x\n", __func__, major, minor);
disable_clks:
- clk_disable_unprepare(msm_host->ahb_clk);
+ clk_disable_unprepare(ahb_clk);
disable_gdsc:
regulator_disable(gdsc_reg);
+put_clk:
+ clk_put(ahb_clk);
put_gdsc:
regulator_put(gdsc_reg);
exit:
@@ -295,40 +339,23 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
struct device *dev = &msm_host->pdev->dev;
- int ret = 0;
-
- msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
- if (IS_ERR(msm_host->mdp_core_clk)) {
- ret = PTR_ERR(msm_host->mdp_core_clk);
- pr_err("%s: Unable to get mdp core clk. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- msm_host->ahb_clk = devm_clk_get(dev, "iface_clk");
- if (IS_ERR(msm_host->ahb_clk)) {
- ret = PTR_ERR(msm_host->ahb_clk);
- pr_err("%s: Unable to get mdss ahb clk. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
- if (IS_ERR(msm_host->axi_clk)) {
- ret = PTR_ERR(msm_host->axi_clk);
- pr_err("%s: Unable to get axi bus clk. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
- if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
- ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
- pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
- __func__, ret);
- goto exit;
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ const struct msm_dsi_config *cfg = cfg_hnd->cfg;
+ int i, ret = 0;
+
+ /* get bus clocks */
+ for (i = 0; i < cfg->num_bus_clks; i++) {
+ msm_host->bus_clks[i] = devm_clk_get(dev,
+ cfg->bus_clk_names[i]);
+ if (IS_ERR(msm_host->bus_clks[i])) {
+ ret = PTR_ERR(msm_host->bus_clks[i]);
+ pr_err("%s: Unable to get %s, ret = %d\n",
+ __func__, cfg->bus_clk_names[i], ret);
+ goto exit;
+ }
}
+ /* get link and source clocks */
msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
if (IS_ERR(msm_host->byte_clk)) {
ret = PTR_ERR(msm_host->byte_clk);
@@ -356,80 +383,85 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
- msm_host->byte_clk_src = devm_clk_get(dev, "byte_clk_src");
- if (IS_ERR(msm_host->byte_clk_src)) {
- ret = PTR_ERR(msm_host->byte_clk_src);
+ msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
+ if (!msm_host->byte_clk_src) {
+ ret = -ENODEV;
pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
- msm_host->byte_clk_src = NULL;
goto exit;
}
- msm_host->pixel_clk_src = devm_clk_get(dev, "pixel_clk_src");
- if (IS_ERR(msm_host->pixel_clk_src)) {
- ret = PTR_ERR(msm_host->pixel_clk_src);
+ msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
+ if (!msm_host->pixel_clk_src) {
+ ret = -ENODEV;
pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
- msm_host->pixel_clk_src = NULL;
goto exit;
}
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
+ msm_host->src_clk = devm_clk_get(dev, "src_clk");
+ if (IS_ERR(msm_host->src_clk)) {
+ ret = PTR_ERR(msm_host->src_clk);
+ pr_err("%s: can't find dsi_src_clk. ret=%d\n",
+ __func__, ret);
+ msm_host->src_clk = NULL;
+ goto exit;
+ }
+
+ msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
+ if (!msm_host->esc_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get esc_clk_src. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
+ if (!msm_host->dsi_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get dsi_clk_src. ret=%d\n",
+ __func__, ret);
+ }
+ }
exit:
return ret;
}
static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
{
- int ret;
+ const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
+ int i, ret;
DBG("id=%d", msm_host->id);
- ret = clk_prepare_enable(msm_host->mdp_core_clk);
- if (ret) {
- pr_err("%s: failed to enable mdp_core_clock, %d\n",
- __func__, ret);
- goto core_clk_err;
- }
-
- ret = clk_prepare_enable(msm_host->ahb_clk);
- if (ret) {
- pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
- goto ahb_clk_err;
- }
-
- ret = clk_prepare_enable(msm_host->axi_clk);
- if (ret) {
- pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
- goto axi_clk_err;
- }
-
- ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
- if (ret) {
- pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
- __func__, ret);
- goto misc_ahb_clk_err;
+ for (i = 0; i < cfg->num_bus_clks; i++) {
+ ret = clk_prepare_enable(msm_host->bus_clks[i]);
+ if (ret) {
+ pr_err("%s: failed to enable bus clock %d ret %d\n",
+ __func__, i, ret);
+ goto err;
+ }
}
return 0;
+err:
+ while (--i >= 0)
+ clk_disable_unprepare(msm_host->bus_clks[i]);
-misc_ahb_clk_err:
- clk_disable_unprepare(msm_host->axi_clk);
-axi_clk_err:
- clk_disable_unprepare(msm_host->ahb_clk);
-ahb_clk_err:
- clk_disable_unprepare(msm_host->mdp_core_clk);
-core_clk_err:
return ret;
}
static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
{
+ const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
+ int i;
+
DBG("");
- clk_disable_unprepare(msm_host->mmss_misc_ahb_clk);
- clk_disable_unprepare(msm_host->axi_clk);
- clk_disable_unprepare(msm_host->ahb_clk);
- clk_disable_unprepare(msm_host->mdp_core_clk);
+
+ for (i = cfg->num_bus_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(msm_host->bus_clks[i]);
}
-static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
+static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -476,11 +508,98 @@ error:
return ret;
}
-static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
+static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
+ int ret;
+
+ DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
+ msm_host->mode->clock, msm_host->byte_clk_rate,
+ msm_host->esc_clk_rate, msm_host->src_clk_rate);
+
+ ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ if (ret) {
+ pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(msm_host->byte_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(msm_host->esc_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+ goto esc_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->src_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi src clk\n", __func__);
+ goto src_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->pixel_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+ goto pixel_clk_err;
+ }
+
+ return 0;
+
+pixel_clk_err:
+ clk_disable_unprepare(msm_host->src_clk);
+src_clk_err:
clk_disable_unprepare(msm_host->esc_clk);
- clk_disable_unprepare(msm_host->pixel_clk);
+esc_clk_err:
clk_disable_unprepare(msm_host->byte_clk);
+error:
+ return ret;
+}
+
+static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
+ return dsi_link_clk_enable_6g(msm_host);
+ else
+ return dsi_link_clk_enable_v2(msm_host);
+}
+
+static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->pixel_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
+ } else {
+ clk_disable_unprepare(msm_host->pixel_clk);
+ clk_disable_unprepare(msm_host->src_clk);
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
+ }
}
static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
@@ -515,6 +634,7 @@ unlock_ret:
static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
{
struct drm_display_mode *mode = msm_host->mode;
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
u32 pclk_rate;
@@ -534,6 +654,47 @@ static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
+ msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
+ unsigned int esc_mhz, esc_div;
+ unsigned long byte_mhz;
+
+ msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
+
+ /*
+ * the esc clock is the byte clock followed by a 4 bit divider,
+ * so we need to find an escape clock frequency within the
+ * MIPI DSI spec range that the divider can still produce.
+ * We iterate over escape clock frequencies from 20 MHz down
+ * to 5 MHz and pick the first one the divider can support.
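+ *
+ * Example (illustrative numbers): with a 53 MHz byte clock,
+ * esc_mhz = 20 gives esc_div = DIV_ROUND_UP(53, 20) = 3, which
+ * fits the 4 bit divider, so esc_clk_rate = 53 MHz / 3 ~= 17.7 MHz.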
+ */
+
+ byte_mhz = msm_host->byte_clk_rate / 1000000;
+
+ for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
+ esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
+
+ /*
+ * TODO: Ideally, we shouldn't know what sort of divider
+ * is available in mmss_cc, we're just assuming that
+ * it'll always be a 4 bit divider. Need to come up with
+ * a better way here.
+ */
+ if (esc_div >= 1 && esc_div <= 16)
+ break;
+ }
+
+ if (esc_mhz < 5)
+ return -EINVAL;
+
+ msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+
+ DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
+ msm_host->src_clk_rate);
+ }
+
return 0;
}
@@ -835,29 +996,46 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
u32 iova;
- mutex_lock(&dev->struct_mutex);
- msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
- if (IS_ERR(msm_host->tx_gem_obj)) {
- ret = PTR_ERR(msm_host->tx_gem_obj);
- pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
- msm_host->tx_gem_obj = NULL;
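+ /*
+ * DSI 6G hosts fetch the command buffer through the MMU, so a GEM
+ * object works; DSI v2 hosts (which presumably lack MMU access) need
+ * a physically contiguous buffer, hence dma_alloc_coherent() below.
+ */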
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
+ mutex_lock(&dev->struct_mutex);
+ msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
+ if (IS_ERR(msm_host->tx_gem_obj)) {
+ ret = PTR_ERR(msm_host->tx_gem_obj);
+ pr_err("%s: failed to allocate gem, %d\n",
+ __func__, ret);
+ msm_host->tx_gem_obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret) {
+ pr_err("%s: failed to get iova, %d\n", __func__, ret);
+ return ret;
+ }
- ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
- if (ret) {
- pr_err("%s: failed to get iova, %d\n", __func__, ret);
- return ret;
- }
- mutex_unlock(&dev->struct_mutex);
+ if (iova & 0x07) {
+ pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+ return -EINVAL;
+ }
- if (iova & 0x07) {
- pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
- return -EINVAL;
+ msm_host->tx_size = msm_host->tx_gem_obj->size;
+ } else {
+ msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
+ &msm_host->tx_buf_paddr, GFP_KERNEL);
+ if (!msm_host->tx_buf) {
+ ret = -ENOMEM;
+ pr_err("%s: failed to allocate tx buf, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ msm_host->tx_size = size;
}
return 0;
@@ -874,14 +1052,19 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
msm_host->tx_gem_obj = NULL;
mutex_unlock(&dev->struct_mutex);
}
+
+ if (msm_host->tx_buf)
+ dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
+ msm_host->tx_buf_paddr);
}
/*
* prepare cmd buffer to be txed
*/
-static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
- const struct mipi_dsi_msg *msg)
+static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
+ const struct mipi_dsi_msg *msg)
{
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct mipi_dsi_packet packet;
int len;
int ret;
@@ -894,17 +1077,20 @@ static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
}
len = (packet.size + 3) & (~0x3);
- if (len > tx_gem->size) {
+ if (len > msm_host->tx_size) {
pr_err("%s: packet size is too big\n", __func__);
return -EINVAL;
}
- data = msm_gem_vaddr(tx_gem);
-
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- pr_err("%s: get vaddr failed, %d\n", __func__, ret);
- return ret;
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
+ data = msm_gem_vaddr(msm_host->tx_gem_obj);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+ return ret;
+ }
+ } else {
+ data = msm_host->tx_buf;
}
/* MSM specific command format in memory */
@@ -970,17 +1156,21 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
return msg->rx_len;
}
-
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- u32 iova;
+ u32 dma_base;
bool triggered;
- ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
- if (ret) {
- pr_err("%s: failed to get iova: %d\n", __func__, ret);
- return ret;
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
+ ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &dma_base);
+ if (ret) {
+ pr_err("%s: failed to get iova: %d\n", __func__, ret);
+ return ret;
+ }
+ } else {
+ dma_base = msm_host->tx_buf_paddr;
}
reinit_completion(&msm_host->dma_comp);
@@ -988,7 +1178,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
dsi_wait4video_eng_busy(msm_host);
triggered = msm_dsi_manager_cmd_xfer_trigger(
- msm_host->id, iova, len);
+ msm_host->id, dma_base, len);
if (triggered) {
ret = wait_for_completion_timeout(&msm_host->dma_comp,
msecs_to_jiffies(200));
@@ -1060,7 +1250,7 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
int bllp_len = msm_host->mode->hdisplay *
dsi_get_bpp(msm_host->format) / 8;
- len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
+ len = dsi_cmd_dma_add(msm_host, msg);
if (!len) {
pr_err("%s: failed to add cmd type = 0x%x\n",
__func__, msg->type);
@@ -1383,6 +1573,16 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
msm_host->device_node = device_node;
+ if (of_property_read_bool(np, "syscon-sfpb")) {
+ msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
+ "syscon-sfpb");
+ if (IS_ERR(msm_host->sfpb)) {
+ dev_err(dev, "%s: failed to get sfpb regmap\n",
+ __func__);
+ return PTR_ERR(msm_host->sfpb);
+ }
+ }
+
return 0;
}
@@ -1408,12 +1608,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
- ret = dsi_clk_init(msm_host);
- if (ret) {
- pr_err("%s: unable to initialize dsi clks\n", __func__);
- goto fail;
- }
-
msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
if (IS_ERR(msm_host->ctrl_base)) {
pr_err("%s: unable to map Dsi ctrl base\n", __func__);
@@ -1437,6 +1631,12 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
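+ /*
+ * Clock setup depends on the detected host version (bus clock list,
+ * DSI v2 source clocks), so it has to run after the config lookup.
+ */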
+ ret = dsi_clk_init(msm_host);
+ if (ret) {
+ pr_err("%s: unable to initialize dsi clks\n", __func__);
+ goto fail;
+ }
+
msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
if (!msm_host->rx_buf) {
pr_err("%s: alloc rx temp buf failed\n", __func__);
@@ -1750,11 +1950,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
return ret;
}
-void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
+ u32 len)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
+ dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
dsi_write(msm_host, REG_DSI_DMA_LEN, len);
dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
@@ -1766,6 +1967,7 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct clk *byte_clk_provider, *pixel_clk_provider;
int ret;
@@ -1791,6 +1993,22 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
goto exit;
}
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
+ ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
+ if (ret) {
+ pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+
+ ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
+ if (ret) {
+ pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+ }
+
exit:
return ret;
}
@@ -1828,6 +2046,20 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host)
return 0;
}
+static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
+{
+ enum sfpb_ahb_arb_master_port_en en;
+
+ if (!msm_host->sfpb)
+ return;
+
+ en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
+
+ regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
+ SFPB_GPREG_MASTER_PORT_EN__MASK,
+ SFPB_GPREG_MASTER_PORT_EN(en));
+}
+
int msm_dsi_host_power_on(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
@@ -1840,6 +2072,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
goto unlock_ret;
}
+ msm_dsi_sfpb_config(msm_host, true);
+
ret = dsi_calc_clk_rate(msm_host);
if (ret) {
pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
@@ -1862,7 +2096,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
dsi_phy_sw_reset(msm_host);
ret = msm_dsi_manager_phy_enable(msm_host->id,
msm_host->byte_clk_rate * 8,
- clk_get_rate(msm_host->esc_clk),
+ msm_host->esc_clk_rate,
&clk_pre, &clk_post);
dsi_bus_clk_disable(msm_host);
if (ret) {
@@ -1927,6 +2161,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
dsi_host_regulator_disable(msm_host);
+ msm_dsi_sfpb_config(msm_host, false);
+
DBG("-");
msm_host->power_on = false;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 0455ff75074a..58ba7ec17f51 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -774,7 +774,7 @@ restore_host0:
return ret;
}
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
@@ -784,9 +784,9 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
return false;
if (IS_SYNC_NEEDED() && msm_dsi0)
- msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len);
+ msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, dma_base, len);
- msm_dsi_host_cmd_xfer_commit(host, iova, len);
+ msm_dsi_host_cmd_xfer_commit(host, dma_base, len);
return true;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index f1f955f571fa..91a95fb04a4a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -277,6 +277,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
{ .compatible = "qcom,dsi-phy-20nm",
.data = &dsi_phy_20nm_cfgs },
#endif
+#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
+ { .compatible = "qcom,dsi-phy-28nm-8960",
+ .data = &dsi_phy_28nm_8960_cfgs },
+#endif
{}
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 0456b253239f..0d54ed00386d 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -43,6 +43,7 @@ struct msm_dsi_phy_cfg {
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_pre;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
new file mode 100644
index 000000000000..197b039ca1f1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing)
+{
+ void __iomem *base = phy->base;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
+ DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
+ DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
+ DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
+ DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
+ DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
+ DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
+ DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
+ DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
+ DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
+ DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
+ DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
+ 0x100);
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 0xa);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 0x4);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4, 0x20);
+}
+
+static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+ u32 status;
+ int i = 5000;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
+ 0x3);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3, 0x10);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0, 0x1);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x1);
+ usleep_range(5000, 6000);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x0);
+
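+ /* poll (roughly 5 ms worst case) for the CAL_BUSY bit to clear */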
+ do {
+ status = dsi_phy_read(base +
+ REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS);
+
+ if (!(status & DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY))
+ break;
+
+ udelay(1);
+ } while (--i > 0);
+}
+
+static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i), 0x80);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
+ 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
+ 0x01);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
+ 0x66);
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1, 0x67);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1, 0x88);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+ const unsigned long bit_rate, const unsigned long esc_rate)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ void __iomem *base = phy->base;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+ dev_err(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_28nm_phy_regulator_init(phy);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LDO_CTRL, 0x04);
+
+ /* strength control */
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_0, 0xff);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_2, 0x06);
+
+ /* phy ctrl */
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x5f);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_2, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_3, 0x10);
+
+ dsi_28nm_phy_regulator_ctrl(phy);
+
+ dsi_28nm_phy_calibration(phy);
+
+ dsi_28nm_phy_lane_config(phy);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0f);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1, 0x03);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0, 0x03);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0);
+
+ dsi_28nm_dphy_set_timing(phy, timing);
+
+ return 0;
+}
+
+static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x0);
+
+ /*
+ * Wait for the register writes to complete in order to
+ * ensure that the phy is completely disabled
+ */
+ wmb();
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
+ .type = MSM_DSI_PHY_28NM_8960,
+ .src_pll_truthtable = { {true, true}, {false, true} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vddio", 1800000, 1800000, 100000, 100},
+ },
+ },
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ },
+};
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 5104fc9f9a53..5cd438f91afe 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -151,6 +151,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
case MSM_DSI_PHY_28NM_LP:
pll = msm_dsi_pll_28nm_init(pdev, type, id);
break;
+ case MSM_DSI_PHY_28NM_8960:
+ pll = msm_dsi_pll_28nm_8960_init(pdev, id);
+ break;
default:
pll = ERR_PTR(-ENXIO);
break;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 063caa2c5740..80b6038334a6 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -93,6 +93,16 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
return ERR_PTR(-ENODEV);
}
#endif
+#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
+struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
+ int id);
+#else
+static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(
+ struct platform_device *pdev, int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
#endif /* __DSI_PLL_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
new file mode 100644
index 000000000000..38c90e1eb002
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 28nm (8960/A family) - clock diagram (e.g. DSI1):
+ *
+ *
+ * +------+
+ * dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
+ * F * byte_clk | +------+
+ * | bit clock divider (F / 8)
+ * |
+ * | +------+
+ * o-----| DIV2 |---dsi0pllbyte---o---> To byte RCG
+ * | +------+ | (sets parent rate)
+ * | byte clock divider (F) |
+ * | |
+ * | o---> To esc RCG
+ * | (doesn't set parent rate)
+ * |
+ * | +------+
+ * o-----| DIV3 |----dsi0pll------o---> To dsi RCG
+ * +------+ | (sets parent rate)
+ * dsi clock divider (F * magic) |
+ * |
+ * o---> To pixel RCG
+ * (doesn't set parent rate)
+ */
+
+#define POLL_MAX_READS 8000
+#define POLL_TIMEOUT_US 1
+
+#define NUM_PROVIDED_CLKS 2
+
+#define VCO_REF_CLK_RATE 27000000
+#define VCO_MIN_RATE 600000000
+#define VCO_MAX_RATE 1200000000
+
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+
+#define VCO_PREF_DIV_RATIO 27
+
+struct pll_28nm_cached_state {
+ unsigned long vco_rate;
+ u8 postdiv3;
+ u8 postdiv2;
+ u8 postdiv1;
+};
+
+struct clk_bytediv {
+ struct clk_hw hw;
+ void __iomem *reg;
+};
+
+struct dsi_pll_28nm {
+ struct msm_dsi_pll base;
+
+ int id;
+ struct platform_device *pdev;
+ void __iomem *mmio;
+
+ /* custom byte clock divider */
+ struct clk_bytediv *bytediv;
+
+ /* private clocks: */
+ struct clk *clks[NUM_DSI_CLOCKS_MAX];
+ u32 num_clks;
+
+ /* clock-provider: */
+ struct clk *provided_clks[NUM_PROVIDED_CLKS];
+ struct clk_onecell_data clk_data;
+
+ struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, base)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+ int nb_tries, int timeout_us)
+{
+ bool pll_locked = false;
+ u32 val;
+
+ while (nb_tries--) {
+ val = pll_read(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_RDY);
+ pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout_us);
+ }
+ DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+ return pll_locked;
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+ void __iomem *base = pll_28nm->mmio;
+ u32 val, temp, fb_divider;
+
+ DBG("rate=%lu, parent's=%lu", rate, parent_rate);
+
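+ /*
+ * Example (illustrative): rate = 648 MHz with the 27 MHz reference
+ * and a ref divider of 27 gives fb_divider = 648, programmed as
+ * 648 / 2 - 1 = 323 split across CTRL_1 (low byte) and CTRL_2.
+ */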
+ temp = rate / 10;
+ val = VCO_REF_CLK_RATE / 10;
+ fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
+ fb_divider = fb_divider / 2 - 1;
+ pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
+ fb_divider & 0xff);
+
+ val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
+
+ val |= (fb_divider >> 8) & 0x07;
+
+ pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
+ val);
+
+ val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
+
+ val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
+
+ pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
+ val);
+
+ pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
+ 0xf);
+
+ val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+ val |= 0x7 << 4;
+ pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
+ val);
+
+ return 0;
+}
+
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+ return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+ POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+ void __iomem *base = pll_28nm->mmio;
+ unsigned long vco_rate;
+ u32 status, fb_divider, temp, ref_divider;
+
+ VERB("parent_rate=%lu", parent_rate);
+
+ status = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
+
+ if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
+ fb_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
+ fb_divider &= 0xff;
+ temp = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
+ fb_divider = (temp << 8) | fb_divider;
+ fb_divider += 1;
+
+ ref_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
+ ref_divider &= 0x3f;
+ ref_divider += 1;
+
+ /* multiply by 2 */
+ vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
+ } else {
+ vco_rate = 0;
+ }
+
+ DBG("returning vco rate = %lu", vco_rate);
+
+ return vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
+ .round_rate = msm_dsi_pll_helper_clk_round_rate,
+ .set_rate = dsi_pll_28nm_clk_set_rate,
+ .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+ .prepare = msm_dsi_pll_helper_clk_prepare,
+ .unprepare = msm_dsi_pll_helper_clk_unprepare,
+ .is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * Custom byte clock divider clk_ops
+ *
+ * This clock is the entry point to configuring the PLL. The user (dsi host)
+ * will set this clock's rate to the desired byte clock rate. The VCO lock
+ * frequency is a multiple of the byte clock rate. The multiplication factor
+ * (shown as F in the diagram above) is a function of the byte clock rate.
+ *
+ * This custom divider clock ensures that its parent (VCO) is set to the
+ * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
+ * accordingly
+ */
+#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
+
+static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_bytediv *bytediv = to_clk_bytediv(hw);
+ unsigned int div;
+
+ div = pll_read(bytediv->reg) & 0xff;
+
+ return parent_rate / (div + 1);
+}
+
+/* find multiplication factor (wrt byte clock) at which the VCO should be set */
+static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
+{
+ unsigned long bit_mhz;
+
+ /* convert to bit clock in MHz */
+ bit_mhz = (byte_clk_rate * 8) / 1000000;
+
+ if (bit_mhz < 125)
+ return 64;
+ else if (bit_mhz < 250)
+ return 32;
+ else if (bit_mhz < 600)
+ return 16;
+ else
+ return 8;
+}
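+
+/*
+ * Example (illustrative): a 60 MHz byte clock is a 480 MHz bit clock,
+ * so the factor is 16 and the VCO is asked to lock at 60 MHz * 16 =
+ * 960 MHz, within the 600-1200 MHz VCO range defined above.
+ */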
+
+static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long best_parent;
+ unsigned int factor;
+
+ factor = get_vco_mul_factor(rate);
+
+ best_parent = rate * factor;
+ *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+
+ return *prate / factor;
+}
+
+static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_bytediv *bytediv = to_clk_bytediv(hw);
+ u32 val;
+ unsigned int factor;
+
+ factor = get_vco_mul_factor(rate);
+
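+ /* the new divider is only OR'd in; assumes the low byte reads back as 0 */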
+ val = pll_read(bytediv->reg);
+ val |= (factor - 1) & 0xff;
+ pll_write(bytediv->reg, val);
+
+ return 0;
+}
+
+/* Our special byte clock divider ops */
+static const struct clk_ops clk_bytediv_ops = {
+ .round_rate = clk_bytediv_round_rate,
+ .set_rate = clk_bytediv_set_rate,
+ .recalc_rate = clk_bytediv_recalc_rate,
+};
+
+/*
+ * PLL Callbacks
+ */
+static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+ struct device *dev = &pll_28nm->pdev->dev;
+ void __iomem *base = pll_28nm->mmio;
+ bool locked;
+ unsigned int bit_div, byte_div;
+ int max_reads = 1000, timeout_us = 100;
+ u32 val;
+
+ DBG("id=%d", pll_28nm->id);
+
+ /*
+ * before enabling the PLL, configure the bit clock divider since we
+ * don't expose it as a clock to the outside world
+ * 1: read back the byte clock divider that should already be set
+ * 2: divide by 8 to get bit clock divider
+ * 3: write it to POSTDIV1
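+ *
+ * Example (illustrative): if CTRL_9 reads 31, byte_div = 32 and
+ * bit_div = 4, so 3 is written into the low nibble of CTRL_8.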
+ */
+ val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
+ byte_div = val + 1;
+ bit_div = byte_div / 8;
+
+ val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+ val &= ~0xf;
+ val |= (bit_div - 1);
+ pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
+
+ /* enable the PLL */
+ pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
+ DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
+
+ locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+ if (unlikely(!locked))
+ dev_err(dev, "DSI PLL lock failed\n");
+ else
+ DBG("DSI PLL lock success");
+
+ return locked ? 0 : -EINVAL;
+}
+
+static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+ DBG("id=%d", pll_28nm->id);
+ pll_write(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
+}
+
+static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+ struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+ void __iomem *base = pll_28nm->mmio;
+
+ cached_state->postdiv3 =
+ pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
+ cached_state->postdiv2 =
+ pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
+ cached_state->postdiv1 =
+ pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+
+ cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
+}
+
+static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+ struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+ void __iomem *base = pll_28nm->mmio;
+ int ret;
+
+ ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
+ cached_state->vco_rate, 0);
+ if (ret) {
+ dev_err(&pll_28nm->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
+ cached_state->postdiv3);
+ pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
+ cached_state->postdiv2);
+ pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
+ cached_state->postdiv1);
+
+ return 0;
+}
+
+static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
+ struct clk **byte_clk_provider,
+ struct clk **pixel_clk_provider)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+ if (byte_clk_provider)
+ *byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
+ if (pixel_clk_provider)
+ *pixel_clk_provider =
+ pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
+
+ return 0;
+}
+
+static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+ msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
+ pll_28nm->clks, pll_28nm->num_clks);
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
+{
+ char *clk_name, *parent_name, *vco_name;
+ struct clk_init_data vco_init = {
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_ops_dsi_pll_28nm_vco,
+ };
+ struct device *dev = &pll_28nm->pdev->dev;
+ struct clk **clks = pll_28nm->clks;
+ struct clk **provided_clks = pll_28nm->provided_clks;
+ struct clk_bytediv *bytediv;
+ struct clk_init_data bytediv_init = { };
+ int ret, num = 0;
+
+ DBG("%d", pll_28nm->id);
+
+ bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
+ if (!bytediv)
+ return -ENOMEM;
+
+ vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+ if (!vco_name)
+ return -ENOMEM;
+
+ parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+ if (!parent_name)
+ return -ENOMEM;
+
+ clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+ if (!clk_name)
+ return -ENOMEM;
+
+ pll_28nm->bytediv = bytediv;
+
+ snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
+ vco_init.name = vco_name;
+
+ pll_28nm->base.clk_hw.init = &vco_init;
+
+ clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
+
+ /* prepare and register bytediv */
+ bytediv->hw.init = &bytediv_init;
+ bytediv->reg = pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
+
+ snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->id);
+ snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
+
+ bytediv_init.name = clk_name;
+ bytediv_init.ops = &clk_bytediv_ops;
+ bytediv_init.flags = CLK_SET_RATE_PARENT;
+ bytediv_init.parent_names = (const char * const *) &parent_name;
+ bytediv_init.num_parents = 1;
+
+ /* DIV2 */
+ clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
+ clk_register(dev, &bytediv->hw);
+
+ snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
+ /* DIV3 */
+ clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
+ clk_register_divider(dev, clk_name,
+ parent_name, 0, pll_28nm->mmio +
+ REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
+ 0, 8, 0, NULL);
+
+ pll_28nm->num_clks = num;
+
+ pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
+ pll_28nm->clk_data.clks = provided_clks;
+
+ ret = of_clk_add_provider(dev->of_node,
+ of_clk_src_onecell_get, &pll_28nm->clk_data);
+ if (ret) {
+ dev_err(dev, "failed to register clk provider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
+ int id)
+{
+ struct dsi_pll_28nm *pll_28nm;
+ struct msm_dsi_pll *pll;
+ int ret;
+
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+ if (!pll_28nm)
+ return ERR_PTR(-ENOMEM);
+
+ pll_28nm->pdev = pdev;
+ pll_28nm->id = id + 1;
+
+ pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+ if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
+ dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pll = &pll_28nm->base;
+ pll->min_rate = VCO_MIN_RATE;
+ pll->max_rate = VCO_MAX_RATE;
+ pll->get_provider = dsi_pll_28nm_get_provider;
+ pll->destroy = dsi_pll_28nm_destroy;
+ pll->disable_seq = dsi_pll_28nm_disable_seq;
+ pll->save_state = dsi_pll_28nm_save_state;
+ pll->restore_state = dsi_pll_28nm_restore_state;
+
+ pll->en_seq_cnt = 1;
+ pll->enable_seqs[0] = dsi_pll_28nm_enable_seq;
+
+ ret = pll_28nm_register(pll_28nm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ return pll;
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 1f4a95eeb348..9a0989c0b4de 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -17,6 +17,8 @@
*/
#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+
#include "hdmi.h"
void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -322,8 +324,6 @@ fail:
* The hdmi device:
*/
-#include <linux/of_gpio.h>
-
#define HDMI_CFG(item, entry) \
.item ## _names = item ##_names_ ## entry, \
.item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
@@ -388,17 +388,6 @@ static struct hdmi_platform_config hdmi_tx_8996_config = {
.hpd_freq = hpd_clk_freq_8x74,
};
-static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
- { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
- { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
- { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
- { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
- { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
- {}
-};
-
-#ifdef CONFIG_OF
static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
{
int gpio = of_get_named_gpio(of_node, name, 0);
@@ -413,7 +402,6 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
}
return gpio;
}
-#endif
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
@@ -421,16 +409,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
struct msm_drm_private *priv = drm->dev_private;
static struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
-#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
- const struct of_device_id *match;
- match = of_match_node(dt_match, of_node);
- if (match && match->data) {
- hdmi_cfg = (struct hdmi_platform_config *)match->data;
- DBG("hdmi phy: %s", match->compatible);
- } else {
- dev_err(dev, "unknown phy: %s\n", of_node->name);
+ hdmi_cfg = (struct hdmi_platform_config *)
+ of_device_get_match_data(dev);
+ if (!hdmi_cfg) {
+ dev_err(dev, "unknown hdmi_cfg: %s\n", of_node->name);
return -ENXIO;
}
@@ -443,55 +427,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
-#else
- static struct hdmi_platform_config config = {};
- static const char *hpd_clk_names[] = {
- "core_clk", "master_iface_clk", "slave_iface_clk",
- };
- if (cpu_is_apq8064()) {
- static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
- config.phy_init = hdmi_phy_8960_init;
- config.hpd_reg_names = hpd_reg_names;
- config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
- config.hpd_clk_names = hpd_clk_names;
- config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
- config.ddc_clk_gpio = 70;
- config.ddc_data_gpio = 71;
- config.hpd_gpio = 72;
- config.mux_en_gpio = -1;
- config.mux_sel_gpio = -1;
- } else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
- static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
- config.phy_init = hdmi_phy_8960_init;
- config.hpd_reg_names = hpd_reg_names;
- config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
- config.hpd_clk_names = hpd_clk_names;
- config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
- config.ddc_clk_gpio = 100;
- config.ddc_data_gpio = 101;
- config.hpd_gpio = 102;
- config.mux_en_gpio = -1;
- config.mux_sel_gpio = -1;
- } else if (cpu_is_msm8x60()) {
- static const char *hpd_reg_names[] = {
- "8901_hdmi_mvs", "8901_mpp0"
- };
- config.phy_init = hdmi_phy_8x60_init;
- config.hpd_reg_names = hpd_reg_names;
- config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
- config.hpd_clk_names = hpd_clk_names;
- config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
- config.ddc_clk_gpio = 170;
- config.ddc_data_gpio = 171;
- config.hpd_gpio = 172;
- config.mux_en_gpio = -1;
- config.mux_sel_gpio = -1;
- }
- config.mmio_name = "hdmi_msm_hdmi_addr";
- config.qfprom_mmio_name = "hdmi_msm_qfprom_addr";
-
- hdmi_cfg = &config;
-#endif
dev->platform_data = hdmi_cfg;
hdmi = hdmi_init(to_platform_device(dev));
@@ -529,6 +464,16 @@ static int hdmi_dev_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
+ { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
+ { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
+ { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
+ { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
+ { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
+ {}
+};
+
static struct platform_driver hdmi_driver = {
.probe = hdmi_dev_probe,
.remove = hdmi_dev_remove,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 6ac9aa165768..28df397c3b04 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -678,7 +678,8 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
- drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
+ NULL);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
plane->crtc = crtc;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
new file mode 100644
index 000000000000..2f57e9453b67
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, Inforce Computing. All rights reserved.
+ *
+ * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp4_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct mdp4_dsi_encoder {
+ struct drm_encoder base;
+ struct drm_panel *panel;
+ bool enabled;
+};
+#define to_mdp4_dsi_encoder(x) container_of(x, struct mdp4_dsi_encoder, base)
+
+static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(mdp4_dsi_encoder);
+}
+
+static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = {
+ .destroy = mdp4_dsi_encoder_destroy,
+};
+
+static bool mdp4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ uint32_t dsi_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP4_DSI_CTRL_POLARITY_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP4_DSI_CTRL_POLARITY_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dsi_hsync_skew = 0; /* get this from panel? */
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dsi_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dsi_hsync_skew - 1;
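+ /*
+ * Example (illustrative): for 640x480 with hsync_start 656,
+ * hsync_end 752 and htotal 800, hsync_start_x = 800 - 656 = 144
+ * and hsync_end_x = 800 - (656 - 640) - 1 = 783.
+ */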
+
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_CTRL,
+ MDP4_DSI_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP4_DSI_HSYNC_CTRL_PERIOD(mode->htotal));
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_PERIOD, vsync_period);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_LEN, vsync_len);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_HCTRL,
+ MDP4_DSI_DISPLAY_HCTRL_START(hsync_start_x) |
+ MDP4_DSI_DISPLAY_HCTRL_END(hsync_end_x));
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VSTART, display_v_start);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VEND, display_v_end);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_CTRL_POLARITY, ctrl_pol);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_UNDERFLOW_CLR,
+ MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY |
+ MDP4_DSI_UNDERFLOW_CLR_COLOR(0xff));
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_HCTL,
+ MDP4_DSI_ACTIVE_HCTL_START(0) |
+ MDP4_DSI_ACTIVE_HCTL_END(0));
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_SKEW, dsi_hsync_skew);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_BORDER_CLR, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VSTART, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VEND, 0);
+}
+
+static void mdp4_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+
+ if (!mdp4_dsi_encoder->enabled)
+ return;
+
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+
+ /*
+ * Wait for a vsync so we know that ENABLE=0 has latched before
+ * the (connector) source of the vsyncs gets disabled,
+ * otherwise we end up in a funny state if we re-enable
+ * before the disable latches: some of the settings for the
+ * new modeset (like the new scanout buffer) wouldn't latch
+ * properly.
+ */
+ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
+
+ mdp4_dsi_encoder->enabled = false;
+}
+
+static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+
+ if (mdp4_dsi_encoder->enabled)
+ return;
+
+ mdp4_crtc_set_config(encoder->crtc,
+ MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
+ MDP4_DMA_CONFIG_DEFLKR_EN |
+ MDP4_DMA_CONFIG_DITHER_EN |
+ MDP4_DMA_CONFIG_R_BPC(BPC8) |
+ MDP4_DMA_CONFIG_G_BPC(BPC8) |
+ MDP4_DMA_CONFIG_B_BPC(BPC8) |
+ MDP4_DMA_CONFIG_PACK(0x21));
+
+ mdp4_crtc_set_intf(encoder->crtc, INTF_DSI_VIDEO, 0);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 1);
+
+ mdp4_dsi_encoder->enabled = true;
+}
+
+static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = {
+ .mode_fixup = mdp4_dsi_encoder_mode_fixup,
+ .mode_set = mdp4_dsi_encoder_mode_set,
+ .disable = mdp4_dsi_encoder_disable,
+ .enable = mdp4_dsi_encoder_enable,
+};
+
+/* initialize encoder */
+struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp4_dsi_encoder *mdp4_dsi_encoder;
+ int ret;
+
+ mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL);
+ if (!mdp4_dsi_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ encoder = &mdp4_dsi_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp4_dsi_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 89614c6a6c1b..a21df54cb50f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -262,7 +262,7 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
encoder = &mdp4_dtv_encoder->base;
drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 5ed38cf548a1..a521207db8a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -29,7 +29,7 @@ void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
- DRM_ERROR("errors: %08x\n", irqstatus);
+ DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
}
void mdp4_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 077f7521a971..5a8e3d6bcbff 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -169,7 +169,14 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder)
{
/* if we had >1 encoder, we'd need something more clever: */
- return mdp4_dtv_round_pixclk(encoder, rate);
+ switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_TMDS:
+ return mdp4_dtv_round_pixclk(encoder, rate);
+ case DRM_MODE_ENCODER_LVDS:
+ case DRM_MODE_ENCODER_DSI:
+ default:
+ return rate;
+ }
}
static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
@@ -240,19 +247,18 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
return 0;
}
-#ifdef CONFIG_OF
-static struct drm_panel *detect_panel(struct drm_device *dev)
+static struct device_node *mdp4_detect_lcdc_panel(struct drm_device *dev)
{
struct device_node *endpoint, *panel_node;
struct device_node *np = dev->dev->of_node;
- struct drm_panel *panel = NULL;
endpoint = of_graph_get_next_endpoint(np, NULL);
if (!endpoint) {
- dev_err(dev->dev, "no valid endpoint\n");
- return ERR_PTR(-ENODEV);
+ DBG("no endpoint in MDP4 to fetch LVDS panel\n");
+ return NULL;
}
+ /* don't proceed if we have an endpoint but no panel_node tied to it */
panel_node = of_graph_get_remote_port_parent(endpoint);
if (!panel_node) {
dev_err(dev->dev, "no valid panel node\n");
@@ -262,132 +268,185 @@ static struct drm_panel *detect_panel(struct drm_device *dev)
of_node_put(endpoint);
- panel = of_drm_find_panel(panel_node);
- if (!panel) {
- of_node_put(panel_node);
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- return panel;
+ return panel_node;
}
-#else
-static struct drm_panel *detect_panel(struct drm_device *dev)
-{
- // ??? maybe use a module param to specify which panel is attached?
-}
-#endif
-static int modeset_init(struct mdp4_kms *mdp4_kms)
+static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
+ int intf_type)
{
struct drm_device *dev = mdp4_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
- struct drm_plane *plane;
- struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
- struct drm_panel *panel;
+ struct device_node *panel_node;
+ struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
+ int i, dsi_id;
int ret;
- /* construct non-private planes: */
- plane = mdp4_plane_init(dev, VG1, false);
- if (IS_ERR(plane)) {
- dev_err(dev->dev, "failed to construct plane for VG1\n");
- ret = PTR_ERR(plane);
- goto fail;
- }
- priv->planes[priv->num_planes++] = plane;
+ switch (intf_type) {
+ case DRM_MODE_ENCODER_LVDS:
+ /*
+ * bail out early if:
+ * - there is no panel node (no need to initialize lcdc
+ * encoder and lvds connector), or
+ * - panel node is a bad pointer
+ */
+ panel_node = mdp4_detect_lcdc_panel(dev);
+ if (IS_ERR_OR_NULL(panel_node))
+ return PTR_ERR(panel_node);
+
+ encoder = mdp4_lcdc_encoder_init(dev, panel_node);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct LCDC encoder\n");
+ return PTR_ERR(encoder);
+ }
- plane = mdp4_plane_init(dev, VG2, false);
- if (IS_ERR(plane)) {
- dev_err(dev->dev, "failed to construct plane for VG2\n");
- ret = PTR_ERR(plane);
- goto fail;
- }
- priv->planes[priv->num_planes++] = plane;
+ /* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
+ encoder->possible_crtcs = 1 << DMA_P;
- /*
- * Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS:
- */
+ connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
+ if (IS_ERR(connector)) {
+ dev_err(dev->dev, "failed to initialize LVDS connector\n");
+ return PTR_ERR(connector);
+ }
- panel = detect_panel(dev);
- if (IS_ERR(panel)) {
- ret = PTR_ERR(panel);
- dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
- goto fail;
- }
+ priv->encoders[priv->num_encoders++] = encoder;
+ priv->connectors[priv->num_connectors++] = connector;
- plane = mdp4_plane_init(dev, RGB2, true);
- if (IS_ERR(plane)) {
- dev_err(dev->dev, "failed to construct plane for RGB2\n");
- ret = PTR_ERR(plane);
- goto fail;
- }
+ break;
+ case DRM_MODE_ENCODER_TMDS:
+ encoder = mdp4_dtv_encoder_init(dev);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct DTV encoder\n");
+ return PTR_ERR(encoder);
+ }
- crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_P);
- if (IS_ERR(crtc)) {
- dev_err(dev->dev, "failed to construct crtc for DMA_P\n");
- ret = PTR_ERR(crtc);
- goto fail;
- }
+ /* DTV can be hooked to DMA_E: */
+ encoder->possible_crtcs = 1 << 1;
- encoder = mdp4_lcdc_encoder_init(dev, panel);
- if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct LCDC encoder\n");
- ret = PTR_ERR(encoder);
- goto fail;
- }
+ if (priv->hdmi) {
+ /* Construct bridge/connector for HDMI: */
+ ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+ if (ret) {
+ dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ return ret;
+ }
+ }
- /* LCDC can be hooked to DMA_P: */
- encoder->possible_crtcs = 1 << priv->num_crtcs;
+ priv->encoders[priv->num_encoders++] = encoder;
- priv->crtcs[priv->num_crtcs++] = crtc;
- priv->encoders[priv->num_encoders++] = encoder;
+ break;
+ case DRM_MODE_ENCODER_DSI:
+ /* only DSI1 supported for now */
+ dsi_id = 0;
- connector = mdp4_lvds_connector_init(dev, panel, encoder);
- if (IS_ERR(connector)) {
- ret = PTR_ERR(connector);
- dev_err(dev->dev, "failed to initialize LVDS connector: %d\n", ret);
- goto fail;
- }
+ if (!priv->dsi[dsi_id])
+ break;
- priv->connectors[priv->num_connectors++] = connector;
+ for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
+ dsi_encs[i] = mdp4_dsi_encoder_init(dev);
+ if (IS_ERR(dsi_encs[i])) {
+ ret = PTR_ERR(dsi_encs[i]);
+ dev_err(dev->dev,
+ "failed to construct DSI encoder: %d\n",
+ ret);
+ return ret;
+ }
- /*
- * Setup DTV/HDMI path: RGB1 -> DMA_E -> DTV -> HDMI:
- */
+ /* TODO: Add DMA_S later? */
+ dsi_encs[i]->possible_crtcs = 1 << DMA_P;
+ priv->encoders[priv->num_encoders++] = dsi_encs[i];
+ }
- plane = mdp4_plane_init(dev, RGB1, true);
- if (IS_ERR(plane)) {
- dev_err(dev->dev, "failed to construct plane for RGB1\n");
- ret = PTR_ERR(plane);
- goto fail;
- }
+ ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
+ if (ret) {
+ dev_err(dev->dev, "failed to initialize DSI: %d\n",
+ ret);
+ return ret;
+ }
- crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
- if (IS_ERR(crtc)) {
- dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
- ret = PTR_ERR(crtc);
- goto fail;
+ break;
+ default:
+ dev_err(dev->dev, "Invalid or unsupported interface\n");
+ return -EINVAL;
}
- encoder = mdp4_dtv_encoder_init(dev);
- if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct DTV encoder\n");
- ret = PTR_ERR(encoder);
- goto fail;
+ return 0;
+}
+
+static int modeset_init(struct mdp4_kms *mdp4_kms)
+{
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int i, ret;
+ static const enum mdp4_pipe rgb_planes[] = {
+ RGB1, RGB2,
+ };
+ static const enum mdp4_pipe vg_planes[] = {
+ VG1, VG2,
+ };
+ static const enum mdp4_dma mdp4_crtcs[] = {
+ DMA_P, DMA_E,
+ };
+ static const char * const mdp4_crtc_names[] = {
+ "DMA_P", "DMA_E",
+ };
+ static const int mdp4_intfs[] = {
+ DRM_MODE_ENCODER_LVDS,
+ DRM_MODE_ENCODER_DSI,
+ DRM_MODE_ENCODER_TMDS,
+ };
+
+ /* construct non-private planes: */
+ for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
+ plane = mdp4_plane_init(dev, vg_planes[i], false);
+ if (IS_ERR(plane)) {
+ dev_err(dev->dev,
+ "failed to construct plane for VG%d\n", i + 1);
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
}
- /* DTV can be hooked to DMA_E: */
- encoder->possible_crtcs = 1 << priv->num_crtcs;
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
+ plane = mdp4_plane_init(dev, rgb_planes[i], true);
+ if (IS_ERR(plane)) {
+ dev_err(dev->dev,
+ "failed to construct plane for RGB%d\n", i + 1);
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+
+ crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
+ mdp4_crtcs[i]);
+ if (IS_ERR(crtc)) {
+ dev_err(dev->dev, "failed to construct crtc for %s\n",
+ mdp4_crtc_names[i]);
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
- priv->crtcs[priv->num_crtcs++] = crtc;
- priv->encoders[priv->num_encoders++] = encoder;
+ /*
+ * we currently set up two relatively fixed paths:
+ *
+ * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
+ * or
+ * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
+ *
+ * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
+ */
- if (priv->hdmi) {
- /* Construct bridge/connector for HDMI: */
- ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+ for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
+ ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
if (ret) {
- dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
+ i, ret);
goto fail;
}
}
@@ -558,17 +617,10 @@ fail:
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
static struct mdp4_platform_config config = {};
-#ifdef CONFIG_OF
- /* TODO */
+
+ /* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(&platform_bus_type);
-#else
- if (cpu_is_apq8064())
- config.max_clk = 266667000;
- else
- config.max_clk = 200000000;
-
- config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
-#endif
+
return &config;
}
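[Note: one subtlety in the LVDS branch above: mdp4_detect_lcdc_panel() returns NULL, not an error pointer, when the DT simply has no panel endpoint, and since PTR_ERR(NULL) evaluates to 0, the IS_ERR_OR_NULL() check turns a missing panel into a silent skip rather than a probe failure. Sketched with a hypothetical lookup helper:]

    /* sketch: fold "absent" and "broken" into one exit path;
     * lookup_panel_node() is a hypothetical stand-in */
    struct device_node *panel_node = lookup_panel_node(dev);

    if (IS_ERR_OR_NULL(panel_node))
        return PTR_ERR(panel_node); /* 0 if NULL (skip), -errno if error */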
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 8a7f6e1e2bca..d2c96ef431f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -157,7 +157,7 @@ static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer,
COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
break;
default:
- WARN_ON("invalid pipe");
+ WARN(1, "invalid pipe");
break;
}
@@ -212,10 +212,19 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
- struct drm_panel *panel);
+ struct device_node *panel_node);
struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
- struct drm_panel *panel, struct drm_encoder *encoder);
+ struct device_node *panel_node, struct drm_encoder *encoder);
+
+#ifdef CONFIG_DRM_MSM_DSI
+struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev);
+#else
+static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
#ifdef CONFIG_COMMON_CLK
struct clk *mpd4_lvds_pll_init(struct drm_device *dev);
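[Note: the new mdp4_dsi_encoder_init() declaration uses the usual compile-out stub pattern: with CONFIG_DRM_MSM_DSI disabled, callers still compile and link, and the static inline stub hands back ERR_PTR(-ENODEV); in practice the DSI branch bails out even earlier because priv->dsi[] stays empty. The general shape, with placeholder names:]

    /* sketch: optional-feature stub; foo/CONFIG_FOO are placeholders */
    #ifdef CONFIG_FOO
    struct foo *foo_init(struct device *dev);
    #else
    static inline struct foo *foo_init(struct device *dev)
    {
            return ERR_PTR(-ENODEV);
    }
    #endif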
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index 4cd6e721aa0a..cd63fedb67cc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -23,6 +23,7 @@
struct mdp4_lcdc_encoder {
struct drm_encoder base;
+ struct device_node *panel_node;
struct drm_panel *panel;
struct clk *lcdc_clk;
unsigned long int pixclock;
@@ -338,7 +339,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
- struct drm_panel *panel = mdp4_lcdc_encoder->panel;
+ struct drm_panel *panel;
int i, ret;
if (WARN_ON(!mdp4_lcdc_encoder->enabled))
@@ -346,6 +347,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
+ panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
if (panel) {
drm_panel_disable(panel);
drm_panel_unprepare(panel);
@@ -381,7 +383,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
to_mdp4_lcdc_encoder(encoder);
unsigned long pc = mdp4_lcdc_encoder->pixclock;
struct mdp4_kms *mdp4_kms = get_kms(encoder);
- struct drm_panel *panel = mdp4_lcdc_encoder->panel;
+ struct drm_panel *panel;
int i, ret;
if (WARN_ON(mdp4_lcdc_encoder->enabled))
@@ -414,6 +416,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
if (ret)
dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
+ panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
if (panel) {
drm_panel_prepare(panel);
drm_panel_enable(panel);
@@ -442,7 +445,7 @@ long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
/* initialize encoder */
struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
- struct drm_panel *panel)
+ struct device_node *panel_node)
{
struct drm_encoder *encoder = NULL;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
@@ -455,12 +458,12 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
goto fail;
}
- mdp4_lcdc_encoder->panel = panel;
+ mdp4_lcdc_encoder->panel_node = panel_node;
encoder = &mdp4_lcdc_encoder->base;
drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
/* TODO: do we need different pll in other cases? */
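[Note: storing the panel's device_node instead of a drm_panel pointer lets the encoder resolve the panel lazily at enable/disable time, so the driver no longer has to probe-defer until the panel driver is bound. The lookup pattern used in both hunks above, sketched:]

    /* sketch: resolve the panel from the saved DT node on demand;
     * of_drm_find_panel() returns NULL until the panel has probed */
    struct drm_panel *panel = of_drm_find_panel(panel_node);

    if (panel) {
        drm_panel_prepare(panel);
        drm_panel_enable(panel);
    }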
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 921185133d38..e73e1742b250 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -23,6 +23,7 @@
struct mdp4_lvds_connector {
struct drm_connector base;
struct drm_encoder *encoder;
+ struct device_node *panel_node;
struct drm_panel *panel;
};
#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base)
@@ -33,6 +34,10 @@ static enum drm_connector_status mdp4_lvds_connector_detect(
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
+ if (!mdp4_lvds_connector->panel)
+ mdp4_lvds_connector->panel =
+ of_drm_find_panel(mdp4_lvds_connector->panel_node);
+
return mdp4_lvds_connector->panel ?
connector_status_connected :
connector_status_disconnected;
@@ -42,10 +47,6 @@ static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
- struct drm_panel *panel = mdp4_lvds_connector->panel;
-
- if (panel)
- drm_panel_detach(panel);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -60,9 +61,14 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
struct drm_panel *panel = mdp4_lvds_connector->panel;
int ret = 0;
- if (panel)
+ if (panel) {
+ drm_panel_attach(panel, connector);
+
ret = panel->funcs->get_modes(panel);
+ drm_panel_detach(panel);
+ }
+
return ret;
}
@@ -111,7 +117,7 @@ static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs
/* initialize connector */
struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
- struct drm_panel *panel, struct drm_encoder *encoder)
+ struct device_node *panel_node, struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
struct mdp4_lvds_connector *mdp4_lvds_connector;
@@ -124,7 +130,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
}
mdp4_lvds_connector->encoder = encoder;
- mdp4_lvds_connector->panel = panel;
+ mdp4_lvds_connector->panel_node = panel_node;
connector = &mdp4_lvds_connector->base;
@@ -141,9 +147,6 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
drm_mode_connector_attach_encoder(connector, encoder);
- if (panel)
- drm_panel_attach(panel, connector);
-
return connector;
fail:
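[Note: with the panel resolved lazily, the connector attaches the panel only around the get_modes() call instead of once at init, and detect() retries the lookup each poll cycle, so a panel driver that binds late flips the connector to connected on the next probe. The resulting flow, sketched:]

    /*
     * sketch of the resulting hotplug flow:
     *   panel driver probes  -> of_drm_find_panel() != NULL
     *   next detect() poll   -> connector_status_connected
     *   get_modes()          -> attach panel, query modes, detach panel
     */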
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 30d57e74c42f..9f96dfe67769 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -397,7 +397,8 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
- mdp4_plane->formats, mdp4_plane->nformats, type);
+ mdp4_plane->formats, mdp4_plane->nformats,
+ type, NULL);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index bb1225aa2f75..57f73f0c120d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -553,9 +553,7 @@ fail:
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
{
static struct mdp5_cfg_platform config = {};
-#ifdef CONFIG_OF
- /* TODO */
-#endif
+
config.iommu = iommu_domain_alloc(&platform_bus_type);
return &config;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 8e6c9b598a57..1aa21dba663d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -326,7 +326,7 @@ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
mdp5_cmd_enc->ctl = ctl;
drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
- DRM_MODE_ENCODER_DSI);
+ DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 7f9f4ac88029..20cee5ce4071 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -797,7 +797,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
pipe2name(mdp5_plane_pipe(plane)), id);
- drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs,
+ NULL);
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index c9e32b08a7a0..0d737cad03a6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -293,6 +293,24 @@ static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.enable = mdp5_encoder_enable,
};
+int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf.num;
+
+ return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
+}
+
+u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf.num;
+
+ return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
+}
+
int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder)
{
@@ -354,7 +372,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
spin_lock_init(&mdp5_encoder->intf_lock);
- drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type);
+ drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL);
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index b0d4b53b97f4..73bc3e312fd4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -31,7 +31,7 @@ void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
- DRM_ERROR("errors: %08x\n", irqstatus);
+ DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
}
void mdp5_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index b532faa8026d..e115318402bd 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -468,6 +468,127 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
return 0;
}
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->crtc == crtc)
+ return encoder;
+
+ return NULL;
+}
+
+static int mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
+ int ret = 0;
+
+ crtc = priv->crtcs[pipe];
+ if (!crtc) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return 0;
+ }
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder) {
+ DRM_ERROR("no encoder found for crtc %d\n", pipe);
+ return 0;
+ }
+
+ ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+ /*
+ * The line counter is 1 at the start of the VSYNC pulse and VTOTAL at
+ * the end of VFP. Translate the porch values into positions relative
+ * to the line counter.
+ */
+
+ vactive_start = vsw + vbp + 1;
+
+ vactive_end = vactive_start + mode->crtc_vdisplay;
+
+ /* last scan line before VSYNC */
+ vfp_end = mode->crtc_vtotal;
+
+ if (stime)
+ *stime = ktime_get();
+
+ line = mdp5_encoder_get_linecount(encoder);
+
+ if (line < vactive_start) {
+ line -= vactive_start;
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
+ } else if (line > vactive_end) {
+ line = line - vfp_end - vactive_start;
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
+ } else {
+ line -= vactive_start;
+ }
+
+ *vpos = line;
+ *hpos = 0;
+
+ if (etime)
+ *etime = ktime_get();
+
+ return ret;
+}
+
+static int mdp5_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc;
+
+ if (pipe < 0 || pipe >= priv->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ crtc = priv->crtcs[pipe];
+ if (!crtc) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+ vblank_time, flags,
+ &crtc->mode);
+}
+
+static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+
+ if (pipe < 0 || pipe >= priv->num_crtcs)
+ return 0;
+
+ crtc = priv->crtcs[pipe];
+ if (!crtc)
+ return 0;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder)
+ return 0;
+
+ return mdp5_encoder_get_framecount(encoder);
+}
+
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
@@ -590,6 +711,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
!config->hw->intf.base[i])
continue;
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
+
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
}
mdp5_disable(mdp5_kms);
mdelay(16);
@@ -635,6 +758,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->mode_config.max_width = config->hw->lm.max_width;
dev->mode_config.max_height = config->hw->lm.max_height;
+ dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
+ dev->driver->get_scanout_position = mdp5_get_scanoutpos;
+ dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
+ dev->max_vblank_count = 0xffffffff;
+ dev->vblank_disable_immediate = true;
+
return kms;
fail:
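[Note: a worked example of the line-counter translation in mdp5_get_scanoutpos() above, using a hypothetical 1080p timing; the hardware counter runs 1..VTOTAL, with 1 at the start of VSYNC:]

    /*
     * sketch: hypothetical 1080p timing
     * (crtc_vsync_start 1084, crtc_vsync_end 1089, crtc_vtotal 1125):
     *
     *   vsw  = 1089 - 1084 = 5
     *   vbp  = 1125 - 1089 = 36
     *   vactive_start = 5 + 36 + 1 = 42   (counter value of line 0)
     *   vactive_end   = 42 + 1080 = 1122
     *
     * counter 42   -> vpos 0                        (first active line)
     * counter 10   -> vpos 10 - 42 = -32, IN_VBLANK (vsync/back porch)
     * counter 1124 -> vpos 1124 - 1125 - 42 = -43, IN_VBLANK (front porch)
     */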
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 84f65d415598..00730ba08a60 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -222,6 +222,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
struct mdp5_interface *intf, struct mdp5_ctl *ctl);
int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
struct drm_encoder *slave_encoder);
+int mdp5_encoder_get_linecount(struct drm_encoder *encoder);
+u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder);
#ifdef CONFIG_DRM_MSM_DSI
struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 81cd49045ffc..432c09836b0e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -904,7 +904,7 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
- type);
+ type, NULL);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b88ce514eb8e..9a30807b900b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -237,20 +237,9 @@ static int msm_unload(struct drm_device *dev)
static int get_mdp_ver(struct platform_device *pdev)
{
-#ifdef CONFIG_OF
- static const struct of_device_id match_types[] = { {
- .compatible = "qcom,mdss_mdp",
- .data = (void *)5,
- }, {
- /* end node */
- } };
struct device *dev = &pdev->dev;
- const struct of_device_id *match;
- match = of_match_node(match_types, dev->of_node);
- if (match)
- return (int)(unsigned long)match->data;
-#endif
- return 4;
+
+ return (int) (unsigned long) of_device_get_match_data(dev);
}
#include <linux/of_address.h>
@@ -258,10 +247,10 @@ static int get_mdp_ver(struct platform_device *pdev)
static int msm_init_vram(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct device_node *node;
unsigned long size = 0;
int ret = 0;
-#ifdef CONFIG_OF
/* In the device-tree world, we could have a 'memory-region'
* phandle, which gives us a link to our "vram". Allocating
* is all nicely abstracted behind the dma api, but we need
@@ -278,7 +267,6 @@ static int msm_init_vram(struct drm_device *dev)
* as corruption on screen before we have a chance to
* load and do initial modeset)
*/
- struct device_node *node;
node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
if (node) {
@@ -288,14 +276,12 @@ static int msm_init_vram(struct drm_device *dev)
return ret;
size = r.end - r.start;
DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
- } else
-#endif
- /* if we have no IOMMU, then we need to use carveout allocator.
- * Grab the entire CMA chunk carved out in early startup in
- * mach-msm:
- */
- if (!iommu_present(&platform_bus_type)) {
+ /* if we have no IOMMU, then we need to use carveout allocator.
+ * Grab the entire CMA chunk carved out in early startup in
+ * mach-msm:
+ */
+ } else if (!iommu_present(&platform_bus_type)) {
DRM_INFO("using %s VRAM carveout\n", vram);
size = memparse(vram, NULL);
}
@@ -1035,9 +1021,9 @@ static const struct dev_pm_ops msm_pm_ops = {
* Componentized driver support:
*/
-#ifdef CONFIG_OF
-/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
- * (or probably any other).. so probably some room for some helpers
+/*
+ * NOTE: this duplicates the same code as exynos or imx (and probably any
+ * other), so there is probably room for some helpers.
*/
static int compare_of(struct device *dev, void *data)
{
@@ -1062,12 +1048,6 @@ static int add_components(struct device *dev, struct component_match **matchptr,
return 0;
}
-#else
-static int compare_dev(struct device *dev, void *data)
-{
- return dev == data;
-}
-#endif
static int msm_drm_bind(struct device *dev)
{
@@ -1091,35 +1071,9 @@ static const struct component_master_ops msm_drm_ops = {
static int msm_pdev_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
-#ifdef CONFIG_OF
+
add_components(&pdev->dev, &match, "connectors");
add_components(&pdev->dev, &match, "gpus");
-#else
- /* For non-DT case, it kinda sucks. We don't actually have a way
- * to know whether or not we are waiting for certain devices (or if
- * they are simply not present). But for non-DT we only need to
- * care about apq8064/apq8060/etc (all mdp4/a3xx):
- */
- static const char *devnames[] = {
- "hdmi_msm.0", "kgsl-3d0.0",
- };
- int i;
-
- DBG("Adding components..");
-
- for (i = 0; i < ARRAY_SIZE(devnames); i++) {
- struct device *dev;
-
- dev = bus_find_device_by_name(&platform_bus_type,
- NULL, devnames[i]);
- if (!dev) {
- dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
- return -EPROBE_DEFER;
- }
-
- component_match_add(&pdev->dev, &match, compare_dev, dev);
- }
-#endif
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
@@ -1138,8 +1092,10 @@ static const struct platform_device_id msm_id[] = {
};
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdp" }, /* mdp4 */
- { .compatible = "qcom,mdss_mdp" }, /* mdp5 */
+ { .compatible = "qcom,mdp4", .data = (void *) 4 }, /* mdp4 */
+ { .compatible = "qcom,mdp5", .data = (void *) 5 }, /* mdp5 */
+ /* to support downstream DT files */
+ { .compatible = "qcom,mdss_mdp", .data = (void *) 5 }, /* mdp5 */
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
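[Note: get_mdp_ver() now leans on of_device_get_match_data(), which matches the device against its driver's of_match_table and returns the .data field of the winning entry; here that is the MDP major version stashed as a cast integer in the dt_match[] table above. The retrieval side, sketched:]

    /* sketch: pull the version number back out of of_device_id.data */
    int ver = (int)(unsigned long)of_device_get_match_data(&pdev->dev);
    /* 0 when nothing matched, 4 or 5 for the table above */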
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9a713b7a009d..c1e7bba2fdb7 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -31,14 +31,9 @@
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/of_graph.h>
+#include <linux/of_device.h>
#include <asm/sizes.h>
-#ifndef CONFIG_OF
-#include <mach/board.h>
-#include <mach/socinfo.h>
-#include <mach/iommu_domains.h>
-#endif
-
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 3f6ec077b51d..d95af6eba602 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -121,7 +121,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
- drm_gem_object_unreference(fbdev->bo);
+ drm_gem_object_unreference_unlocked(fbdev->bo);
ret = PTR_ERR(fb);
goto fail;
}
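[Note: drm_gem_object_unreference() requires the caller to hold dev->struct_mutex, which this fbdev error path does not; the _unlocked variant takes the mutex itself when the refcount drops to zero, so it is the safe call here. The distinction, sketched:]

    /* sketch: same refcount drop, different locking contract */
    drm_gem_object_unreference(bo);          /* caller holds struct_mutex   */
    drm_gem_object_unreference_unlocked(bo); /* acquires the lock as needed */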
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 3d96b49fe662..6f04397d43a7 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1081,8 +1081,6 @@ nouveau_crtc_set_config(struct drm_mode_set *set)
}
static const struct drm_crtc_funcs nv04_crtc_funcs = {
- .save = nv_crtc_save,
- .restore = nv_crtc_restore,
.cursor_set = nv04_crtc_cursor_set,
.cursor_move = nv04_crtc_cursor_move,
.gamma_set = nv_crtc_gamma_set,
@@ -1123,6 +1121,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
nv_crtc->index = crtc_num;
nv_crtc->last_dpms = NV_DPMS_CLEARED;
+ nv_crtc->save = nv_crtc_save;
+ nv_crtc->restore = nv_crtc_restore;
+
drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 78cb033bc015..b48eec395f07 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -504,8 +504,6 @@ static void nv04_dac_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
.dpms = nv04_dac_dpms,
- .save = nv04_dac_save,
- .restore = nv04_dac_restore,
.mode_fixup = nv04_dac_mode_fixup,
.prepare = nv04_dac_prepare,
.commit = nv04_dac_commit,
@@ -515,8 +513,6 @@ static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = {
.dpms = nv04_dac_dpms,
- .save = nv04_dac_save,
- .restore = nv04_dac_restore,
.mode_fixup = nv04_dac_mode_fixup,
.prepare = nv04_dac_prepare,
.commit = nv04_dac_commit,
@@ -545,12 +541,16 @@ nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry)
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
+ nv_encoder->enc_save = nv04_dac_save;
+ nv_encoder->enc_restore = nv04_dac_restore;
+
if (nv_gf4_disp_arch(dev))
helper = &nv17_dac_helper_funcs;
else
helper = &nv04_dac_helper_funcs;
- drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC);
+ drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC,
+ NULL);
drm_encoder_helper_add(encoder, helper);
encoder->possible_crtcs = entry->heads;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 429ab5e3025a..05bfd151d1d8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -652,8 +652,6 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
.dpms = nv04_lvds_dpms,
- .save = nv04_dfp_save,
- .restore = nv04_dfp_restore,
.mode_fixup = nv04_dfp_mode_fixup,
.prepare = nv04_dfp_prepare,
.commit = nv04_dfp_commit,
@@ -663,8 +661,6 @@ static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = {
.dpms = nv04_tmds_dpms,
- .save = nv04_dfp_save,
- .restore = nv04_dfp_restore,
.mode_fixup = nv04_dfp_mode_fixup,
.prepare = nv04_dfp_prepare,
.commit = nv04_dfp_commit,
@@ -701,12 +697,15 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry)
if (!nv_encoder)
return -ENOMEM;
+ nv_encoder->enc_save = nv04_dfp_save;
+ nv_encoder->enc_restore = nv04_dfp_restore;
+
encoder = to_drm_encoder(nv_encoder);
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
- drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type);
+ drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type, NULL);
drm_encoder_helper_add(encoder, helper);
encoder->possible_crtcs = entry->heads;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 9e650081c357..b4a6bc433ef5 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -39,7 +39,8 @@ nv04_display_create(struct drm_device *dev)
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
- struct drm_crtc *crtc;
+ struct nouveau_encoder *nv_encoder;
+ struct nouveau_crtc *crtc;
struct nv04_display *disp;
int i, ret;
@@ -107,14 +108,11 @@ nv04_display_create(struct drm_device *dev)
}
/* Save previous state */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- crtc->funcs->save(crtc);
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- const struct drm_encoder_helper_funcs *func = encoder->helper_private;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+ crtc->save(&crtc->base);
- func->save(encoder);
- }
+ list_for_each_entry(nv_encoder, &dev->mode_config.encoder_list, base.base.head)
+ nv_encoder->enc_save(&nv_encoder->base.base);
nouveau_overlay_init(dev);
@@ -126,8 +124,9 @@ nv04_display_destroy(struct drm_device *dev)
{
struct nv04_display *disp = nv04_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct drm_encoder *encoder;
+ struct nouveau_encoder *encoder;
struct drm_crtc *crtc;
+ struct nouveau_crtc *nv_crtc;
/* Turn every CRTC off. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -139,14 +138,11 @@ nv04_display_destroy(struct drm_device *dev)
}
/* Restore state */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- const struct drm_encoder_helper_funcs *func = encoder->helper_private;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
+ encoder->enc_restore(&encoder->base.base);
- func->restore(encoder);
- }
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- crtc->funcs->restore(crtc);
+ list_for_each_entry(nv_crtc, &dev->mode_config.crtc_list, base.head)
+ nv_crtc->restore(&nv_crtc->base);
nouveau_hw_save_vga_fonts(dev, 0);
@@ -159,8 +155,8 @@ nv04_display_destroy(struct drm_device *dev)
int
nv04_display_init(struct drm_device *dev)
{
- struct drm_encoder *encoder;
- struct drm_crtc *crtc;
+ struct nouveau_encoder *encoder;
+ struct nouveau_crtc *crtc;
/* meh.. modeset apparently doesn't setup all the regs and depends
* on pre-existing state, for now load the state of the card *before*
@@ -170,14 +166,11 @@ nv04_display_init(struct drm_device *dev)
* save/restore "pre-load" state, but more general so we can save
* on suspend too.
*/
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- const struct drm_encoder_helper_funcs *func = encoder->helper_private;
-
- func->restore(encoder);
- }
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
+ encoder->enc_restore(&encoder->base.base);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- crtc->funcs->restore(crtc);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+ crtc->restore(&crtc->base);
return 0;
}
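[Note: with the drm core dropping the .save/.restore callbacks from drm_crtc_funcs and the encoder helper funcs, nouveau keeps its pre-NV50 state handling by moving the hooks into its own nouveau_crtc and nouveau_encoder structs and walking the core lists through the embedded base objects; nouveau_encoder embeds a drm_encoder_slave, hence the base.base member path. The traversal idiom, sketched:]

    /* sketch: iterate the drm core list via the embedded base members */
    struct nouveau_encoder *nv_encoder;

    list_for_each_entry(nv_encoder, &dev->mode_config.encoder_list,
                        base.base.head)
            nv_encoder->enc_save(&nv_encoder->base.base);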
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 5345eb5378a8..54e9fb9eb5c0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -192,8 +192,6 @@ static const struct drm_encoder_funcs nv04_tv_funcs = {
static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = {
.dpms = nv04_tv_dpms,
- .save = drm_i2c_encoder_save,
- .restore = drm_i2c_encoder_restore,
.mode_fixup = drm_i2c_encoder_mode_fixup,
.prepare = nv04_tv_prepare,
.commit = nv04_tv_commit,
@@ -225,9 +223,13 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
/* Initialize the common members */
encoder = to_drm_encoder(nv_encoder);
- drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC,
+ NULL);
drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs);
+ nv_encoder->enc_save = drm_i2c_encoder_save;
+ nv_encoder->enc_restore = drm_i2c_encoder_restore;
+
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
nv_encoder->dcb = entry;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index b734195d80a0..163317d26de9 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -769,10 +769,8 @@ static void nv17_tv_destroy(struct drm_encoder *encoder)
kfree(tv_enc);
}
-static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
+static const struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
.dpms = nv17_tv_dpms,
- .save = nv17_tv_save,
- .restore = nv17_tv_restore,
.mode_fixup = nv17_tv_mode_fixup,
.prepare = nv17_tv_prepare,
.commit = nv17_tv_commit,
@@ -780,14 +778,14 @@ static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
.detect = nv17_tv_detect,
};
-static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
+static const struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
.get_modes = nv17_tv_get_modes,
.mode_valid = nv17_tv_mode_valid,
.create_resources = nv17_tv_create_resources,
.set_property = nv17_tv_set_property,
};
-static struct drm_encoder_funcs nv17_tv_funcs = {
+static const struct drm_encoder_funcs nv17_tv_funcs = {
.destroy = nv17_tv_destroy,
};
@@ -816,10 +814,14 @@ nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
tv_enc->base.dcb = entry;
tv_enc->base.or = ffs(entry->or) - 1;
- drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC,
+ NULL);
drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
+ tv_enc->base.enc_save = nv17_tv_save;
+ tv_enc->base.enc_restore = nv17_tv_restore;
+
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 2e7cbe933533..5dd1d0111cac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -898,8 +898,6 @@ nouveau_connector_helper_funcs = {
static const struct drm_connector_funcs
nouveau_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = NULL,
- .restore = NULL,
.detect = nouveau_connector_detect,
.destroy = nouveau_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -910,8 +908,6 @@ nouveau_connector_funcs = {
static const struct drm_connector_funcs
nouveau_connector_funcs_lvds = {
.dpms = drm_helper_connector_dpms,
- .save = NULL,
- .restore = NULL,
.detect = nouveau_connector_detect_lvds,
.destroy = nouveau_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -944,8 +940,6 @@ nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
static const struct drm_connector_funcs
nouveau_connector_funcs_dp = {
.dpms = nouveau_connector_dp_dpms,
- .save = NULL,
- .restore = NULL,
.detect = nouveau_connector_detect,
.destroy = nouveau_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index f19cb1c5fc5a..863f10b8d818 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -73,6 +73,9 @@ struct nouveau_crtc {
int (*set_dither)(struct nouveau_crtc *crtc, bool update);
int (*set_scale)(struct nouveau_crtc *crtc, bool update);
int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update);
+
+ void (*save)(struct drm_crtc *crtc);
+ void (*restore)(struct drm_crtc *crtc);
};
static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index ea9d3bc91266..18676b8c1721 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -829,7 +829,6 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
struct drm_device *dev = drm->dev;
struct nouveau_page_flip_state *s;
unsigned long flags;
- int crtcid = -1;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -841,15 +840,19 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
- /* Vblank timestamps/counts are only correct on >= NV-50 */
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
- crtcid = s->crtc;
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+ drm_arm_vblank_event(dev, s->crtc, s->event);
+ } else {
+ drm_send_vblank_event(dev, s->crtc, s->event);
- drm_send_vblank_event(dev, crtcid, s->event);
+ /* Give up ownership of vblank for page-flipped crtc */
+ drm_vblank_put(dev, s->crtc);
+ }
+ } else {
+ /* Give up ownership of vblank for page-flipped crtc */
+ drm_vblank_put(dev, s->crtc);
}
-
- /* Give up ownership of vblank for page-flipped crtc */
- drm_vblank_put(dev, s->crtc);
list_del(&s->head);
if (ps)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 1d3ee5179ab8..b3a563c44bcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1046,10 +1046,6 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
goto err_free;
}
- err = drm_dev_set_unique(drm, "%s", dev_name(&pdev->dev));
- if (err < 0)
- goto err_free;
-
drm->platformdev = pdev;
platform_set_drvdata(pdev, drm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index b37da95105b0..ee6a6d3fc80f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -63,6 +63,9 @@ struct nouveau_encoder {
u32 datarate;
} dp;
};
+
+ void (*enc_save)(struct drm_encoder *encoder);
+ void (*enc_restore)(struct drm_encoder *encoder);
};
struct nouveau_encoder *
@@ -80,7 +83,7 @@ static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
return &enc->base.base;
}
-static inline struct drm_encoder_slave_funcs *
+static inline const struct drm_encoder_slave_funcs *
get_slave_funcs(struct drm_encoder *enc)
{
return to_encoder_slave(enc)->slave_funcs;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index c053c50b346a..44e1952582aa 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -28,6 +28,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_fb_helper.h>
#include <nvif/class.h>
@@ -1717,7 +1718,7 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type);
+ drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type, NULL);
drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
drm_mode_connector_attach_encoder(connector, encoder);
@@ -2125,7 +2126,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type);
+ drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type, NULL);
drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
drm_mode_connector_attach_encoder(connector, encoder);
@@ -2305,7 +2306,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
- drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type);
+ drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type, NULL);
drm_encoder_helper_add(encoder, &nv50_pior_hfunc);
drm_mode_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index ad09590e8a46..2ed0754ed19e 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -524,7 +524,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
omap_crtc->mgr = omap_dss_get_overlay_manager(channel);
ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
- &omap_crtc_funcs);
+ &omap_crtc_funcs, NULL);
if (ret < 0) {
kfree(omap_crtc);
return NULL;
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 7d9b32a0eb43..0c104ad7ef66 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -178,7 +178,7 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev,
encoder = &omap_encoder->base;
drm_encoder_init(dev, encoder, &omap_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
return encoder;
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 3054bda72688..d5ecabd6c14c 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -366,7 +366,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
ret = drm_universal_plane_init(dev, plane, (1 << priv->num_crtcs) - 1,
&omap_plane_funcs, omap_plane->formats,
- omap_plane->nformats, type);
+ omap_plane->nformats, type, NULL);
if (ret < 0)
goto error;
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index efb609510540..6df1f2a1bc52 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -87,14 +87,11 @@ struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
if (width == 0 || height == 0)
return NULL;
- tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
- pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
+ tcm = kzalloc(sizeof(*tcm), GFP_KERNEL);
+ pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
if (!tcm || !pvt)
goto error;
- memset(tcm, 0, sizeof(*tcm));
- memset(pvt, 0, sizeof(*pvt));
-
/* Updating the pointers to SiTA implementation APIs */
tcm->height = height;
tcm->width = width;
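[Note: kzalloc() is simply kmalloc() followed by zeroing, which is why the two explicit memset() calls become redundant:]

    /* sketch: kzalloc(size, flags) behaves like */
    void *p = kmalloc(size, flags);
    if (p)
        memset(p, 0, size);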
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 7d4704b1292b..1500ab99f548 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -31,6 +31,16 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for LG4573 RGB panel.
To compile this driver as a module, choose M here.
+config DRM_PANEL_PANASONIC_VVX10F034N00
+ tristate "Panasonic VVX10F034N00 1920x1200 video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Panasonic VVX10F034N00
+ WUXGA (1920x1200) Novatek NT1397-based DSI panel as found in some
+ Xperia Z2 tablets.
+
config DRM_PANEL_SAMSUNG_S6E8AA0
tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
@@ -51,4 +61,13 @@ config DRM_PANEL_SHARP_LQ101R1SX01
To compile this driver as a module, choose M here: the module
will be called panel-sharp-lq101r1sx01.
+config DRM_PANEL_SHARP_LS043T1LE01
+ tristate "Sharp LS043T1LE01 qHD video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Sharp LS043T1LE01 qHD
+ (540x960) DSI panel as found on the Qualcomm APQ8074 Dragonboard.
+
endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index d0f016dd7ddb..f277eed933d6 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,5 +1,7 @@
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
+obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
new file mode 100644
index 000000000000..7f915f706fa6
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2015 Red Hat
+ * Copyright (C) 2015 Sony Mobile Communications Inc.
+ * Author: Werner Johansson <werner.johansson@sonymobile.com>
+ *
+ * Based on AUO panel driver by Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+/*
+ * When power is turned off to this panel, a minimum off time of 500 ms has
+ * to be observed before powering back on, as there is no external reset
+ * pin. Keep track of the earliest wakeup time and delay any subsequent
+ * prepare call accordingly.
+ */
+#define MIN_POFF_MS (500)
+
+struct wuxga_nt_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+
+ struct backlight_device *backlight;
+ struct regulator *supply;
+
+ bool prepared;
+ bool enabled;
+
+ ktime_t earliest_wake;
+
+ const struct drm_display_mode *mode;
+};
+
+static inline struct wuxga_nt_panel *to_wuxga_nt_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct wuxga_nt_panel, base);
+}
+
+static int wuxga_nt_panel_on(struct wuxga_nt_panel *wuxga_nt)
+{
+ struct mipi_dsi_device *dsi = wuxga_nt->dsi;
+ int ret;
+
+ ret = mipi_dsi_turn_on_peripheral(dsi);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wuxga_nt_panel_disable(struct drm_panel *panel)
+{
+ struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
+
+ if (!wuxga_nt->enabled)
+ return 0;
+
+ mipi_dsi_shutdown_peripheral(wuxga_nt->dsi);
+
+ if (wuxga_nt->backlight) {
+ wuxga_nt->backlight->props.power = FB_BLANK_POWERDOWN;
+ wuxga_nt->backlight->props.state |= BL_CORE_FBBLANK;
+ backlight_update_status(wuxga_nt->backlight);
+ }
+
+ wuxga_nt->enabled = false;
+
+ return 0;
+}
+
+static int wuxga_nt_panel_unprepare(struct drm_panel *panel)
+{
+ struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
+
+ if (!wuxga_nt->prepared)
+ return 0;
+
+ regulator_disable(wuxga_nt->supply);
+ wuxga_nt->earliest_wake = ktime_add_ms(ktime_get_real(), MIN_POFF_MS);
+ wuxga_nt->prepared = false;
+
+ return 0;
+}
+
+static int wuxga_nt_panel_prepare(struct drm_panel *panel)
+{
+ struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
+ int ret;
+ s64 enablewait;
+
+ if (wuxga_nt->prepared)
+ return 0;
+
+ /*
+ * If the user re-enabled the panel before the required off time has
+ * elapsed, wait out the remaining period before re-enabling the regulator.
+ */
+ enablewait = ktime_ms_delta(wuxga_nt->earliest_wake, ktime_get_real());
+
+ /* Sanity check, this should never happen */
+ if (enablewait > MIN_POFF_MS)
+ enablewait = MIN_POFF_MS;
+
+ if (enablewait > 0)
+ msleep(enablewait);
+
+ ret = regulator_enable(wuxga_nt->supply);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * A minimum delay of 250 ms is required after power-up before commands
+ * can be sent.
+ */
+ msleep(250);
+
+ ret = wuxga_nt_panel_on(wuxga_nt);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to set panel on: %d\n", ret);
+ goto poweroff;
+ }
+
+ wuxga_nt->prepared = true;
+
+ return 0;
+
+poweroff:
+ regulator_disable(wuxga_nt->supply);
+
+ return ret;
+}
+
+static int wuxga_nt_panel_enable(struct drm_panel *panel)
+{
+ struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
+
+ if (wuxga_nt->enabled)
+ return 0;
+
+ if (wuxga_nt->backlight) {
+ wuxga_nt->backlight->props.power = FB_BLANK_UNBLANK;
+ wuxga_nt->backlight->props.state &= ~BL_CORE_FBBLANK;
+ backlight_update_status(wuxga_nt->backlight);
+ }
+
+ wuxga_nt->enabled = true;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 164402,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 152,
+ .hsync_end = 1920 + 152 + 52,
+ .htotal = 1920 + 152 + 52 + 20,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 24,
+ .vsync_end = 1200 + 24 + 6,
+ .vtotal = 1200 + 24 + 6 + 48,
+ .vrefresh = 60,
+};
+
+static int wuxga_nt_panel_get_modes(struct drm_panel *panel)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(panel->connector, mode);
+
+ panel->connector->display_info.width_mm = 217;
+ panel->connector->display_info.height_mm = 136;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs wuxga_nt_panel_funcs = {
+ .disable = wuxga_nt_panel_disable,
+ .unprepare = wuxga_nt_panel_unprepare,
+ .prepare = wuxga_nt_panel_prepare,
+ .enable = wuxga_nt_panel_enable,
+ .get_modes = wuxga_nt_panel_get_modes,
+};
+
+static const struct of_device_id wuxga_nt_of_match[] = {
+ { .compatible = "panasonic,vvx10f034n00", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wuxga_nt_of_match);
+
+static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
+{
+ struct device *dev = &wuxga_nt->dsi->dev;
+ struct device_node *np;
+ int ret;
+
+ wuxga_nt->mode = &default_mode;
+
+ wuxga_nt->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(wuxga_nt->supply))
+ return PTR_ERR(wuxga_nt->supply);
+
+ np = of_parse_phandle(dev->of_node, "backlight", 0);
+ if (np) {
+ wuxga_nt->backlight = of_find_backlight_by_node(np);
+ of_node_put(np);
+
+ if (!wuxga_nt->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ drm_panel_init(&wuxga_nt->base);
+ wuxga_nt->base.funcs = &wuxga_nt_panel_funcs;
+ wuxga_nt->base.dev = &wuxga_nt->dsi->dev;
+
+ ret = drm_panel_add(&wuxga_nt->base);
+ if (ret < 0)
+ goto put_backlight;
+
+ return 0;
+
+put_backlight:
+ if (wuxga_nt->backlight)
+ put_device(&wuxga_nt->backlight->dev);
+
+ return ret;
+}
+
+static void wuxga_nt_panel_del(struct wuxga_nt_panel *wuxga_nt)
+{
+ if (wuxga_nt->base.dev)
+ drm_panel_remove(&wuxga_nt->base);
+
+ if (wuxga_nt->backlight)
+ put_device(&wuxga_nt->backlight->dev);
+}
+
+static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct wuxga_nt_panel *wuxga_nt;
+ int ret;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_HSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_LPM;
+
+ wuxga_nt = devm_kzalloc(&dsi->dev, sizeof(*wuxga_nt), GFP_KERNEL);
+ if (!wuxga_nt)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, wuxga_nt);
+
+ wuxga_nt->dsi = dsi;
+
+ ret = wuxga_nt_panel_add(wuxga_nt);
+ if (ret < 0)
+ return ret;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = wuxga_nt_panel_disable(&wuxga_nt->base);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_detach(&wuxga_nt->base);
+ wuxga_nt_panel_del(wuxga_nt);
+
+ return 0;
+}
+
+static void wuxga_nt_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
+
+ wuxga_nt_panel_disable(&wuxga_nt->base);
+}
+
+static struct mipi_dsi_driver wuxga_nt_panel_driver = {
+ .driver = {
+ .name = "panel-panasonic-vvx10f034n00",
+ .of_match_table = wuxga_nt_of_match,
+ },
+ .probe = wuxga_nt_panel_probe,
+ .remove = wuxga_nt_panel_remove,
+ .shutdown = wuxga_nt_panel_shutdown,
+};
+module_mipi_dsi_driver(wuxga_nt_panel_driver);
+
+MODULE_AUTHOR("Werner Johansson <werner.johansson@sonymobile.com>");
+MODULE_DESCRIPTION("Panasonic VVX10F034N00 Novatek NT1397-based WUXGA (1920x1200) video mode panel driver");
+MODULE_LICENSE("GPL v2");
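[Note: the 500 ms minimum off-time above is enforced without blocking in unprepare(): the driver records the earliest legal wake with ktime_add_ms() and lets the next prepare() sleep out whatever remains. The pattern, reduced to its core:]

    /* sketch: stamp the earliest re-enable time on power-off ... */
    wuxga_nt->earliest_wake = ktime_add_ms(ktime_get_real(), MIN_POFF_MS);

    /* ... and on power-on, wait only for the unexpired remainder */
    s64 remaining = ktime_ms_delta(wuxga_nt->earliest_wake,
                                   ktime_get_real());
    if (remaining > 0)
        msleep(min_t(s64, remaining, MIN_POFF_MS));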
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
new file mode 100644
index 000000000000..3aeb0bda4947
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) 2015 Red Hat
+ * Copyright (C) 2015 Sony Mobile Communications Inc.
+ * Author: Werner Johansson <werner.johansson@sonymobile.com>
+ *
+ * Based on AUO panel driver by Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct sharp_nt_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+
+ struct backlight_device *backlight;
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+ bool enabled;
+
+ const struct drm_display_mode *mode;
+};
+
+static inline struct sharp_nt_panel *to_sharp_nt_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct sharp_nt_panel, base);
+}
+
+static int sharp_nt_panel_init(struct sharp_nt_panel *sharp_nt)
+{
+ struct mipi_dsi_device *dsi = sharp_nt->dsi;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0)
+ return ret;
+
+ msleep(120);
+
+ /* Novatek two-lane operation */
+ ret = mipi_dsi_dcs_write(dsi, 0xae, (u8[]){ 0x03 }, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Set both MCU and RGB I/F to 24bpp */
+ ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT |
+ (MIPI_DCS_PIXEL_FMT_24BIT << 4));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int sharp_nt_panel_on(struct sharp_nt_panel *sharp_nt)
+{
+ struct mipi_dsi_device *dsi = sharp_nt->dsi;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int sharp_nt_panel_off(struct sharp_nt_panel *sharp_nt)
+{
+ struct mipi_dsi_device *dsi = sharp_nt->dsi;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int sharp_nt_panel_disable(struct drm_panel *panel)
+{
+ struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
+
+ if (!sharp_nt->enabled)
+ return 0;
+
+ if (sharp_nt->backlight) {
+ sharp_nt->backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight_update_status(sharp_nt->backlight);
+ }
+
+ sharp_nt->enabled = false;
+
+ return 0;
+}
+
+static int sharp_nt_panel_unprepare(struct drm_panel *panel)
+{
+ struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
+ int ret;
+
+ if (!sharp_nt->prepared)
+ return 0;
+
+ ret = sharp_nt_panel_off(sharp_nt);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to set panel off: %d\n", ret);
+ return ret;
+ }
+
+ regulator_disable(sharp_nt->supply);
+ if (sharp_nt->reset_gpio)
+ gpiod_set_value(sharp_nt->reset_gpio, 0);
+
+ sharp_nt->prepared = false;
+
+ return 0;
+}
+
+static int sharp_nt_panel_prepare(struct drm_panel *panel)
+{
+ struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
+ int ret;
+
+ if (sharp_nt->prepared)
+ return 0;
+
+ ret = regulator_enable(sharp_nt->supply);
+ if (ret < 0)
+ return ret;
+
+ msleep(20);
+
+ if (sharp_nt->reset_gpio) {
+ gpiod_set_value(sharp_nt->reset_gpio, 1);
+ msleep(1);
+ gpiod_set_value(sharp_nt->reset_gpio, 0);
+ msleep(1);
+ gpiod_set_value(sharp_nt->reset_gpio, 1);
+ msleep(10);
+ }
+
+ ret = sharp_nt_panel_init(sharp_nt);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to init panel: %d\n", ret);
+ goto poweroff;
+ }
+
+ ret = sharp_nt_panel_on(sharp_nt);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to set panel on: %d\n", ret);
+ goto poweroff;
+ }
+
+ sharp_nt->prepared = true;
+
+ return 0;
+
+poweroff:
+ regulator_disable(sharp_nt->supply);
+ if (sharp_nt->reset_gpio)
+ gpiod_set_value(sharp_nt->reset_gpio, 0);
+ return ret;
+}
+
+static int sharp_nt_panel_enable(struct drm_panel *panel)
+{
+ struct sharp_nt_panel *sharp_nt = to_sharp_nt_panel(panel);
+
+ if (sharp_nt->enabled)
+ return 0;
+
+ if (sharp_nt->backlight) {
+ sharp_nt->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(sharp_nt->backlight);
+ }
+
+ sharp_nt->enabled = true;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 41118,
+ .hdisplay = 540,
+ .hsync_start = 540 + 48,
+ .hsync_end = 540 + 48 + 80,
+ .htotal = 540 + 48 + 80 + 32,
+ .vdisplay = 960,
+ .vsync_start = 960 + 3,
+ .vsync_end = 960 + 3 + 15,
+ .vtotal = 960 + 3 + 15 + 1,
+ .vrefresh = 60,
+};
+
+static int sharp_nt_panel_get_modes(struct drm_panel *panel)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(panel->connector, mode);
+
+ panel->connector->display_info.width_mm = 54;
+ panel->connector->display_info.height_mm = 95;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs sharp_nt_panel_funcs = {
+ .disable = sharp_nt_panel_disable,
+ .unprepare = sharp_nt_panel_unprepare,
+ .prepare = sharp_nt_panel_prepare,
+ .enable = sharp_nt_panel_enable,
+ .get_modes = sharp_nt_panel_get_modes,
+};
+
+static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
+{
+ struct device *dev = &sharp_nt->dsi->dev;
+ struct device_node *np;
+ int ret;
+
+ sharp_nt->mode = &default_mode;
+
+ sharp_nt->supply = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(sharp_nt->supply))
+ return PTR_ERR(sharp_nt->supply);
+
+ sharp_nt->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(sharp_nt->reset_gpio)) {
+ dev_err(dev, "cannot get reset-gpios %ld\n",
+ PTR_ERR(sharp_nt->reset_gpio));
+ sharp_nt->reset_gpio = NULL;
+ } else {
+ gpiod_set_value(sharp_nt->reset_gpio, 0);
+ }
+
+ np = of_parse_phandle(dev->of_node, "backlight", 0);
+ if (np) {
+ sharp_nt->backlight = of_find_backlight_by_node(np);
+ of_node_put(np);
+
+ if (!sharp_nt->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ drm_panel_init(&sharp_nt->base);
+ sharp_nt->base.funcs = &sharp_nt_panel_funcs;
+ sharp_nt->base.dev = &sharp_nt->dsi->dev;
+
+ ret = drm_panel_add(&sharp_nt->base);
+ if (ret < 0)
+ goto put_backlight;
+
+ return 0;
+
+put_backlight:
+ if (sharp_nt->backlight)
+ put_device(&sharp_nt->backlight->dev);
+
+ return ret;
+}
+
+static void sharp_nt_panel_del(struct sharp_nt_panel *sharp_nt)
+{
+ if (sharp_nt->base.dev)
+ drm_panel_remove(&sharp_nt->base);
+
+ if (sharp_nt->backlight)
+ put_device(&sharp_nt->backlight->dev);
+}
+
+static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct sharp_nt_panel *sharp_nt;
+ int ret;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_HSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_EOT_PACKET;
+
+ sharp_nt = devm_kzalloc(&dsi->dev, sizeof(*sharp_nt), GFP_KERNEL);
+ if (!sharp_nt)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, sharp_nt);
+
+ sharp_nt->dsi = dsi;
+
+ ret = sharp_nt_panel_add(sharp_nt);
+ if (ret < 0)
+ return ret;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = sharp_nt_panel_disable(&sharp_nt->base);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_detach(&sharp_nt->base);
+ sharp_nt_panel_del(sharp_nt);
+
+ return 0;
+}
+
+static void sharp_nt_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
+
+ sharp_nt_panel_disable(&sharp_nt->base);
+}
+
+static const struct of_device_id sharp_nt_of_match[] = {
+ { .compatible = "sharp,ls043t1le01-qhd", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sharp_nt_of_match);
+
+static struct mipi_dsi_driver sharp_nt_panel_driver = {
+ .driver = {
+ .name = "panel-sharp-ls043t1le01-qhd",
+ .of_match_table = sharp_nt_of_match,
+ },
+ .probe = sharp_nt_panel_probe,
+ .remove = sharp_nt_panel_remove,
+ .shutdown = sharp_nt_panel_shutdown,
+};
+module_mipi_dsi_driver(sharp_nt_panel_driver);
+
+MODULE_AUTHOR("Werner Johansson <werner.johansson@sonymobile.com>");
+MODULE_DESCRIPTION("Sharp LS043T1LE01 NT35565-based qHD (540x960) video mode panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f97b73ec4713..f88a631c43ab 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -44,6 +44,10 @@ struct panel_desc {
unsigned int bpc;
+ /**
+ * @width: width (in millimeters) of the panel's active display area
+ * @height: height (in millimeters) of the panel's active display area
+ */
struct {
unsigned int width;
unsigned int height;
@@ -832,6 +836,34 @@ static const struct panel_desc innolux_g121i1_l01 = {
},
};
+static const struct drm_display_mode innolux_g121x1_l03_mode = {
+ .clock = 65000,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 0,
+ .hsync_end = 1024 + 1,
+ .htotal = 1024 + 0 + 1 + 320,
+ .vdisplay = 768,
+ .vsync_start = 768 + 38,
+ .vsync_end = 768 + 38 + 1,
+ .vtotal = 768 + 38 + 1 + 0,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc innolux_g121x1_l03 = {
+ .modes = &innolux_g121x1_l03_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 246,
+ .height = 185,
+ },
+ .delay = {
+ .enable = 200,
+ .unprepare = 200,
+ .disable = 400,
+ },
+};
+
static const struct drm_display_mode innolux_n116bge_mode = {
.clock = 76420,
.hdisplay = 1366,
@@ -902,6 +934,30 @@ static const struct panel_desc innolux_zj070na_01p = {
},
};
+static const struct display_timing kyo_tcg121xglp_timing = {
+ .pixelclock = { 52000000, 65000000, 71000000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 2, 2, 2 },
+ .hback_porch = { 2, 2, 2 },
+ .hsync_len = { 86, 124, 244 },
+ .vactive = { 768, 768, 768 },
+ .vfront_porch = { 2, 2, 2 },
+ .vback_porch = { 2, 2, 2 },
+ .vsync_len = { 6, 34, 73 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc kyo_tcg121xglp = {
+ .timings = &kyo_tcg121xglp_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 246,
+ .height = 184,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct drm_display_mode lg_lb070wv8_mode = {
.clock = 33246,
.hdisplay = 800,
@@ -1027,6 +1083,30 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode qd43003c0_40_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 8,
+ .hsync_end = 480 + 8 + 4,
+ .htotal = 480 + 8 + 4 + 39,
+ .vdisplay = 272,
+ .vsync_start = 272 + 4,
+ .vsync_end = 272 + 4 + 10,
+ .vtotal = 272 + 4 + 10 + 2,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc qd43003c0_40 = {
+ .modes = &qd43003c0_40_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 53,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -1158,6 +1238,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible ="innolux,g121i1-l01",
.data = &innolux_g121i1_l01
}, {
+ .compatible = "innolux,g121x1-l03",
+ .data = &innolux_g121x1_l03,
+ }, {
.compatible = "innolux,n116bge",
.data = &innolux_n116bge,
}, {
@@ -1167,6 +1250,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
+ .compatible = "kyo,tcg121xglp",
+ .data = &kyo_tcg121xglp,
+ }, {
.compatible = "lg,lb070wv8",
.data = &lg_lb070wv8,
}, {
@@ -1182,6 +1268,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "ortustech,com43h4m85ulc",
.data = &ortustech_com43h4m85ulc,
}, {
+ .compatible = "qiaodian,qd43003c0-40",
+ .data = &qd43003c0_40,
+ }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
@@ -1263,6 +1352,36 @@ static const struct panel_desc_dsi auo_b080uan01 = {
.lanes = 4,
};
+static const struct drm_display_mode boe_tv080wum_nl0_mode = {
+ .clock = 160000,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 120,
+ .hsync_end = 1200 + 120 + 20,
+ .htotal = 1200 + 120 + 20 + 21,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 21,
+ .vsync_end = 1920 + 21 + 3,
+ .vtotal = 1920 + 21 + 3 + 18,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc_dsi boe_tv080wum_nl0 = {
+ .desc = {
+ .modes = &boe_tv080wum_nl0_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 107,
+ .height = 172,
+ },
+ },
+ .flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
.clock = 71000,
.hdisplay = 800,
@@ -1348,11 +1467,15 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.lanes = 4,
};
+
static const struct of_device_id dsi_of_match[] = {
{
.compatible = "auo,b080uan01",
.data = &auo_b080uan01
}, {
+ .compatible = "boe,tv080wum-nl0",
+ .data = &boe_tv080wum_nl0
+ }, {
.compatible = "lg,ld070wx3-sl01",
.data = &lg_ld070wx3_sl01
}, {
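[Note: unlike the fixed drm_display_mode entries, the kyo_tcg121xglp entry above is described with a struct display_timing, where each { a, b, c } triple is a struct timing_entry holding minimum, typical and maximum values. A hedged sketch of how such triples are read; htotal_typ() is a hypothetical helper:]

    #include <video/display_timing.h>

    /* Hedged sketch: panel-simple derives its default mode from the
     * typ member of each { min, typ, max } triple. */
    static u32 htotal_typ(const struct display_timing *dt)
    {
            return dt->hactive.typ + dt->hfront_porch.typ +
                   dt->hsync_len.typ + dt->hback_porch.typ;
    }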
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index cddba079197f..86276519b2ef 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -876,16 +876,6 @@ static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
.best_encoder = qxl_best_encoder,
};
-static void qxl_conn_save(struct drm_connector *connector)
-{
- DRM_DEBUG("\n");
-}
-
-static void qxl_conn_restore(struct drm_connector *connector)
-{
- DRM_DEBUG("\n");
-}
-
static enum drm_connector_status qxl_conn_detect(
struct drm_connector *connector,
bool force)
@@ -932,10 +922,8 @@ static void qxl_conn_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs qxl_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = qxl_conn_save,
- .restore = qxl_conn_restore,
.detect = qxl_conn_detect,
- .fill_modes = drm_helper_probe_single_connector_modes_nomerge,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.set_property = qxl_conn_set_property,
.destroy = qxl_conn_destroy,
};
@@ -980,7 +968,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
&qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
- DRM_MODE_ENCODER_VIRTUAL);
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
/* we get HPD via client monitors config */
connector->polled = DRM_CONNECTOR_POLL_HPD;
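[Note: the NULL added to drm_encoder_init() here, and to the drm_crtc_init_with_planes()/drm_universal_plane_init() calls throughout the rest of this patch, tracks a core API change: the init helpers grew a trailing printf-style name argument. A hedged sketch of calling the new signature; my_enc_funcs and my_add_encoder are hypothetical:]

    #include <drm/drm_crtc.h>

    static const struct drm_encoder_funcs my_enc_funcs = {
            .destroy = drm_encoder_cleanup,
    };

    static int my_add_encoder(struct drm_device *dev,
                              struct drm_encoder *encoder, int idx)
    {
            /* NULL name: the core generates a default such as
             * "TMDS-<id>"; alternatively pass a format string, e.g.
             * drm_encoder_init(..., "tmds-%d", idx). */
            return drm_encoder_init(dev, encoder, &my_enc_funcs,
                                    DRM_MODE_ENCODER_TMDS, NULL);
    }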
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index b28370e014c6..5e1d7899dd72 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -32,7 +32,7 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
struct qxl_bo *bo;
struct qxl_device *qdev;
- bo = container_of(tbo, struct qxl_bo, tbo);
+ bo = to_qxl_bo(tbo);
qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
qxl_surface_evict(qdev, bo, false);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 0cbc4c987164..953412766416 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -201,7 +201,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
placement->num_busy_placement = 1;
return;
}
- qbo = container_of(bo, struct qxl_bo, tbo);
+ qbo = to_qxl_bo(bo);
qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
*placement = qbo->placement;
}
@@ -365,7 +365,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
if (!qxl_ttm_bo_is_qxl_bo(bo))
return;
- qbo = container_of(bo, struct qxl_bo, tbo);
+ qbo = to_qxl_bo(bo);
qdev = qbo->gem_base.dev->dev_private;
if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
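[Note: the qxl hunks above replace open-coded container_of() casts with a to_qxl_bo() helper. A hedged sketch of what such a helper looks like; the real definition lives in qxl headers and may differ in detail. The to_sharp_nt_panel() helper earlier in this patch follows the same pattern:]

    /* Hedged sketch of the upcast helper: valid only because struct
     * qxl_bo embeds its ttm_buffer_object as the "tbo" member. */
    static inline struct qxl_bo *to_qxl_bo(struct ttm_buffer_object *tbo)
    {
            return container_of(tbo, struct qxl_bo, tbo);
    }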
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dac78ad24b31..801dd60ac192 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -25,6 +25,7 @@
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/radeon_drm.h>
#include <drm/drm_fixed.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index bb292143997e..01b20e14a247 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2767,23 +2767,27 @@ radeon_add_atom_encoder(struct drm_device *dev,
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_encoder->rmx_type = RMX_FULL;
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+ DRM_MODE_ENCODER_LVDS, NULL);
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
} else {
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+ DRM_MODE_ENCODER_TVDAC, NULL);
radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
break;
@@ -2797,13 +2801,16 @@ radeon_add_atom_encoder(struct drm_device *dev,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_encoder->rmx_type = RMX_FULL;
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+ DRM_MODE_ENCODER_LVDS, NULL);
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
} else {
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
@@ -2820,11 +2827,14 @@ radeon_add_atom_encoder(struct drm_device *dev,
/* these are handled by the primary encoders */
radeon_encoder->is_ext_encoder = true;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+ DRM_MODE_ENCODER_LVDS, NULL);
else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
else
- drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
break;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index f16b60b6a30f..1e3a80165309 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -8472,7 +8472,7 @@ restart_ih:
if (queue_dp)
schedule_work(&rdev->dp_work);
if (queue_hotplug)
- schedule_work(&rdev->hotplug_work);
+ schedule_delayed_work(&rdev->hotplug_work, 0);
if (queue_reset) {
rdev->needs_reset = true;
wake_up_all(&rdev->fence_queue);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e533db11f70a..2ad462896896 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -5347,7 +5347,7 @@ restart_ih:
if (queue_dp)
schedule_work(&rdev->dp_work);
if (queue_hotplug)
- schedule_work(&rdev->hotplug_work);
+ schedule_delayed_work(&rdev->hotplug_work, 0);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
if (queue_thermal && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index aff1e4d8098d..9e7e2bf03b81 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -806,7 +806,7 @@ int r100_irq_process(struct radeon_device *rdev)
status = r100_irq_ack(rdev);
}
if (queue_hotplug)
- schedule_work(&rdev->hotplug_work);
+ schedule_delayed_work(&rdev->hotplug_work, 0);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS400:
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 4ea5b10ff5f4..cc2fdf0be37a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4276,7 +4276,7 @@ restart_ih:
WREG32(IH_RB_RPTR, rptr);
}
if (queue_hotplug)
- schedule_work(&rdev->hotplug_work);
+ schedule_delayed_work(&rdev->hotplug_work, 0);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
if (queue_thermal && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a9955e85009a..5ae6db98aa4d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2415,7 +2415,7 @@ struct radeon_device {
struct r600_ih ih; /* r6/700 interrupt ring */
struct radeon_rlc rlc;
struct radeon_mec mec;
- struct work_struct hotplug_work;
+ struct delayed_work hotplug_work;
struct work_struct dp_work;
struct work_struct audio_work;
int num_crtc; /* number of crtcs */
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index fe994aac3b04..c77d349c561c 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -54,6 +54,9 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
/* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
PCI_VENDOR_ID_IBM, 0x0550, 1},
+ /* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+ PCI_VENDOR_ID_IBM, 0x054d, 1},
/* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
PCI_VENDOR_ID_IBM, 0x0530, 1},
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5a2cafb4f1bc..340f3f549f29 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1234,13 +1234,32 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (r < 0)
return connector_status_disconnected;
+ if (radeon_connector->detected_hpd_without_ddc) {
+ force = true;
+ radeon_connector->detected_hpd_without_ddc = false;
+ }
+
if (!force && radeon_check_hpd_status_unchanged(connector)) {
ret = connector->status;
goto exit;
}
- if (radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus) {
dret = radeon_ddc_probe(radeon_connector, false);
+
+ /* Sometimes the pins required for the DDC probe on DVI
+ * connectors don't make contact at the same time as the ones
+ * for HPD. If the DDC probe fails even though we had an HPD
+ * signal, try again later. */
+ if (!dret && !force &&
+ connector->status != connector_status_connected) {
+ DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
+ radeon_connector->detected_hpd_without_ddc = true;
+ schedule_delayed_work(&rdev->hotplug_work,
+ msecs_to_jiffies(1000));
+ goto exit;
+ }
+ }
if (dret) {
radeon_connector->detected_by_load = false;
radeon_connector_free_edid(connector);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index b431c9c2b247..c236f6fec245 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -647,7 +647,7 @@ radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector)
}
drm_encoder_init(dev, &radeon_encoder->base, &radeon_dp_mst_enc_funcs,
- DRM_MODE_ENCODER_DPMST);
+ DRM_MODE_ENCODER_DPMST, NULL);
drm_encoder_helper_add(encoder, &radeon_mst_helper_funcs);
mst_enc = radeon_encoder->enc_priv;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 171d3e43c30c..979f3bf65f2c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -74,7 +74,7 @@ irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
static void radeon_hotplug_work_func(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
- hotplug_work);
+ hotplug_work.work);
struct drm_device *dev = rdev->ddev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
@@ -302,7 +302,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
}
}
- INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
@@ -310,7 +310,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
if (r) {
rdev->irq.installed = false;
- flush_work(&rdev->hotplug_work);
+ flush_delayed_work(&rdev->hotplug_work);
return r;
}
@@ -333,7 +333,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
rdev->irq.installed = false;
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
- flush_work(&rdev->hotplug_work);
+ flush_delayed_work(&rdev->hotplug_work);
}
}
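[Note: the work_struct to delayed_work conversion recurs across cik.c, evergreen.c, r100.c, r600.c, rs600.c and si.c above. A minimal sketch of the pattern, with hypothetical my_dev/my_hotplug_func names:]

    #include <linux/workqueue.h>

    struct my_dev {
            struct delayed_work hotplug_work;
    };

    static void my_hotplug_func(struct work_struct *work)
    {
            /* container_of() must now go through the embedded .work
             * member, as in the radeon_hotplug_work_func change. */
            struct my_dev *mdev = container_of(work, struct my_dev,
                                               hotplug_work.work);

            (void)mdev;     /* re-detect connectors here */
    }

    static void my_init_and_kick(struct my_dev *mdev)
    {
            INIT_DELAYED_WORK(&mdev->hotplug_work, my_hotplug_func);

            /* a delay of 0 behaves like the old schedule_work(),
             * while callers may now also defer, as the DVI retry in
             * radeon_connectors.c above does with
             * msecs_to_jiffies(1000) */
            schedule_delayed_work(&mdev->hotplug_work, 0);

            flush_delayed_work(&mdev->hotplug_work);
    }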
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 678b4386540d..32b338ff436b 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -25,6 +25,7 @@
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/radeon_drm.h>
#include <drm/drm_fixed.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 30de43366eae..88dc973fb209 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1772,7 +1772,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
encoder->possible_crtcs = 0x1;
- drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS, NULL);
drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
if (rdev->is_atom_bios)
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
@@ -1781,12 +1782,14 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
radeon_encoder->rmx_type = RMX_FULL;
break;
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs);
radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
- drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC);
+ drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs);
if (rdev->is_atom_bios)
radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder);
@@ -1794,7 +1797,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
- drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs,
+ DRM_MODE_ENCODER_TVDAC, NULL);
drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs);
if (rdev->is_atom_bios)
radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder);
@@ -1802,7 +1806,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_
radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder);
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
if (!rdev->is_atom_bios)
radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 2c8331078529..bb75201a24ba 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -554,6 +554,7 @@ struct radeon_connector {
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
+ bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
uint16_t connector_object_id;
struct radeon_hpd hpd;
struct radeon_router router;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 97a904835759..6244f4e44e9a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -813,7 +813,7 @@ int rs600_irq_process(struct radeon_device *rdev)
status = rs600_irq_ack(rdev);
}
if (queue_hotplug)
- schedule_work(&rdev->hotplug_work);
+ schedule_delayed_work(&rdev->hotplug_work, 0);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
if (rdev->msi_enabled) {
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3d5d41240b64..f878d6962da5 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6851,7 +6851,7 @@ restart_ih:
if (queue_dp)
schedule_work(&rdev->dp_work);
if (queue_hotplug)
- schedule_work(&rdev->hotplug_work);
+ schedule_delayed_work(&rdev->hotplug_work, 0);
if (queue_thermal && rdev->pm.dpm_enabled)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 48cb19949ca3..88a4b706be16 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -613,7 +613,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
ret = drm_crtc_init_with_planes(rcdu->ddev, crtc,
&rgrp->planes[index % 2].plane,
- NULL, &crtc_funcs);
+ NULL, &crtc_funcs, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index d0ae1e8009c6..c08700757feb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -173,7 +173,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
goto done;
} else {
ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
- encoder_type);
+ encoder_type, NULL);
if (ret < 0)
goto done;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index 96f2eb43713c..a37b6e2fe51a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -28,7 +28,7 @@ static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct rcar_du_connector *con = to_rcar_connector(connector);
struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
- struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
if (sfuncs->get_modes == NULL)
return 0;
@@ -41,7 +41,7 @@ static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector,
{
struct rcar_du_connector *con = to_rcar_connector(connector);
struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
- struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
if (sfuncs->mode_valid == NULL)
return MODE_OK;
@@ -66,7 +66,7 @@ rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct rcar_du_connector *con = to_rcar_connector(connector);
struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
- struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
if (sfuncs->detect == NULL)
return connector_status_unknown;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 81da8419282b..2567efcbee36 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -35,7 +35,7 @@ struct rcar_du_hdmienc {
static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
if (sfuncs->dpms)
sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF);
@@ -50,7 +50,7 @@ static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
if (hdmienc->renc->lvds)
rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
@@ -67,7 +67,7 @@ static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
const struct drm_display_mode *mode = &crtc_state->mode;
@@ -89,7 +89,7 @@ static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
+ const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
if (sfuncs->mode_set)
sfuncs->mode_set(encoder, mode, adjusted_mode);
@@ -151,7 +151,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
goto error;
ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
if (ret < 0)
goto error;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index ffa583712cd9..c3ed9522c0e1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -410,7 +410,8 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
&rcar_du_plane_funcs, formats,
- ARRAY_SIZE(formats), type);
+ ARRAY_SIZE(formats), type,
+ NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 80d6fc8a5cee..bddcabd7a370 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -173,7 +173,7 @@ dw_hdmi_rockchip_mode_valid(struct drm_connector *connector,
return (valid) ? MODE_OK : MODE_BAD;
}
-static struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = {
+static const struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
@@ -218,7 +218,7 @@ static void dw_hdmi_rockchip_encoder_prepare(struct drm_encoder *encoder)
ROCKCHIP_OUT_MODE_AAAA);
}
-static struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_funcs = {
.mode_fixup = dw_hdmi_rockchip_encoder_mode_fixup,
.mode_set = dw_hdmi_rockchip_encoder_mode_set,
.prepare = dw_hdmi_rockchip_encoder_prepare,
@@ -295,7 +295,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index f22e1e1ee64a..afbb7407c44f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -450,10 +450,6 @@ static int rockchip_drm_bind(struct device *dev)
if (!drm)
return -ENOMEM;
- ret = drm_dev_set_unique(drm, "%s", dev_name(dev));
- if (ret)
- goto err_free;
-
ret = drm_dev_register(drm, 0);
if (ret)
goto err_free;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index b8ac5911c102..621f25c463bd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -66,7 +66,7 @@ static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
rockchip_fb->obj[0], handle);
}
-static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
+static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
.destroy = rockchip_drm_fb_destroy,
.create_handle = rockchip_drm_fb_create_handle,
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 8caea0a33dd8..d908321b94ce 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -67,6 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_pgoff = 0;
ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
obj->size, &rk_obj->dma_attrs);
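[Note: the one-line vm_pgoff fix above resolves an mmap offset confusion: DRM's fake-offset mmap path leaves the GEM lookup offset in vma->vm_pgoff, and dma_mmap_attrs() would misinterpret it as an offset into the DMA buffer. A hedged sketch of the rule; my_gem_dma_mmap is hypothetical:]

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    static int my_gem_dma_mmap(struct device *dev,
                               struct vm_area_struct *vma, void *cpu_addr,
                               dma_addr_t dma_addr, size_t size,
                               struct dma_attrs *attrs)
    {
            /* drm_gem_mmap() used vm_pgoff to find the object; the DMA
             * API expects a buffer-relative offset, so reset it and
             * map from the start of the buffer. */
            vma->vm_pgoff = 0;
            return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
                                  attrs);
    }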
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 5d8ae5e49c44..dd8e0860ad4e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -374,6 +374,7 @@ static const struct of_device_id vop_driver_dt_match[] = {
.data = &rk3288_vop },
{},
};
+MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
@@ -959,8 +960,8 @@ static int vop_update_plane_event(struct drm_plane *plane,
val = (dest.y2 - dest.y1 - 1) << 16;
val |= (dest.x2 - dest.x1 - 1) & 0xffff;
VOP_WIN_SET(vop, win, dsp_info, val);
- val = (dsp_sty - 1) << 16;
- val |= (dsp_stx - 1) & 0xffff;
+ val = dsp_sty << 16;
+ val |= dsp_stx & 0xffff;
VOP_WIN_SET(vop, win, dsp_st, val);
VOP_WIN_SET(vop, win, rb_swap, rb_swap);
@@ -1289,7 +1290,7 @@ static void vop_win_state_complete(struct vop_win *vop_win,
if (state->event) {
spin_lock_irqsave(&drm->event_lock, flags);
- drm_send_vblank_event(drm, -1, state->event);
+ drm_crtc_send_vblank_event(crtc, state->event);
spin_unlock_irqrestore(&drm->event_lock, flags);
}
@@ -1477,7 +1478,7 @@ static int vop_create_crtc(struct vop *vop)
0, &vop_plane_funcs,
win_data->phy->data_formats,
win_data->phy->nformats,
- win_data->type);
+ win_data->type, NULL);
if (ret) {
DRM_ERROR("failed to initialize plane\n");
goto err_cleanup_planes;
@@ -1491,7 +1492,7 @@ static int vop_create_crtc(struct vop *vop)
}
ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
- &vop_crtc_funcs);
+ &vop_crtc_funcs, NULL);
if (ret)
return ret;
@@ -1514,7 +1515,7 @@ static int vop_create_crtc(struct vop *vop)
&vop_plane_funcs,
win_data->phy->data_formats,
win_data->phy->nformats,
- win_data->type);
+ win_data->type, NULL);
if (ret) {
DRM_ERROR("failed to initialize overlay plane\n");
goto err_cleanup_crtc;
@@ -1575,32 +1576,25 @@ static int vop_initial(struct vop *vop)
return PTR_ERR(vop->dclk);
}
- ret = clk_prepare(vop->hclk);
- if (ret < 0) {
- dev_err(vop->dev, "failed to prepare hclk\n");
- return ret;
- }
-
ret = clk_prepare(vop->dclk);
if (ret < 0) {
dev_err(vop->dev, "failed to prepare dclk\n");
- goto err_unprepare_hclk;
+ return ret;
}
- ret = clk_prepare(vop->aclk);
+ /* Enable both the hclk and aclk to set up the vop */
+ ret = clk_prepare_enable(vop->hclk);
if (ret < 0) {
- dev_err(vop->dev, "failed to prepare aclk\n");
+ dev_err(vop->dev, "failed to prepare/enable hclk\n");
goto err_unprepare_dclk;
}
- /*
- * enable hclk, so that we can config vop register.
- */
- ret = clk_enable(vop->hclk);
+ ret = clk_prepare_enable(vop->aclk);
if (ret < 0) {
- dev_err(vop->dev, "failed to prepare aclk\n");
- goto err_unprepare_aclk;
+ dev_err(vop->dev, "failed to prepare/enable aclk\n");
+ goto err_disable_hclk;
}
+
/*
* do hclk_reset, reset all vop registers.
*/
@@ -1608,7 +1602,7 @@ static int vop_initial(struct vop *vop)
if (IS_ERR(ahb_rst)) {
dev_err(vop->dev, "failed to get ahb reset\n");
ret = PTR_ERR(ahb_rst);
- goto err_disable_hclk;
+ goto err_disable_aclk;
}
reset_control_assert(ahb_rst);
usleep_range(10, 20);
@@ -1634,26 +1628,25 @@ static int vop_initial(struct vop *vop)
if (IS_ERR(vop->dclk_rst)) {
dev_err(vop->dev, "failed to get dclk reset\n");
ret = PTR_ERR(vop->dclk_rst);
- goto err_unprepare_aclk;
+ goto err_disable_aclk;
}
reset_control_assert(vop->dclk_rst);
usleep_range(10, 20);
reset_control_deassert(vop->dclk_rst);
clk_disable(vop->hclk);
+ clk_disable(vop->aclk);
vop->is_enabled = false;
return 0;
+err_disable_aclk:
+ clk_disable_unprepare(vop->aclk);
err_disable_hclk:
- clk_disable(vop->hclk);
-err_unprepare_aclk:
- clk_unprepare(vop->aclk);
+ clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
clk_unprepare(vop->dclk);
-err_unprepare_hclk:
- clk_unprepare(vop->hclk);
return ret;
}
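[Note: the reshuffled error labels above are easier to follow once the pairing rule is spelled out: clk_prepare_enable() must be undone with clk_disable_unprepare(), while on success this function deliberately keeps the clocks prepared and only drops the enable counts of hclk and aclk. A hedged sketch of the symmetric idiom; my_setup and my_clk are hypothetical:]

    #include <linux/clk.h>

    static int my_setup(struct clk *my_clk)
    {
            int ret;

            ret = clk_prepare_enable(my_clk);   /* prepare + enable */
            if (ret)
                    return ret;

            /* ... program registers while the clock is running ... */

            clk_disable_unprepare(my_clk);      /* symmetric teardown */
            return 0;
    }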
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index e9272b0a8592..b80802f55143 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -613,7 +613,7 @@ int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
encoder->possible_crtcs = 1;
ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 3ae09dcd4fd8..de11c7cfb02f 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -367,7 +367,7 @@ int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
int res;
res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
- &sti_crtc_funcs);
+ &sti_crtc_funcs, NULL);
if (res) {
DRM_ERROR("Can't initialze CRTC\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index dd1032195051..807863106b8d 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -272,7 +272,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
&sti_plane_helpers_funcs,
cursor_supported_formats,
ARRAY_SIZE(cursor_supported_formats),
- DRM_PLANE_TYPE_CURSOR);
+ DRM_PLANE_TYPE_CURSOR, NULL);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
goto err_plane;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index c85dc7d6b005..f9a1d92c9d95 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -630,7 +630,7 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
&sti_plane_helpers_funcs,
gdp_supported_formats,
ARRAY_SIZE(gdp_supported_formats),
- type);
+ type, NULL);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
goto err;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index d735daccd458..49cce833f2c8 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -543,8 +543,6 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector)
count++;
}
- drm_mode_sort(&connector->modes);
-
return count;
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index ea0690bc77d5..43861b52261d 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -973,7 +973,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
&sti_plane_helpers_funcs,
hqvdp_supported_formats,
ARRAY_SIZE(hqvdp_supported_formats),
- DRM_PLANE_TYPE_OVERLAY);
+ DRM_PLANE_TYPE_OVERLAY, NULL);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
return NULL;
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index c8a4c5dae2b6..f2afcf5438b8 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -512,7 +512,8 @@ sti_tvout_create_dvo_encoder(struct drm_device *dev,
drm_encoder->possible_clones = 1 << 0;
drm_encoder_init(dev, drm_encoder,
- &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_LVDS);
+ &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_LVDS,
+ NULL);
drm_encoder_helper_add(drm_encoder, &sti_dvo_encoder_helper_funcs);
@@ -564,7 +565,7 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
drm_encoder->possible_clones = 1 << 0;
drm_encoder_init(dev, drm_encoder,
- &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC);
+ &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(drm_encoder, &sti_hda_encoder_helper_funcs);
@@ -613,7 +614,7 @@ static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
drm_encoder->possible_clones = 1 << 1;
drm_encoder_init(dev, drm_encoder,
- &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS);
+ &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(drm_encoder, &sti_hdmi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index e9f24a85a103..dde6f208c347 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -660,7 +660,8 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_primary_plane_funcs, formats,
- num_formats, DRM_PLANE_TYPE_PRIMARY);
+ num_formats, DRM_PLANE_TYPE_PRIMARY,
+ NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
@@ -827,7 +828,8 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
&tegra_cursor_plane_funcs, formats,
- num_formats, DRM_PLANE_TYPE_CURSOR);
+ num_formats, DRM_PLANE_TYPE_CURSOR,
+ NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
@@ -890,7 +892,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
&tegra_overlay_plane_funcs, formats,
- num_formats, DRM_PLANE_TYPE_OVERLAY);
+ num_formats, DRM_PLANE_TYPE_OVERLAY,
+ NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
@@ -1732,7 +1735,7 @@ static int tegra_dc_init(struct host1x_client *client)
}
err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor,
- &tegra_crtc_funcs);
+ &tegra_crtc_funcs, NULL);
if (err < 0)
goto cleanup;
@@ -1952,8 +1955,10 @@ static int tegra_dc_parse_dt(struct tegra_dc *dc)
* cases where only a single display controller is used.
*/
for_each_matching_node(np, tegra_dc_of_match) {
- if (np == dc->dev->of_node)
+ if (np == dc->dev->of_node) {
+ of_node_put(np);
break;
+ }
value++;
}
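[Note: the tegra_dc_parse_dt() change above fixes a device-node reference leak: for_each_matching_node() takes a reference on each node it visits and drops it on the next iteration, so breaking out early leaves one reference held. A hedged sketch of the rule; my_match and target are hypothetical:]

    #include <linux/of.h>

    static void my_scan(const struct of_device_id *my_match,
                        struct device_node *target)
    {
            struct device_node *np;

            for_each_matching_node(np, my_match) {
                    if (np == target) {
                            of_node_put(np); /* balance iterator's get */
                            break;
                    }
            }
    }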
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 6aecb6647313..b24a0f14821a 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -436,7 +436,7 @@ struct platform_driver tegra_dpaux_driver = {
.remove = tegra_dpaux_remove,
};
-struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np)
+struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np)
{
struct tegra_dpaux *dpaux;
@@ -445,7 +445,7 @@ struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np)
list_for_each_entry(dpaux, &dpaux_list, list)
if (np == dpaux->dev->of_node) {
mutex_unlock(&dpaux_lock);
- return dpaux;
+ return &dpaux->aux;
}
mutex_unlock(&dpaux_lock);
@@ -453,8 +453,9 @@ struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np)
return NULL;
}
-int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
+int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
{
+ struct tegra_dpaux *dpaux = to_dpaux(aux);
unsigned long timeout;
int err;
@@ -470,7 +471,7 @@ int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
while (time_before(jiffies, timeout)) {
enum drm_connector_status status;
- status = tegra_dpaux_detect(dpaux);
+ status = drm_dp_aux_detect(aux);
if (status == connector_status_connected) {
enable_irq(dpaux->irq);
return 0;
@@ -482,8 +483,9 @@ int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
return -ETIMEDOUT;
}
-int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
+int drm_dp_aux_detach(struct drm_dp_aux *aux)
{
+ struct tegra_dpaux *dpaux = to_dpaux(aux);
unsigned long timeout;
int err;
@@ -498,7 +500,7 @@ int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
while (time_before(jiffies, timeout)) {
enum drm_connector_status status;
- status = tegra_dpaux_detect(dpaux);
+ status = drm_dp_aux_detect(aux);
if (status == connector_status_disconnected) {
dpaux->output = NULL;
return 0;
@@ -510,8 +512,9 @@ int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
return -ETIMEDOUT;
}
-enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux)
+enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
{
+ struct tegra_dpaux *dpaux = to_dpaux(aux);
u32 value;
value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
@@ -522,8 +525,9 @@ enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux)
return connector_status_disconnected;
}
-int tegra_dpaux_enable(struct tegra_dpaux *dpaux)
+int drm_dp_aux_enable(struct drm_dp_aux *aux)
{
+ struct tegra_dpaux *dpaux = to_dpaux(aux);
u32 value;
value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
@@ -540,8 +544,9 @@ int tegra_dpaux_enable(struct tegra_dpaux *dpaux)
return 0;
}
-int tegra_dpaux_disable(struct tegra_dpaux *dpaux)
+int drm_dp_aux_disable(struct drm_dp_aux *aux)
{
+ struct tegra_dpaux *dpaux = to_dpaux(aux);
u32 value;
value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
@@ -551,11 +556,11 @@ int tegra_dpaux_disable(struct tegra_dpaux *dpaux)
return 0;
}
-int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding)
+int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding)
{
int err;
- err = drm_dp_dpcd_writeb(&dpaux->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+ err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
encoding);
if (err < 0)
return err;
@@ -563,15 +568,15 @@ int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding)
return 0;
}
-int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
- u8 pattern)
+int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
+ u8 pattern)
{
u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
u8 status[DP_LINK_STATUS_SIZE], values[4];
unsigned int i;
int err;
- err = drm_dp_dpcd_writeb(&dpaux->aux, DP_TRAINING_PATTERN_SET, pattern);
+ err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
if (err < 0)
return err;
@@ -584,14 +589,14 @@ int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
DP_TRAIN_MAX_SWING_REACHED |
DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
- err = drm_dp_dpcd_write(&dpaux->aux, DP_TRAINING_LANE0_SET, values,
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values,
link->num_lanes);
if (err < 0)
return err;
usleep_range(500, 1000);
- err = drm_dp_dpcd_read_link_status(&dpaux->aux, status);
+ err = drm_dp_dpcd_read_link_status(aux, status);
if (err < 0)
return err;
@@ -609,11 +614,11 @@ int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
break;
default:
- dev_err(dpaux->dev, "unsupported training pattern %u\n", tp);
+ dev_err(aux->dev, "unsupported training pattern %u\n", tp);
return -EINVAL;
}
- err = drm_dp_dpcd_writeb(&dpaux->aux, DP_EDP_CONFIGURATION_SET, 0);
+ err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e0f827790a5e..c5c856a0879d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -137,8 +137,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
start = geometry->aperture_start;
end = geometry->aperture_end;
- DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n",
- start, end);
+ DRM_DEBUG_DRIVER("IOMMU aperture initialized (%#llx-%#llx)\n",
+ start, end);
drm_mm_init(&tegra->mm, start, end - start + 1);
}
@@ -277,9 +277,7 @@ host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
if (!gem)
return NULL;
- mutex_lock(&drm->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&drm->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
bo = to_tegra_bo(gem);
return &bo->base;
@@ -473,7 +471,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
- drm_gem_object_unreference(gem);
+ drm_gem_object_unreference_unlocked(gem);
return 0;
}
@@ -683,7 +681,7 @@ static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
bo->tiling.mode = mode;
bo->tiling.value = value;
- drm_gem_object_unreference(gem);
+ drm_gem_object_unreference_unlocked(gem);
return 0;
}
@@ -723,7 +721,7 @@ static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
break;
}
- drm_gem_object_unreference(gem);
+ drm_gem_object_unreference_unlocked(gem);
return err;
}
@@ -748,7 +746,7 @@ static int tegra_gem_set_flags(struct drm_device *drm, void *data,
if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
bo->flags |= TEGRA_BO_BOTTOM_UP;
- drm_gem_object_unreference(gem);
+ drm_gem_object_unreference_unlocked(gem);
return 0;
}
@@ -770,7 +768,7 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
if (bo->flags & TEGRA_BO_BOTTOM_UP)
args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
- drm_gem_object_unreference(gem);
+ drm_gem_object_unreference_unlocked(gem);
return 0;
}
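[Note: the repeated unreference conversions above drop the explicit struct_mutex lock/unlock pair around each put. A hedged sketch of why that is safe; my_put_bo is hypothetical:]

    /* Hedged sketch: drm_gem_object_unreference_unlocked() takes
     * dev->struct_mutex internally only when the last reference goes
     * away, so callers need no locking of their own. */
    static void my_put_bo(struct drm_gem_object *gem)
    {
            drm_gem_object_unreference_unlocked(gem);
    }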
@@ -921,7 +919,8 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
#endif
static struct drm_driver tegra_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_ATOMIC,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
.open = tegra_drm_open,
@@ -991,7 +990,6 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (!drm)
return -ENOMEM;
- drm_dev_set_unique(drm, dev_name(&dev->dev));
dev_set_drvdata(&dev->dev, drm);
err = drm_dev_register(drm, 0);
@@ -1023,8 +1021,17 @@ static int host1x_drm_remove(struct host1x_device *dev)
static int host1x_drm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
+ struct tegra_drm *tegra = drm->dev_private;
drm_kms_helper_poll_disable(drm);
+ tegra_drm_fb_suspend(drm);
+
+ tegra->state = drm_atomic_helper_suspend(drm);
+ if (IS_ERR(tegra->state)) {
+ tegra_drm_fb_resume(drm);
+ drm_kms_helper_poll_enable(drm);
+ return PTR_ERR(tegra->state);
+ }
return 0;
}
@@ -1032,7 +1039,10 @@ static int host1x_drm_suspend(struct device *dev)
static int host1x_drm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
+ struct tegra_drm *tegra = drm->dev_private;
+ drm_atomic_helper_resume(drm, tegra->state);
+ tegra_drm_fb_resume(drm);
drm_kms_helper_poll_enable(drm);
return 0;
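
The new hooks follow the standard atomic suspend/resume shape: stash the state that drm_atomic_helper_suspend() hands back, replay it with drm_atomic_helper_resume(), and unwind in strict reverse order when the save fails. The same pattern, generalized for any atomic KMS driver; fb_suspend()/fb_resume() are stand-ins for a driver's fbdev hooks:

	#include <linux/err.h>
	#include <drm/drm_atomic_helper.h>
	#include <drm/drm_crtc_helper.h>

	/* Stand-ins for the driver's fbdev console hooks. */
	static void fb_suspend(struct drm_device *drm) { }
	static void fb_resume(struct drm_device *drm) { }

	static int example_suspend(struct drm_device *drm,
				   struct drm_atomic_state **saved)
	{
		drm_kms_helper_poll_disable(drm);
		fb_suspend(drm);

		*saved = drm_atomic_helper_suspend(drm);
		if (IS_ERR(*saved)) {
			/* Unwind in reverse order of the steps above. */
			fb_resume(drm);
			drm_kms_helper_poll_enable(drm);
			return PTR_ERR(*saved);
		}
		return 0;
	}

	static int example_resume(struct drm_device *drm,
				  struct drm_atomic_state *saved)
	{
		int err = drm_atomic_helper_resume(drm, saved);

		fb_resume(drm);
		drm_kms_helper_poll_enable(drm);
		return err;
	}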
@@ -1076,6 +1086,16 @@ static struct host1x_driver host1x_drm_driver = {
.subdevs = host1x_drm_subdevs,
};
+static struct platform_driver * const drivers[] = {
+ &tegra_dc_driver,
+ &tegra_hdmi_driver,
+ &tegra_dsi_driver,
+ &tegra_dpaux_driver,
+ &tegra_sor_driver,
+ &tegra_gr2d_driver,
+ &tegra_gr3d_driver,
+};
+
static int __init host1x_drm_init(void)
{
int err;
@@ -1084,48 +1104,12 @@ static int __init host1x_drm_init(void)
if (err < 0)
return err;
- err = platform_driver_register(&tegra_dc_driver);
+ err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
if (err < 0)
goto unregister_host1x;
- err = platform_driver_register(&tegra_dsi_driver);
- if (err < 0)
- goto unregister_dc;
-
- err = platform_driver_register(&tegra_sor_driver);
- if (err < 0)
- goto unregister_dsi;
-
- err = platform_driver_register(&tegra_hdmi_driver);
- if (err < 0)
- goto unregister_sor;
-
- err = platform_driver_register(&tegra_dpaux_driver);
- if (err < 0)
- goto unregister_hdmi;
-
- err = platform_driver_register(&tegra_gr2d_driver);
- if (err < 0)
- goto unregister_dpaux;
-
- err = platform_driver_register(&tegra_gr3d_driver);
- if (err < 0)
- goto unregister_gr2d;
-
return 0;
-unregister_gr2d:
- platform_driver_unregister(&tegra_gr2d_driver);
-unregister_dpaux:
- platform_driver_unregister(&tegra_dpaux_driver);
-unregister_hdmi:
- platform_driver_unregister(&tegra_hdmi_driver);
-unregister_sor:
- platform_driver_unregister(&tegra_sor_driver);
-unregister_dsi:
- platform_driver_unregister(&tegra_dsi_driver);
-unregister_dc:
- platform_driver_unregister(&tegra_dc_driver);
unregister_host1x:
host1x_driver_unregister(&host1x_drm_driver);
return err;
@@ -1134,13 +1118,7 @@ module_init(host1x_drm_init);
static void __exit host1x_drm_exit(void)
{
- platform_driver_unregister(&tegra_gr3d_driver);
- platform_driver_unregister(&tegra_gr2d_driver);
- platform_driver_unregister(&tegra_dpaux_driver);
- platform_driver_unregister(&tegra_hdmi_driver);
- platform_driver_unregister(&tegra_sor_driver);
- platform_driver_unregister(&tegra_dsi_driver);
- platform_driver_unregister(&tegra_dc_driver);
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);
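
platform_register_drivers()/platform_unregister_drivers() (added in v4.4) replace the ladder of register calls and unwind labels; on a mid-array failure the helper unregisters whatever already registered. A minimal module using the pair, where foo_driver and bar_driver are hypothetical:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	extern struct platform_driver foo_driver;	/* hypothetical */
	extern struct platform_driver bar_driver;	/* hypothetical */

	static struct platform_driver * const example_drivers[] = {
		&foo_driver,
		&bar_driver,
	};

	static int __init example_init(void)
	{
		return platform_register_drivers(example_drivers,
						 ARRAY_SIZE(example_drivers));
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
		platform_unregister_drivers(example_drivers,
					    ARRAY_SIZE(example_drivers));
	}
	module_exit(example_exit);

	MODULE_LICENSE("GPL");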
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index d88a2d18c1a4..c088f2f67eda 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -57,6 +57,8 @@ struct tegra_drm {
struct work_struct work;
struct mutex lock;
} commit;
+
+ struct drm_atomic_state *state;
};
struct tegra_drm_client;
@@ -247,18 +249,17 @@ void tegra_output_connector_destroy(struct drm_connector *connector);
void tegra_output_encoder_destroy(struct drm_encoder *encoder);
/* from dpaux.c */
-struct tegra_dpaux;
struct drm_dp_link;
-struct tegra_dpaux *tegra_dpaux_find_by_of_node(struct device_node *np);
-enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux);
-int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output);
-int tegra_dpaux_detach(struct tegra_dpaux *dpaux);
-int tegra_dpaux_enable(struct tegra_dpaux *dpaux);
-int tegra_dpaux_disable(struct tegra_dpaux *dpaux);
-int tegra_dpaux_prepare(struct tegra_dpaux *dpaux, u8 encoding);
-int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
- u8 pattern);
+struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
+enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
+int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output);
+int drm_dp_aux_detach(struct drm_dp_aux *aux);
+int drm_dp_aux_enable(struct drm_dp_aux *aux);
+int drm_dp_aux_disable(struct drm_dp_aux *aux);
+int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding);
+int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
+ u8 pattern);
/* from fb.c */
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
@@ -273,16 +274,18 @@ int tegra_drm_fb_prepare(struct drm_device *drm);
void tegra_drm_fb_free(struct drm_device *drm);
int tegra_drm_fb_init(struct drm_device *drm);
void tegra_drm_fb_exit(struct drm_device *drm);
+void tegra_drm_fb_suspend(struct drm_device *drm);
+void tegra_drm_fb_resume(struct drm_device *drm);
#ifdef CONFIG_DRM_FBDEV_EMULATION
void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
void tegra_fb_output_poll_changed(struct drm_device *drm);
#endif
extern struct platform_driver tegra_dc_driver;
-extern struct platform_driver tegra_dsi_driver;
-extern struct platform_driver tegra_sor_driver;
extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dsi_driver;
extern struct platform_driver tegra_dpaux_driver;
+extern struct platform_driver tegra_sor_driver;
extern struct platform_driver tegra_gr2d_driver;
extern struct platform_driver tegra_gr3d_driver;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index f0a138ef68ce..50d46ae3786b 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1023,7 +1023,7 @@ static int tegra_dsi_init(struct host1x_client *client)
drm_encoder_init(drm, &dsi->output.encoder,
&tegra_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI);
+ DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(&dsi->output.encoder,
&tegra_dsi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index ede9e94f3312..ca84de9ccb51 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/console.h>
+
#include "drm.h"
#include "gem.h"
@@ -86,7 +88,7 @@ static int tegra_fb_create_handle(struct drm_framebuffer *framebuffer,
return drm_gem_handle_create(file, &fb->planes[0]->gem, handle);
}
-static struct drm_framebuffer_funcs tegra_fb_funcs = {
+static const struct drm_framebuffer_funcs tegra_fb_funcs = {
.destroy = tegra_fb_destroy,
.create_handle = tegra_fb_create_handle,
};
@@ -413,3 +415,25 @@ void tegra_drm_fb_exit(struct drm_device *drm)
tegra_fbdev_exit(tegra->fbdev);
#endif
}
+
+void tegra_drm_fb_suspend(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ struct tegra_drm *tegra = drm->dev_private;
+
+ console_lock();
+ drm_fb_helper_set_suspend(&tegra->fbdev->base, 1);
+ console_unlock();
+#endif
+}
+
+void tegra_drm_fb_resume(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ struct tegra_drm *tegra = drm->dev_private;
+
+ console_lock();
+ drm_fb_helper_set_suspend(&tegra->fbdev->base, 0);
+ console_unlock();
+#endif
+}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 01e16e146bfe..33add93b4ed9 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -28,11 +28,8 @@ static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
static void tegra_bo_put(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- struct drm_device *drm = obj->gem.dev;
- mutex_lock(&drm->struct_mutex);
- drm_gem_object_unreference(&obj->gem);
- mutex_unlock(&drm->struct_mutex);
+ drm_gem_object_unreference_unlocked(&obj->gem);
}
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
@@ -72,11 +69,8 @@ static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- struct drm_device *drm = obj->gem.dev;
- mutex_lock(&drm->struct_mutex);
drm_gem_object_reference(&obj->gem);
- mutex_unlock(&drm->struct_mutex);
return bo;
}
@@ -408,12 +402,9 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
struct drm_gem_object *gem;
struct tegra_bo *bo;
- mutex_lock(&drm->struct_mutex);
-
gem = drm_gem_object_lookup(drm, file, handle);
if (!gem) {
dev_err(drm->dev, "failed to lookup GEM object\n");
- mutex_unlock(&drm->struct_mutex);
return -EINVAL;
}
@@ -421,9 +412,7 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
- drm_gem_object_unreference(gem);
-
- mutex_unlock(&drm->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return 0;
}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 52b32cbd9de6..b7ef4929e347 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1320,7 +1320,7 @@ static int tegra_hdmi_init(struct host1x_client *client)
hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(&hdmi->output.encoder,
&tegra_hdmi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index bc9735b4ad60..e246334e0252 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -287,7 +287,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
output->connector.dpms = DRM_MODE_DPMS_OFF;
drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
drm_encoder_helper_add(&output->encoder,
&tegra_rgb_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 3eff7cf75d25..757c6e8603af 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -173,7 +173,7 @@ struct tegra_sor {
struct clk *clk_dp;
struct clk *clk;
- struct tegra_dpaux *dpaux;
+ struct drm_dp_aux *aux;
struct drm_info_list *debugfs_files;
struct drm_minor *minor;
@@ -273,7 +273,7 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
- err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B);
+ err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B);
if (err < 0)
return err;
@@ -288,7 +288,7 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
pattern = DP_TRAINING_PATTERN_1;
- err = tegra_dpaux_train(sor->dpaux, link, pattern);
+ err = drm_dp_aux_train(sor->aux, link, pattern);
if (err < 0)
return err;
@@ -309,7 +309,7 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2;
- err = tegra_dpaux_train(sor->dpaux, link, pattern);
+ err = drm_dp_aux_train(sor->aux, link, pattern);
if (err < 0)
return err;
@@ -324,7 +324,7 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
pattern = DP_TRAINING_PATTERN_DISABLE;
- err = tegra_dpaux_train(sor->dpaux, link, pattern);
+ err = drm_dp_aux_train(sor->aux, link, pattern);
if (err < 0)
return err;
@@ -1044,8 +1044,8 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force)
struct tegra_output *output = connector_to_output(connector);
struct tegra_sor *sor = to_sor(output);
- if (sor->dpaux)
- return tegra_dpaux_detect(sor->dpaux);
+ if (sor->aux)
+ return drm_dp_aux_detect(sor->aux);
return tegra_output_connector_detect(connector, force);
}
@@ -1066,13 +1066,13 @@ static int tegra_sor_connector_get_modes(struct drm_connector *connector)
struct tegra_sor *sor = to_sor(output);
int err;
- if (sor->dpaux)
- tegra_dpaux_enable(sor->dpaux);
+ if (sor->aux)
+ drm_dp_aux_enable(sor->aux);
err = tegra_output_connector_get_modes(connector);
- if (sor->dpaux)
- tegra_dpaux_disable(sor->dpaux);
+ if (sor->aux)
+ drm_dp_aux_disable(sor->aux);
return err;
}
@@ -1128,8 +1128,8 @@ static void tegra_sor_edp_disable(struct drm_encoder *encoder)
if (err < 0)
dev_err(sor->dev, "failed to power down SOR: %d\n", err);
- if (sor->dpaux) {
- err = tegra_dpaux_disable(sor->dpaux);
+ if (sor->aux) {
+ err = drm_dp_aux_disable(sor->aux);
if (err < 0)
dev_err(sor->dev, "failed to disable DP: %d\n", err);
}
@@ -1196,7 +1196,7 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
struct tegra_sor *sor = to_sor(output);
struct tegra_sor_config config;
struct drm_dp_link link;
- struct drm_dp_aux *aux;
+ u8 rate, lanes;
int err = 0;
u32 value;
@@ -1209,20 +1209,14 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
if (output->panel)
drm_panel_prepare(output->panel);
- /* FIXME: properly convert to struct drm_dp_aux */
- aux = (struct drm_dp_aux *)sor->dpaux;
-
- if (sor->dpaux) {
- err = tegra_dpaux_enable(sor->dpaux);
- if (err < 0)
- dev_err(sor->dev, "failed to enable DP: %d\n", err);
+ err = drm_dp_aux_enable(sor->aux);
+ if (err < 0)
+ dev_err(sor->dev, "failed to enable DP: %d\n", err);
- err = drm_dp_link_probe(aux, &link);
- if (err < 0) {
- dev_err(sor->dev, "failed to probe eDP link: %d\n",
- err);
- return;
- }
+ err = drm_dp_link_probe(sor->aux, &link);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
+ return;
}
err = clk_set_parent(sor->clk, sor->clk_safe);
@@ -1434,60 +1428,51 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
value |= SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
- if (sor->dpaux) {
- u8 rate, lanes;
-
- err = drm_dp_link_probe(aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to probe eDP link: %d\n",
- err);
+ err = drm_dp_link_probe(sor->aux, &link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
- err = drm_dp_link_power_up(aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to power up eDP link: %d\n",
- err);
+ err = drm_dp_link_power_up(sor->aux, &link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up eDP link: %d\n", err);
- err = drm_dp_link_configure(aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to configure eDP link: %d\n",
- err);
+ err = drm_dp_link_configure(sor->aux, &link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to configure eDP link: %d\n", err);
- rate = drm_dp_link_rate_to_bw_code(link.rate);
- lanes = link.num_lanes;
+ rate = drm_dp_link_rate_to_bw_code(link.rate);
+ lanes = link.num_lanes;
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
- value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
+ value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
- if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
- value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
+ if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
- /* disable training pattern generator */
+ /* disable training pattern generator */
- for (i = 0; i < link.num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
- }
+ for (i = 0; i < link.num_lanes; i++) {
+ unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
+ SOR_DP_TPG_SCRAMBLER_GALIOS |
+ SOR_DP_TPG_PATTERN_NONE;
+ value = (value << 8) | lane;
+ }
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ tegra_sor_writel(sor, value, SOR_DP_TPG);
- err = tegra_sor_dp_train_fast(sor, &link);
- if (err < 0) {
- dev_err(sor->dev, "DP fast link training failed: %d\n",
- err);
- }
+ err = tegra_sor_dp_train_fast(sor, &link);
+ if (err < 0)
+ dev_err(sor->dev, "DP fast link training failed: %d\n", err);
- dev_dbg(sor->dev, "fast link training succeeded\n");
- }
+ dev_dbg(sor->dev, "fast link training succeeded\n");
err = tegra_sor_power_up(sor, 250);
if (err < 0)
@@ -1961,9 +1946,9 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
/* production settings */
settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
- if (IS_ERR(settings)) {
- dev_err(sor->dev, "no settings for pixel clock %d Hz: %ld\n",
- mode->clock * 1000, PTR_ERR(settings));
+ if (!settings) {
+ dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
+ mode->clock * 1000);
return;
}
@@ -2148,7 +2133,7 @@ static int tegra_sor_init(struct host1x_client *client)
int encoder = DRM_MODE_ENCODER_NONE;
int err;
- if (!sor->dpaux) {
+ if (!sor->aux) {
if (sor->soc->supports_hdmi) {
connector = DRM_MODE_CONNECTOR_HDMIA;
encoder = DRM_MODE_ENCODER_TMDS;
@@ -2178,7 +2163,7 @@ static int tegra_sor_init(struct host1x_client *client)
sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
- encoder);
+ encoder, NULL);
drm_encoder_helper_add(&sor->output.encoder, helpers);
drm_mode_connector_attach_encoder(&sor->output.connector,
@@ -2199,8 +2184,8 @@ static int tegra_sor_init(struct host1x_client *client)
dev_err(sor->dev, "debugfs setup failed: %d\n", err);
}
- if (sor->dpaux) {
- err = tegra_dpaux_attach(sor->dpaux, &sor->output);
+ if (sor->aux) {
+ err = drm_dp_aux_attach(sor->aux, &sor->output);
if (err < 0) {
dev_err(sor->dev, "failed to attach DP: %d\n", err);
return err;
@@ -2249,8 +2234,8 @@ static int tegra_sor_exit(struct host1x_client *client)
tegra_output_exit(&sor->output);
- if (sor->dpaux) {
- err = tegra_dpaux_detach(sor->dpaux);
+ if (sor->aux) {
+ err = drm_dp_aux_detach(sor->aux);
if (err < 0) {
dev_err(sor->dev, "failed to detach DP: %d\n", err);
return err;
@@ -2399,14 +2384,14 @@ static int tegra_sor_probe(struct platform_device *pdev)
np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
if (np) {
- sor->dpaux = tegra_dpaux_find_by_of_node(np);
+ sor->aux = drm_dp_aux_find_by_of_node(np);
of_node_put(np);
- if (!sor->dpaux)
+ if (!sor->aux)
return -EPROBE_DEFER;
}
- if (!sor->dpaux) {
+ if (!sor->aux) {
if (sor->soc->supports_hdmi) {
sor->ops = &tegra_sor_hdmi_ops;
} else if (sor->soc->supports_lvds) {
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 0af8bed7ce1e..4dda6e2f464b 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -128,7 +128,7 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
encoder->possible_crtcs = 1;
ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
+ DRM_MODE_ENCODER_LVDS, NULL);
if (ret < 0)
goto fail;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 354c47ca6374..5052a8af7ecb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -138,7 +138,7 @@ static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
encoder->possible_crtcs = 1;
ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
if (ret < 0)
goto fail;
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 0110d95522f3..4709b54c204c 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -122,13 +122,13 @@ static void udl_connector_destroy(struct drm_connector *connector)
kfree(connector);
}
-static struct drm_connector_helper_funcs udl_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
.get_modes = udl_get_modes,
.mode_valid = udl_mode_valid,
.best_encoder = udl_best_single_encoder,
};
-static struct drm_connector_funcs udl_connector_funcs = {
+static const struct drm_connector_funcs udl_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = udl_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
index 4052c4656498..a181a647fcf9 100644
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -73,7 +73,8 @@ struct drm_encoder *udl_encoder_init(struct drm_device *dev)
if (!encoder)
return NULL;
- drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ NULL);
drm_encoder_helper_add(encoder, &udl_helper_funcs);
encoder->possible_crtcs = 1;
return encoder;
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 677190a65e82..160ef2a08b89 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -400,7 +400,7 @@ static void udl_crtc_commit(struct drm_crtc *crtc)
udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
-static struct drm_crtc_helper_funcs udl_helper_funcs = {
+static const struct drm_crtc_helper_funcs udl_helper_funcs = {
.dpms = udl_crtc_dpms,
.mode_fixup = udl_crtc_mode_fixup,
.mode_set = udl_crtc_mode_set,
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 32b4f9cd8f52..4c6a99f0398c 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -8,10 +8,19 @@ vc4-y := \
vc4_crtc.o \
vc4_drv.o \
vc4_kms.o \
+ vc4_gem.o \
vc4_hdmi.o \
vc4_hvs.o \
- vc4_plane.o
+ vc4_irq.o \
+ vc4_plane.o \
+ vc4_render_cl.o \
+ vc4_trace_points.o \
+ vc4_v3d.o \
+ vc4_validate.o \
+ vc4_validate_shaders.o
vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o
obj-$(CONFIG_DRM_VC4) += vc4.o
+
+CFLAGS_vc4_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index ab9f5108ae1a..18dfe3ec9a62 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -12,19 +12,236 @@
* access to system memory with no MMU in between. To support it, we
* use the GEM CMA helper functions to allocate contiguous ranges of
* physical memory for our BOs.
+ *
+ * Since the CMA allocator is very slow, we keep a cache of recently
+ * freed BOs around so that the kernel's allocation of objects for 3D
+ * rendering can return quickly.
*/
#include "vc4_drv.h"
+#include "uapi/drm/vc4_drm.h"
+
+static void vc4_bo_stats_dump(struct vc4_dev *vc4)
+{
+ DRM_INFO("num bos allocated: %d\n",
+ vc4->bo_stats.num_allocated);
+ DRM_INFO("size bos allocated: %dkb\n",
+ vc4->bo_stats.size_allocated / 1024);
+ DRM_INFO("num bos used: %d\n",
+ vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
+ DRM_INFO("size bos used: %dkb\n",
+ (vc4->bo_stats.size_allocated -
+ vc4->bo_stats.size_cached) / 1024);
+ DRM_INFO("num bos cached: %d\n",
+ vc4->bo_stats.num_cached);
+ DRM_INFO("size bos cached: %dkb\n",
+ vc4->bo_stats.size_cached / 1024);
+}
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_bo_stats stats;
+
+ /* Take a snapshot of the current stats with the lock held. */
+ mutex_lock(&vc4->bo_lock);
+ stats = vc4->bo_stats;
+ mutex_unlock(&vc4->bo_lock);
+
+ seq_printf(m, "num bos allocated: %d\n",
+ stats.num_allocated);
+ seq_printf(m, "size bos allocated: %dkb\n",
+ stats.size_allocated / 1024);
+ seq_printf(m, "num bos used: %d\n",
+ stats.num_allocated - stats.num_cached);
+ seq_printf(m, "size bos used: %dkb\n",
+ (stats.size_allocated - stats.size_cached) / 1024);
+ seq_printf(m, "num bos cached: %d\n",
+ stats.num_cached);
+ seq_printf(m, "size bos cached: %dkb\n",
+ stats.size_cached / 1024);
+
+ return 0;
+}
+#endif
+
+static uint32_t bo_page_index(size_t size)
+{
+ return (size / PAGE_SIZE) - 1;
+}
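
bo_page_index() keys the cache by object size in whole pages, so a one-page BO lands in bucket 0, a two-page BO in bucket 1, and so on; callers must pass a nonzero page-aligned size. A userspace-compilable check of that mapping, with PAGE_SIZE assumed to be 4096 for the demo:

	#include <assert.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096	/* assumed for the demo */

	static unsigned int bo_page_index(size_t size)
	{
		return (size / PAGE_SIZE) - 1;
	}

	int main(void)
	{
		assert(bo_page_index(PAGE_SIZE) == 0);		/* 1 page */
		assert(bo_page_index(2 * PAGE_SIZE) == 1);	/* 2 pages */
		assert(bo_page_index(16 * PAGE_SIZE) == 15);
		return 0;
	}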
+
+/* Must be called with bo_lock held. */
+static void vc4_bo_destroy(struct vc4_bo *bo)
+{
+ struct drm_gem_object *obj = &bo->base.base;
+ struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
+
+ if (bo->validated_shader) {
+ kfree(bo->validated_shader->texture_samples);
+ kfree(bo->validated_shader);
+ bo->validated_shader = NULL;
+ }
+
+ vc4->bo_stats.num_allocated--;
+ vc4->bo_stats.size_allocated -= obj->size;
+ drm_gem_cma_free_object(obj);
+}
+
+/* Must be called with bo_lock held. */
+static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
+{
+ struct drm_gem_object *obj = &bo->base.base;
+ struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
+
+ vc4->bo_stats.num_cached--;
+ vc4->bo_stats.size_cached -= obj->size;
+
+ list_del(&bo->unref_head);
+ list_del(&bo->size_head);
+}
+
+static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
+ size_t size)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ uint32_t page_index = bo_page_index(size);
+
+ if (vc4->bo_cache.size_list_size <= page_index) {
+ uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
+ page_index + 1);
+ struct list_head *new_list;
+ uint32_t i;
+
+ new_list = kmalloc_array(new_size, sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!new_list)
+ return NULL;
+
+ /* Rebase the old cached BO lists to their new list
+ * head locations.
+ */
+ for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
+ struct list_head *old_list =
+ &vc4->bo_cache.size_list[i];
+
+ if (list_empty(old_list))
+ INIT_LIST_HEAD(&new_list[i]);
+ else
+ list_replace(old_list, &new_list[i]);
+ }
+ /* And initialize the brand new BO list heads. */
+ for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
+ INIT_LIST_HEAD(&new_list[i]);
+
+ kfree(vc4->bo_cache.size_list);
+ vc4->bo_cache.size_list = new_list;
+ vc4->bo_cache.size_list_size = new_size;
+ }
+
+ return &vc4->bo_cache.size_list[page_index];
+}
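
The bucket array grows geometrically: a miss at page_index resizes it to max(old * 2, page_index + 1), so repeated growth stays amortized O(1) while a single huge BO still gets exactly the room it needs. Tracing the arithmetic in plain C:

	#include <assert.h>

	/* new_size = max(old * 2, page_index + 1), as in the resize above. */
	static unsigned int grown_size(unsigned int old, unsigned int page_index)
	{
		unsigned int doubled = old * 2;

		return doubled > page_index + 1 ? doubled : page_index + 1;
	}

	int main(void)
	{
		assert(grown_size(0, 5) == 6);		/* first miss sizes exactly */
		assert(grown_size(6, 9) == 12);		/* doubling wins */
		assert(grown_size(12, 30) == 31);	/* large index wins */
		return 0;
	}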
+
+void vc4_bo_cache_purge(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ mutex_lock(&vc4->bo_lock);
+ while (!list_empty(&vc4->bo_cache.time_list)) {
+ struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
+ struct vc4_bo, unref_head);
+ vc4_bo_remove_from_cache(bo);
+ vc4_bo_destroy(bo);
+ }
+ mutex_unlock(&vc4->bo_lock);
+}
+
+static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
+ uint32_t size)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ uint32_t page_index = bo_page_index(size);
+ struct vc4_bo *bo = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+
+ mutex_lock(&vc4->bo_lock);
+ if (page_index >= vc4->bo_cache.size_list_size)
+ goto out;
-struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size)
+ if (list_empty(&vc4->bo_cache.size_list[page_index]))
+ goto out;
+
+ bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
+ struct vc4_bo, size_head);
+ vc4_bo_remove_from_cache(bo);
+ kref_init(&bo->base.base.refcount);
+
+out:
+ mutex_unlock(&vc4->bo_lock);
+ return bo;
+}
+
+/**
+ * vc4_create_object - Implementation of driver->gem_create_object.
+ *
+ * This lets the CMA helpers allocate object structs for us, and keep
+ * our BO stats correct.
+ */
+struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_bo *bo;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&vc4->bo_lock);
+ vc4->bo_stats.num_allocated++;
+ vc4->bo_stats.size_allocated += size;
+ mutex_unlock(&vc4->bo_lock);
+
+ return &bo->base.base;
+}
+
+struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
+ bool from_cache)
+{
+ size_t size = roundup(unaligned_size, PAGE_SIZE);
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_gem_cma_object *cma_obj;
- cma_obj = drm_gem_cma_create(dev, size);
- if (IS_ERR(cma_obj))
+ if (size == 0)
return NULL;
- else
- return to_vc4_bo(&cma_obj->base);
+
+ /* First, try to get a vc4_bo from the kernel BO cache. */
+ if (from_cache) {
+ struct vc4_bo *bo = vc4_bo_get_from_cache(dev, size);
+
+ if (bo)
+ return bo;
+ }
+
+ cma_obj = drm_gem_cma_create(dev, size);
+ if (IS_ERR(cma_obj)) {
+ /*
+ * If we've run out of CMA memory, kill the cache of
+ * CMA allocations we've got lying around and try again.
+ */
+ vc4_bo_cache_purge(dev);
+
+ cma_obj = drm_gem_cma_create(dev, size);
+ if (IS_ERR(cma_obj)) {
+ DRM_ERROR("Failed to allocate from CMA:\n");
+ vc4_bo_stats_dump(vc4);
+ return NULL;
+ }
+ }
+
+ return to_vc4_bo(&cma_obj->base);
}
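
The allocation path encodes a simple pressure-relief loop: try CMA, and if that fails, hand the whole kernel BO cache back to the allocator and try once more. The shape of that loop, distilled; alloc() and purge_cache() here are hypothetical stand-ins for drm_gem_cma_create() and vc4_bo_cache_purge():

	#include <stddef.h>

	static void *alloc_with_cache_pressure(size_t size,
					       void *(*alloc)(size_t),
					       void (*purge_cache)(void))
	{
		void *p = alloc(size);

		if (!p) {
			/* Cached-but-idle memory may be why the allocator
			 * failed; give it all back and retry exactly once.
			 */
			purge_cache();
			p = alloc(size);
		}
		return p;
	}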
int vc4_dumb_create(struct drm_file *file_priv,
@@ -41,7 +258,191 @@ int vc4_dumb_create(struct drm_file *file_priv,
if (args->size < args->pitch * args->height)
args->size = args->pitch * args->height;
- bo = vc4_bo_create(dev, roundup(args->size, PAGE_SIZE));
+ bo = vc4_bo_create(dev, args->size, false);
+ if (!bo)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
+ drm_gem_object_unreference_unlocked(&bo->base.base);
+
+ return ret;
+}
+
+/* Must be called with bo_lock held. */
+static void vc4_bo_cache_free_old(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
+
+ while (!list_empty(&vc4->bo_cache.time_list)) {
+ struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
+ struct vc4_bo, unref_head);
+ if (time_before(expire_time, bo->free_time)) {
+ mod_timer(&vc4->bo_cache.time_timer,
+ round_jiffies_up(jiffies +
+ msecs_to_jiffies(1000)));
+ return;
+ }
+
+ vc4_bo_remove_from_cache(bo);
+ vc4_bo_destroy(bo);
+ }
+}
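
Aging relies on time_list being ordered (newest at the head, via list_add() in vc4_free_object()), so the walk from the tail stops at the first BO younger than the one-second cutoff and re-arms the timer. The cutoff test uses the kernel's wraparound-safe jiffies comparison, whose signed-difference trick is easy to demonstrate in userspace:

	#include <stdint.h>
	#include <stdio.h>

	/* Wraparound-safe "a is before b", the same signed-difference
	 * trick as time_before() in include/linux/jiffies.h (32-bit here).
	 */
	static int demo_time_before(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) < 0;
	}

	int main(void)
	{
		uint32_t now = 0xfffffff0u;		/* counter about to wrap */
		uint32_t free_time = now + 0x20;	/* stamped after the wrap */

		/* Despite the numeric wrap, free_time still compares as later. */
		printf("%d\n", demo_time_before(now, free_time));	/* 1 */
		printf("%d\n", demo_time_before(free_time, now));	/* 0 */
		return 0;
	}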
+
+/* Called on the last userspace/kernel unreference of the BO. Returns
+ * it to the BO cache if possible, otherwise frees it.
+ *
+ * Note that this is called with the struct_mutex held.
+ */
+void vc4_free_object(struct drm_gem_object *gem_bo)
+{
+ struct drm_device *dev = gem_bo->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_bo *bo = to_vc4_bo(gem_bo);
+ struct list_head *cache_list;
+
+ mutex_lock(&vc4->bo_lock);
+ /* If the object references someone else's memory, we can't cache it.
+ */
+ if (gem_bo->import_attach) {
+ vc4_bo_destroy(bo);
+ goto out;
+ }
+
+ /* Don't cache if it was publicly named. */
+ if (gem_bo->name) {
+ vc4_bo_destroy(bo);
+ goto out;
+ }
+
+ cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
+ if (!cache_list) {
+ vc4_bo_destroy(bo);
+ goto out;
+ }
+
+ if (bo->validated_shader) {
+ kfree(bo->validated_shader->texture_samples);
+ kfree(bo->validated_shader);
+ bo->validated_shader = NULL;
+ }
+
+ bo->free_time = jiffies;
+ list_add(&bo->size_head, cache_list);
+ list_add(&bo->unref_head, &vc4->bo_cache.time_list);
+
+ vc4->bo_stats.num_cached++;
+ vc4->bo_stats.size_cached += gem_bo->size;
+
+ vc4_bo_cache_free_old(dev);
+
+out:
+ mutex_unlock(&vc4->bo_lock);
+}
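
Only BOs whose storage vc4 owns outright can go back into the cache; the early-outs above reject dma-buf imports and flink-named objects, plus the case where no size bucket could be allocated. The predicate those checks amount to, written out as a reading aid rather than kernel API:

	#include <linux/list.h>
	#include <drm/drm_gem.h>

	static bool vc4_bo_is_cacheable(struct drm_gem_object *gem_bo,
					struct list_head *cache_list)
	{
		return !gem_bo->import_attach &&	/* not someone else's memory */
		       !gem_bo->name &&			/* not reachable via flink */
		       cache_list;			/* a size bucket exists */
	}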
+
+static void vc4_bo_cache_time_work(struct work_struct *work)
+{
+ struct vc4_dev *vc4 =
+ container_of(work, struct vc4_dev, bo_cache.time_work);
+ struct drm_device *dev = vc4->dev;
+
+ mutex_lock(&vc4->bo_lock);
+ vc4_bo_cache_free_old(dev);
+ mutex_unlock(&vc4->bo_lock);
+}
+
+static void vc4_bo_cache_time_timer(unsigned long data)
+{
+ struct drm_device *dev = (struct drm_device *)data;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ schedule_work(&vc4->bo_cache.time_work);
+}
+
+struct dma_buf *
+vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
+{
+ struct vc4_bo *bo = to_vc4_bo(obj);
+
+ if (bo->validated_shader) {
+ DRM_ERROR("Attempting to export shader BO\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return drm_gem_prime_export(dev, obj, flags);
+}
+
+int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gem_obj;
+ struct vc4_bo *bo;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ gem_obj = vma->vm_private_data;
+ bo = to_vc4_bo(gem_obj);
+
+ if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
+ DRM_ERROR("mmaping of shader BOs for writing not allowed.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
+ * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
+ * the whole buffer.
+ */
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_pgoff = 0;
+
+ ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma,
+ bo->base.vaddr, bo->base.paddr,
+ vma->vm_end - vma->vm_start);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
+}
+
+int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct vc4_bo *bo = to_vc4_bo(obj);
+
+ if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
+ DRM_ERROR("mmaping of shader BOs for writing not allowed.\n");
+ return -EINVAL;
+ }
+
+ return drm_gem_cma_prime_mmap(obj, vma);
+}
+
+void *vc4_prime_vmap(struct drm_gem_object *obj)
+{
+ struct vc4_bo *bo = to_vc4_bo(obj);
+
+ if (bo->validated_shader) {
+ DRM_ERROR("mmaping of shader BOs not allowed.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return drm_gem_cma_prime_vmap(obj);
+}
+
+int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vc4_create_bo *args = data;
+ struct vc4_bo *bo = NULL;
+ int ret;
+
+ /*
+ * We can't allocate from the BO cache, because the BOs don't
+ * get zeroed, and that might leak data between users.
+ */
+ bo = vc4_bo_create(dev, args->size, false);
if (!bo)
return -ENOMEM;
@@ -50,3 +451,107 @@ int vc4_dumb_create(struct drm_file *file_priv,
return ret;
}
+
+int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vc4_mmap_bo *args = data;
+ struct drm_gem_object *gem_obj;
+
+ gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+
+ /* The mmap offset was set up at BO allocation time. */
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+
+ drm_gem_object_unreference_unlocked(gem_obj);
+ return 0;
+}
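
From userspace this ioctl is half of a two-step mapping: ask for the fake mmap offset of a handle, then mmap the DRM fd at that offset. A sketch of that flow; the struct and macro names are taken from the uapi/drm/vc4_drm.h header this series adds, and error handling is trimmed:

	#include <stddef.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>

	#include "vc4_drm.h"	/* struct drm_vc4_mmap_bo, DRM_IOCTL_VC4_MMAP_BO */

	static void *vc4_map_bo(int fd, uint32_t handle, size_t size)
	{
		struct drm_vc4_mmap_bo map = { .handle = handle };

		if (ioctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map) != 0)
			return MAP_FAILED;

		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    fd, (off_t)map.offset);
	}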
+
+int
+vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vc4_create_shader_bo *args = data;
+ struct vc4_bo *bo = NULL;
+ int ret;
+
+ if (args->size == 0)
+ return -EINVAL;
+
+ if (args->size % sizeof(u64) != 0)
+ return -EINVAL;
+
+ if (args->flags != 0) {
+ DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (args->pad != 0) {
+ DRM_INFO("Pad set: 0x%08x\n", args->pad);
+ return -EINVAL;
+ }
+
+ bo = vc4_bo_create(dev, args->size, true);
+ if (!bo)
+ return -ENOMEM;
+
+ ret = copy_from_user(bo->base.vaddr,
+ (void __user *)(uintptr_t)args->data,
+ args->size);
+ if (ret != 0)
+ goto fail;
+ /* Clear the rest of the memory, which may contain stale data
+ * left over from a previous BO-cache allocation.
+ */
+ memset(bo->base.vaddr + args->size, 0,
+ bo->base.base.size - args->size);
+
+ bo->validated_shader = vc4_validate_shader(&bo->base);
+ if (!bo->validated_shader) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* We have to create the handle after validation, to avoid
+ * races with users doing things like mmapping the shader BO.
+ */
+ ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
+
+ fail:
+ drm_gem_object_unreference_unlocked(&bo->base.base);
+
+ return ret;
+}
+
+void vc4_bo_cache_init(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ mutex_init(&vc4->bo_lock);
+
+ INIT_LIST_HEAD(&vc4->bo_cache.time_list);
+
+ INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
+ setup_timer(&vc4->bo_cache.time_timer,
+ vc4_bo_cache_time_timer,
+ (unsigned long)dev);
+}
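
The init path wires up a timer whose only job is to punt to a workqueue: timer callbacks run in atomic context and may not sleep, so the mutex-taking cleanup has to happen in vc4_bo_cache_time_work() instead. The indirection in isolation, as a kernel-style sketch not tied to vc4 specifics:

	#include <linux/jiffies.h>
	#include <linux/timer.h>
	#include <linux/workqueue.h>

	static struct work_struct cleanup_work;
	static struct timer_list cleanup_timer;

	static void cleanup_fn(struct work_struct *work)
	{
		/* Process context: taking mutexes and sleeping are fine here. */
	}

	static void cleanup_timer_fn(unsigned long data)
	{
		/* Atomic context: do nothing but schedule the real work. */
		schedule_work(&cleanup_work);
	}

	static void cleanup_setup(void)
	{
		INIT_WORK(&cleanup_work, cleanup_fn);
		setup_timer(&cleanup_timer, cleanup_timer_fn, 0);
		mod_timer(&cleanup_timer, jiffies + msecs_to_jiffies(1000));
	}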
+
+void vc4_bo_cache_destroy(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ del_timer(&vc4->bo_cache.time_timer);
+ cancel_work_sync(&vc4->bo_cache.time_work);
+
+ vc4_bo_cache_purge(dev);
+
+ if (vc4->bo_stats.num_allocated) {
+ DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
+ vc4_bo_stats_dump(vc4);
+ }
+}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 265064c62d49..8d0d70e51ef2 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -35,6 +35,7 @@
#include "drm_atomic_helper.h"
#include "drm_crtc_helper.h"
#include "linux/clk.h"
+#include "drm_fb_cma_helper.h"
#include "linux/component.h"
#include "linux/of_device.h"
#include "vc4_drv.h"
@@ -476,10 +477,106 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
return ret;
}
+struct vc4_async_flip_state {
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_pending_vblank_event *event;
+
+ struct vc4_seqno_cb cb;
+};
+
+/* Called when the V3D execution for the BO being flipped to is done, so that
+ * we can actually update the plane's address to point to it.
+ */
+static void
+vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
+{
+ struct vc4_async_flip_state *flip_state =
+ container_of(cb, struct vc4_async_flip_state, cb);
+ struct drm_crtc *crtc = flip_state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_plane *plane = crtc->primary;
+
+ vc4_plane_async_set_fb(plane, flip_state->fb);
+ if (flip_state->event) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, flip_state->event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ drm_framebuffer_unreference(flip_state->fb);
+ kfree(flip_state);
+
+ up(&vc4->async_modeset);
+}
+
+/* Implements async (non-vblank-synced) page flips.
+ *
+ * The page flip ioctl needs to return immediately, so we grab the
+ * modeset semaphore on the pipe, and queue the address update for
+ * when V3D is done with the BO being flipped to.
+ */
+static int vc4_async_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_plane *plane = crtc->primary;
+ int ret = 0;
+ struct vc4_async_flip_state *flip_state;
+ struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
+ struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+
+ flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
+ if (!flip_state)
+ return -ENOMEM;
+
+ drm_framebuffer_reference(fb);
+ flip_state->fb = fb;
+ flip_state->crtc = crtc;
+ flip_state->event = event;
+
+ /* Make sure all other async modesets have landed. */
+ ret = down_interruptible(&vc4->async_modeset);
+ if (ret) {
+ kfree(flip_state);
+ return ret;
+ }
+
+ /* Immediately update the plane's legacy fb pointer, so that later
+ * modeset prep sees the state that will be present when the semaphore
+ * is released.
+ */
+ drm_atomic_set_fb_for_plane(plane->state, fb);
+ plane->fb = fb;
+
+ vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
+ vc4_async_page_flip_complete);
+
+ /* Driver takes ownership of state on successful async commit. */
+ return 0;
+}
+
+static int vc4_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ return vc4_async_page_flip(crtc, fb, event, flags);
+ else
+ return drm_atomic_helper_page_flip(crtc, fb, event, flags);
+}
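
Userspace opts into the async path by setting DRM_MODE_PAGE_FLIP_ASYNC on the page-flip ioctl; anything else falls through to the vblank-synced atomic helper. Via libdrm that looks like the following, where crtc_id and fb_id are placeholders:

	#include <stdint.h>
	#include <xf86drmMode.h>

	static int queue_async_flip(int fd, uint32_t crtc_id, uint32_t fb_id,
				    void *user_data)
	{
		return drmModePageFlip(fd, crtc_id, fb_id,
				       DRM_MODE_PAGE_FLIP_EVENT |
				       DRM_MODE_PAGE_FLIP_ASYNC,
				       user_data);
	}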
+
static const struct drm_crtc_funcs vc4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = vc4_crtc_destroy,
- .page_flip = drm_atomic_helper_page_flip,
+ .page_flip = vc4_page_flip,
.set_property = NULL,
.cursor_set = NULL, /* handled by drm_mode_cursor_universal */
.cursor_move = NULL, /* handled by drm_mode_cursor_universal */
@@ -606,7 +703,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
}
drm_crtc_init_with_planes(drm, crtc, primary_plane, cursor_plane,
- &vc4_crtc_funcs);
+ &vc4_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
primary_plane->crtc = crtc;
cursor_plane->crtc = crtc;
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 4297b0a5b74e..d76ad10b07fd 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -16,11 +16,14 @@
#include "vc4_regs.h"
static const struct drm_info_list vc4_debugfs_list[] = {
+ {"bo_stats", vc4_bo_stats_debugfs, 0},
{"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
{"hvs_regs", vc4_hvs_debugfs_regs, 0},
{"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
{"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
{"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2},
+ {"v3d_ident", vc4_v3d_debugfs_ident, 0},
+ {"v3d_regs", vc4_v3d_debugfs_regs, 0},
};
#define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index d5db9e0f3b73..f1655fff8425 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include "drm_fb_cma_helper.h"
+#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -63,7 +64,7 @@ static const struct file_operations vc4_drm_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
- .mmap = drm_gem_cma_mmap,
+ .mmap = vc4_mmap,
.poll = drm_poll,
.read = drm_read,
#ifdef CONFIG_COMPAT
@@ -73,16 +74,30 @@ static const struct file_operations vc4_drm_fops = {
};
static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
+ DRM_ROOT_ONLY),
};
static struct drm_driver vc4_drm_driver = {
.driver_features = (DRIVER_MODESET |
DRIVER_ATOMIC |
DRIVER_GEM |
+ DRIVER_HAVE_IRQ |
DRIVER_PRIME),
.lastclose = vc4_lastclose,
.preclose = vc4_drm_preclose,
+ .irq_handler = vc4_irq,
+ .irq_preinstall = vc4_irq_preinstall,
+ .irq_postinstall = vc4_irq_postinstall,
+ .irq_uninstall = vc4_irq_uninstall,
+
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
.get_vblank_counter = drm_vblank_count,
@@ -92,18 +107,19 @@ static struct drm_driver vc4_drm_driver = {
.debugfs_cleanup = vc4_debugfs_cleanup,
#endif
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_create_object = vc4_create_object,
+ .gem_free_object = vc4_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_export = vc4_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vmap = vc4_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_prime_mmap = vc4_prime_mmap,
.dumb_create = vc4_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -168,15 +184,17 @@ static int vc4_drm_bind(struct device *dev)
vc4->dev = drm;
drm->dev_private = vc4;
- drm_dev_set_unique(drm, dev_name(dev));
+ vc4_bo_cache_init(drm);
drm_mode_config_init(drm);
if (ret)
goto unref;
+ vc4_gem_init(drm);
+
ret = component_bind_all(dev, drm);
if (ret)
- goto unref;
+ goto gem_destroy;
ret = drm_dev_register(drm, 0);
if (ret < 0)
@@ -200,8 +218,11 @@ unregister:
drm_dev_unregister(drm);
unbind_all:
component_unbind_all(dev, drm);
+gem_destroy:
+ vc4_gem_destroy(drm);
unref:
drm_dev_unref(drm);
+ vc4_bo_cache_destroy(drm);
return ret;
}
@@ -228,6 +249,7 @@ static struct platform_driver *const component_drivers[] = {
&vc4_hdmi_driver,
&vc4_crtc_driver,
&vc4_hvs_driver,
+ &vc4_v3d_driver,
};
static int vc4_platform_drm_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index fd8319fa682e..080865ec2bae 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -15,8 +15,89 @@ struct vc4_dev {
struct vc4_hdmi *hdmi;
struct vc4_hvs *hvs;
struct vc4_crtc *crtc[3];
+ struct vc4_v3d *v3d;
struct drm_fbdev_cma *fbdev;
+
+ struct vc4_hang_state *hang_state;
+
+ /* The kernel-space BO cache. Tracks buffers that have been
+ * unreferenced by all other users (refcounts of 0!) but not
+ * yet freed, so we can do cheap allocations.
+ */
+ struct vc4_bo_cache {
+ /* Array of list heads for entries in the BO cache,
+ * based on number of pages, so we can do O(1) lookups
+ * in the cache when allocating.
+ */
+ struct list_head *size_list;
+ uint32_t size_list_size;
+
+ /* List of all BOs in the cache, ordered by age, so we
+ * can do O(1) lookups when trying to free old
+ * buffers.
+ */
+ struct list_head time_list;
+ struct work_struct time_work;
+ struct timer_list time_timer;
+ } bo_cache;
+
+ struct vc4_bo_stats {
+ u32 num_allocated;
+ u32 size_allocated;
+ u32 num_cached;
+ u32 size_cached;
+ } bo_stats;
+
+ /* Protects bo_cache and the BO stats. */
+ struct mutex bo_lock;
+
+ /* Sequence number for the last job queued in job_list.
+ * Starts at 0 (no jobs emitted).
+ */
+ uint64_t emit_seqno;
+
+ /* Sequence number for the last completed job on the GPU.
+ * Starts at 0 (no jobs completed).
+ */
+ uint64_t finished_seqno;
+
+ /* List of all struct vc4_exec_info for jobs to be executed.
+ * The first job in the list is the one currently programmed
+ * into ct0ca/ct1ca for execution.
+ */
+ struct list_head job_list;
+ /* List of the finished vc4_exec_infos waiting to be freed by
+ * job_done_work.
+ */
+ struct list_head job_done_list;
+ /* Spinlock used to synchronize the job_list and seqno
+ * accesses between the IRQ handler and GEM ioctls.
+ */
+ spinlock_t job_lock;
+ wait_queue_head_t job_wait_queue;
+ struct work_struct job_done_work;
+
+ /* List of struct vc4_seqno_cb for callbacks to be made from a
+ * workqueue when the given seqno is passed.
+ */
+ struct list_head seqno_cb_list;
+
+ /* The binner overflow memory that's currently set up in
+ * BPOA/BPOS registers. When overflow occurs and a new one is
+ * allocated, the previous one will be moved to
+ * vc4->current_exec's free list.
+ */
+ struct vc4_bo *overflow_mem;
+ struct work_struct overflow_mem_work;
+
+ struct {
+ uint32_t last_ct0ca, last_ct1ca;
+ struct timer_list timer;
+ struct work_struct reset_work;
+ } hangcheck;
+
+ struct semaphore async_modeset;
};
static inline struct vc4_dev *
@@ -27,6 +108,25 @@ to_vc4_dev(struct drm_device *dev)
struct vc4_bo {
struct drm_gem_cma_object base;
+
+ /* seqno of the last job to render to this BO. */
+ uint64_t seqno;
+
+ /* List entry for the BO's position in either
+ * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
+ */
+ struct list_head unref_head;
+
+ /* Time in jiffies when the BO was put in vc4->bo_cache. */
+ unsigned long free_time;
+
+ /* List entry for the BO's position in vc4_dev->bo_cache.size_list */
+ struct list_head size_head;
+
+ /* Struct for shader validation state, if created by
+ * DRM_IOCTL_VC4_CREATE_SHADER_BO.
+ */
+ struct vc4_validated_shader_info *validated_shader;
};
static inline struct vc4_bo *
@@ -35,6 +135,17 @@ to_vc4_bo(struct drm_gem_object *bo)
return (struct vc4_bo *)bo;
}
+struct vc4_seqno_cb {
+ struct work_struct work;
+ uint64_t seqno;
+ void (*func)(struct vc4_seqno_cb *cb);
+};
+
+struct vc4_v3d {
+ struct platform_device *pdev;
+ void __iomem *regs;
+};
+
struct vc4_hvs {
struct platform_device *pdev;
void __iomem *regs;
@@ -72,9 +183,142 @@ to_vc4_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_encoder, base);
}
+#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
+#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
+struct vc4_exec_info {
+ /* Sequence number for this bin/render job. */
+ uint64_t seqno;
+
+ /* Kernel-space copy of the ioctl arguments */
+ struct drm_vc4_submit_cl *args;
+
+ /* This is the array of BOs that were looked up at the start of exec.
+ * Command validation will use indices into this array.
+ */
+ struct drm_gem_cma_object **bo;
+ uint32_t bo_count;
+
+ /* Pointers for our position in vc4->job_list */
+ struct list_head head;
+
+ /* List of other BOs used in the job that need to be released
+ * once the job is complete.
+ */
+ struct list_head unref_list;
+
+ /* Current unvalidated indices into @bo loaded by the non-hardware
+ * VC4_PACKET_GEM_HANDLES.
+ */
+ uint32_t bo_index[2];
+
+ /* This is the BO where we store the validated command lists, shader
+ * records, and uniforms.
+ */
+ struct drm_gem_cma_object *exec_bo;
+
+ /**
+ * This tracks the per-shader-record state (packet 64) that
+ * determines the length of the shader record and the offset
+ * it's expected to be found at. It gets read in from the
+ * command lists.
+ */
+ struct vc4_shader_state {
+ uint32_t addr;
+ /* Maximum vertex index referenced by any primitive using this
+ * shader state.
+ */
+ uint32_t max_index;
+ } *shader_state;
+
+ /** How many shader states the user declared they were using. */
+ uint32_t shader_state_size;
+ /** How many shader state records the validator has seen. */
+ uint32_t shader_state_count;
+
+ bool found_tile_binning_mode_config_packet;
+ bool found_start_tile_binning_packet;
+ bool found_increment_semaphore_packet;
+ bool found_flush;
+ uint8_t bin_tiles_x, bin_tiles_y;
+ struct drm_gem_cma_object *tile_bo;
+ uint32_t tile_alloc_offset;
+
+ /**
+ * Computed addresses pointing into exec_bo where we start the
+ * bin thread (ct0) and render thread (ct1).
+ */
+ uint32_t ct0ca, ct0ea;
+ uint32_t ct1ca, ct1ea;
+
+ /* Pointer to the unvalidated bin CL (if present). */
+ void *bin_u;
+
+ /* Pointers to the shader recs. The paddr gets incremented as CL
+ * packets are relocated in validate_gl_shader_state, and the vaddrs
+ * (u and v) get incremented and the size decremented as the shader
+ * recs themselves are validated.
+ */
+ void *shader_rec_u;
+ void *shader_rec_v;
+ uint32_t shader_rec_p;
+ uint32_t shader_rec_size;
+
+ /* Pointers to the uniform data. These pointers are incremented, and
+ * size decremented, as each batch of uniforms is uploaded.
+ */
+ void *uniforms_u;
+ void *uniforms_v;
+ uint32_t uniforms_p;
+ uint32_t uniforms_size;
+};
+
+static inline struct vc4_exec_info *
+vc4_first_job(struct vc4_dev *vc4)
+{
+ if (list_empty(&vc4->job_list))
+ return NULL;
+ return list_first_entry(&vc4->job_list, struct vc4_exec_info, head);
+}
+
+/**
+ * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
+ * setup parameters.
+ *
+ * This will be used at draw time to relocate the reference to the texture
+ * contents in p0, and validate that the offset combined with
+ * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
+ * Note that the hardware treats unprovided config parameters as 0, so not all
+ * of them need to be set up for every texture sample, and we'll store ~0 as
+ * the offset to mark the unused ones.
+ *
+ * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
+ * Setup") for definitions of the texture parameters.
+ */
+struct vc4_texture_sample_info {
+ bool is_direct;
+ uint32_t p_offset[4];
+};
+
+/**
+ * struct vc4_validated_shader_info - information about validated shaders that
+ * needs to be used from command list validation.
+ *
+ * For a given shader, each time a shader state record references it, we need
+ * to verify that the shader doesn't read more uniforms than the shader state
+ * record's uniform BO pointer can provide, and we need to apply relocations
+ * and validate the shader state record's uniforms that define the texture
+ * samples.
+ */
+struct vc4_validated_shader_info {
+ uint32_t uniforms_size;
+ uint32_t uniforms_src_size;
+ uint32_t num_texture_samples;
+ struct vc4_texture_sample_info *texture_samples;
+};
+
/**
* _wait_for - magic (register) wait macro
*
@@ -104,13 +348,29 @@ to_vc4_encoder(struct drm_encoder *encoder)
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
/* vc4_bo.c */
+struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
-struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size);
+struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
+ bool from_cache);
int vc4_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
+int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
+int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+void *vc4_prime_vmap(struct drm_gem_object *obj);
+void vc4_bo_cache_init(struct drm_device *dev);
+void vc4_bo_cache_destroy(struct drm_device *dev);
+int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
@@ -126,10 +386,34 @@ void vc4_debugfs_cleanup(struct drm_minor *minor);
/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
+/* vc4_gem.c */
+void vc4_gem_init(struct drm_device *dev);
+void vc4_gem_destroy(struct drm_device *dev);
+int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void vc4_submit_next_job(struct drm_device *dev);
+int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
+ uint64_t timeout_ns, bool interruptible);
+void vc4_job_handle_completed(struct vc4_dev *vc4);
+int vc4_queue_seqno_cb(struct drm_device *dev,
+ struct vc4_seqno_cb *cb, uint64_t seqno,
+ void (*func)(struct vc4_seqno_cb *cb));
+
/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
+/* vc4_irq.c */
+irqreturn_t vc4_irq(int irq, void *arg);
+void vc4_irq_preinstall(struct drm_device *dev);
+int vc4_irq_postinstall(struct drm_device *dev);
+void vc4_irq_uninstall(struct drm_device *dev);
+void vc4_irq_reset(struct drm_device *dev);
+
/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
@@ -143,3 +427,35 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(struct drm_plane_state *state);
+void vc4_plane_async_set_fb(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+
+/* vc4_v3d.c */
+extern struct platform_driver vc4_v3d_driver;
+int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
+int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
+int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
+
+/* vc4_validate.c */
+int
+vc4_validate_bin_cl(struct drm_device *dev,
+ void *validated,
+ void *unvalidated,
+ struct vc4_exec_info *exec);
+
+int
+vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
+
+struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
+ uint32_t hindex);
+
+int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
+
+bool vc4_check_tex_size(struct vc4_exec_info *exec,
+ struct drm_gem_cma_object *fbo,
+ uint32_t offset, uint8_t tiling_format,
+ uint32_t width, uint32_t height, uint8_t cpp);
+
+/* vc4_validate_shader.c */
+struct vc4_validated_shader_info *
+vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
new file mode 100644
index 000000000000..48ce30a6f4b5
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -0,0 +1,866 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include "uapi/drm/vc4_drm.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+#include "vc4_trace.h"
+
+static void
+vc4_queue_hangcheck(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ mod_timer(&vc4->hangcheck.timer,
+ round_jiffies_up(jiffies + msecs_to_jiffies(100)));
+}
+
+struct vc4_hang_state {
+ struct drm_vc4_get_hang_state user_state;
+
+ u32 bo_count;
+ struct drm_gem_object **bo;
+};
+
+static void
+vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
+{
+ unsigned int i;
+
+ mutex_lock(&dev->struct_mutex);
+ for (i = 0; i < state->user_state.bo_count; i++)
+ drm_gem_object_unreference(state->bo[i]);
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(state);
+}
+
+int
+vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vc4_get_hang_state *get_state = data;
+ struct drm_vc4_get_hang_state_bo *bo_state;
+ struct vc4_hang_state *kernel_state;
+ struct drm_vc4_get_hang_state *state;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ unsigned long irqflags;
+ u32 i;
+ int ret = 0;
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ kernel_state = vc4->hang_state;
+ if (!kernel_state) {
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ return -ENOENT;
+ }
+ state = &kernel_state->user_state;
+
+ /* If the user's array isn't big enough, just return the
+ * required array size.
+ */
+ if (get_state->bo_count < state->bo_count) {
+ get_state->bo_count = state->bo_count;
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ return 0;
+ }
+
+ vc4->hang_state = NULL;
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+
+ /* Save the user's BO pointer, so we don't stomp it with the memcpy. */
+ state->bo = get_state->bo;
+ memcpy(get_state, state, sizeof(*state));
+
+ bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
+ if (!bo_state) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ for (i = 0; i < state->bo_count; i++) {
+ struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
+ u32 handle;
+
+ ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
+ &handle);
+
+ if (ret) {
+ state->bo_count = i;
+ kfree(bo_state);
+ goto err_free;
+ }
+ bo_state[i].handle = handle;
+ bo_state[i].paddr = vc4_bo->base.paddr;
+ bo_state[i].size = vc4_bo->base.base.size;
+ }
+
+ if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
+ bo_state,
+ state->bo_count * sizeof(*bo_state)))
+ ret = -EFAULT;
+
+ kfree(bo_state);
+
+err_free:
+ vc4_free_hang_state(dev, kernel_state);
+
+ return ret;
+}
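+
+/* Typical userspace use of this ioctl is two passes (a sketch, not
+ * part of this patch): call once with bo_count = 0 to learn the
+ * required array size, then allocate and call again:
+ *
+ * struct drm_vc4_get_hang_state get = { 0 };
+ *
+ * ioctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
+ * get.bo = (uintptr_t)calloc(get.bo_count, sizeof(struct drm_vc4_get_hang_state_bo));
+ * ioctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
+ */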
+
+static void
+vc4_save_hang_state(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_vc4_get_hang_state *state;
+ struct vc4_hang_state *kernel_state;
+ struct vc4_exec_info *exec;
+ struct vc4_bo *bo;
+ unsigned long irqflags;
+ unsigned int i, unref_list_count;
+
+ kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
+ if (!kernel_state)
+ return;
+
+ state = &kernel_state->user_state;
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ exec = vc4_first_job(vc4);
+ if (!exec) {
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ kfree(kernel_state);
+ return;
+ }
+
+ unref_list_count = 0;
+ list_for_each_entry(bo, &exec->unref_list, unref_head)
+ unref_list_count++;
+
+ state->bo_count = exec->bo_count + unref_list_count;
+ kernel_state->bo = kcalloc(state->bo_count, sizeof(*kernel_state->bo),
+ GFP_ATOMIC);
+ if (!kernel_state->bo) {
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ kfree(kernel_state);
+ return;
+ }
+
+ for (i = 0; i < exec->bo_count; i++) {
+ drm_gem_object_reference(&exec->bo[i]->base);
+ kernel_state->bo[i] = &exec->bo[i]->base;
+ }
+
+ list_for_each_entry(bo, &exec->unref_list, unref_head) {
+ drm_gem_object_reference(&bo->base.base);
+ kernel_state->bo[i] = &bo->base.base;
+ i++;
+ }
+
+ state->start_bin = exec->ct0ca;
+ state->start_render = exec->ct1ca;
+
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+
+ state->ct0ca = V3D_READ(V3D_CTNCA(0));
+ state->ct0ea = V3D_READ(V3D_CTNEA(0));
+
+ state->ct1ca = V3D_READ(V3D_CTNCA(1));
+ state->ct1ea = V3D_READ(V3D_CTNEA(1));
+
+ state->ct0cs = V3D_READ(V3D_CTNCS(0));
+ state->ct1cs = V3D_READ(V3D_CTNCS(1));
+
+ state->ct0ra0 = V3D_READ(V3D_CT00RA0);
+ state->ct1ra0 = V3D_READ(V3D_CT01RA0);
+
+ state->bpca = V3D_READ(V3D_BPCA);
+ state->bpcs = V3D_READ(V3D_BPCS);
+ state->bpoa = V3D_READ(V3D_BPOA);
+ state->bpos = V3D_READ(V3D_BPOS);
+
+ state->vpmbase = V3D_READ(V3D_VPMBASE);
+
+ state->dbge = V3D_READ(V3D_DBGE);
+ state->fdbgo = V3D_READ(V3D_FDBGO);
+ state->fdbgb = V3D_READ(V3D_FDBGB);
+ state->fdbgr = V3D_READ(V3D_FDBGR);
+ state->fdbgs = V3D_READ(V3D_FDBGS);
+ state->errstat = V3D_READ(V3D_ERRSTAT);
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ if (vc4->hang_state) {
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ vc4_free_hang_state(dev, kernel_state);
+ } else {
+ vc4->hang_state = kernel_state;
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ }
+}
+
+static void
+vc4_reset(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ DRM_INFO("Resetting GPU.\n");
+ vc4_v3d_set_power(vc4, false);
+ vc4_v3d_set_power(vc4, true);
+
+ vc4_irq_reset(dev);
+
+ /* Rearm the hangcheck -- another job might have been waiting
+ * for our hung one to get kicked off, and vc4_irq_reset()
+ * would have started it.
+ */
+ vc4_queue_hangcheck(dev);
+}
+
+static void
+vc4_reset_work(struct work_struct *work)
+{
+ struct vc4_dev *vc4 =
+ container_of(work, struct vc4_dev, hangcheck.reset_work);
+
+ vc4_save_hang_state(vc4->dev);
+
+ vc4_reset(vc4->dev);
+}
+
+static void
+vc4_hangcheck_elapsed(unsigned long data)
+{
+ struct drm_device *dev = (struct drm_device *)data;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ uint32_t ct0ca, ct1ca;
+
+ /* If idle, we can stop watching for hangs. */
+ if (list_empty(&vc4->job_list))
+ return;
+
+ ct0ca = V3D_READ(V3D_CTNCA(0));
+ ct1ca = V3D_READ(V3D_CTNCA(1));
+
+ /* If we've made any progress in execution, rearm the timer
+ * and wait.
+ */
+ if (ct0ca != vc4->hangcheck.last_ct0ca ||
+ ct1ca != vc4->hangcheck.last_ct1ca) {
+ vc4->hangcheck.last_ct0ca = ct0ca;
+ vc4->hangcheck.last_ct1ca = ct1ca;
+ vc4_queue_hangcheck(dev);
+ return;
+ }
+
+ /* We've gone too long with no progress, reset. This has to
+ * be done from a work struct, since resetting can sleep and
+ * this timer hook isn't allowed to.
+ */
+ schedule_work(&vc4->hangcheck.reset_work);
+}
+
+static void
+submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ /* Set the current and end address of the control list.
+ * Writing the end register is what starts the job.
+ */
+ V3D_WRITE(V3D_CTNCA(thread), start);
+ V3D_WRITE(V3D_CTNEA(thread), end);
+}
+
+int
+vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
+ bool interruptible)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret = 0;
+ unsigned long timeout_expire;
+ DEFINE_WAIT(wait);
+
+ if (vc4->finished_seqno >= seqno)
+ return 0;
+
+ if (timeout_ns == 0)
+ return -ETIME;
+
+ timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);
+
+ trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
+ for (;;) {
+ prepare_to_wait(&vc4->job_wait_queue, &wait,
+ interruptible ? TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
+
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ if (vc4->finished_seqno >= seqno)
+ break;
+
+ if (timeout_ns != ~0ull) {
+ if (time_after_eq(jiffies, timeout_expire)) {
+ ret = -ETIME;
+ break;
+ }
+ schedule_timeout(timeout_expire - jiffies);
+ } else {
+ schedule();
+ }
+ }
+
+ finish_wait(&vc4->job_wait_queue, &wait);
+ trace_vc4_wait_for_seqno_end(dev, seqno);
+
+ if (ret && ret != -ERESTARTSYS)
+ DRM_ERROR("timeout waiting for render thread idle\n");
+
+ return ret;
+}
+
+static void
+vc4_flush_caches(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ /* Flush the GPU L2 caches. These caches sit on top of system
+ * L3 (the 128kb or so shared with the CPU), and are
+ * non-allocating in the L3.
+ */
+ V3D_WRITE(V3D_L2CACTL,
+ V3D_L2CACTL_L2CCLR);
+
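+ /* Clear the slice caches: the two texture caches (T0CC/T1CC), the
+ * uniforms cache (UCC) and the instruction cache (ICC), one bit per
+ * slice in each field.
+ */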
+ V3D_WRITE(V3D_SLCACTL,
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
+}
+
+/* Sets the registers for the next job to actually be executed in
+ * the hardware.
+ *
+ * The job_lock should be held during this.
+ */
+void
+vc4_submit_next_job(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_exec_info *exec = vc4_first_job(vc4);
+
+ if (!exec)
+ return;
+
+ vc4_flush_caches(dev);
+
+ /* Disable the binner's pre-loaded overflow memory address */
+ V3D_WRITE(V3D_BPOA, 0);
+ V3D_WRITE(V3D_BPOS, 0);
+
+ if (exec->ct0ca != exec->ct0ea)
+ submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
+ submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
+}
+
+static void
+vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
+{
+ struct vc4_bo *bo;
+ unsigned i;
+
+ for (i = 0; i < exec->bo_count; i++) {
+ bo = to_vc4_bo(&exec->bo[i]->base);
+ bo->seqno = seqno;
+ }
+
+ list_for_each_entry(bo, &exec->unref_list, unref_head) {
+ bo->seqno = seqno;
+ }
+}
+
+/* Queues a struct vc4_exec_info for execution. If no job is
+ * currently executing, then submits it.
+ *
+ * Unlike most GPUs, our hardware only handles one command list at a
+ * time. To queue multiple jobs at once, we'd need to edit the
+ * previous command list to have a jump to the new one at the end, and
+ * then bump the end address. That's a change for a later date,
+ * though.
+ */
+static void
+vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ uint64_t seqno;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+
+ seqno = ++vc4->emit_seqno;
+ exec->seqno = seqno;
+ vc4_update_bo_seqnos(exec, seqno);
+
+ list_add_tail(&exec->head, &vc4->job_list);
+
+ /* If no job was executing, kick ours off. Otherwise, it'll
+ * get started when the previous job's frame done interrupt
+ * occurs.
+ */
+ if (vc4_first_job(vc4) == exec) {
+ vc4_submit_next_job(dev);
+ vc4_queue_hangcheck(dev);
+ }
+
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
+
+/**
+ * Looks up a bunch of GEM handles for BOs and stores the array for
+ * use in the command validator that actually writes relocated
+ * addresses pointing to them.
+ */
+static int
+vc4_cl_lookup_bos(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct vc4_exec_info *exec)
+{
+ struct drm_vc4_submit_cl *args = exec->args;
+ uint32_t *handles;
+ int ret = 0;
+ int i;
+
+ exec->bo_count = args->bo_handle_count;
+
+ if (!exec->bo_count) {
+ /* See comment on bo_index for why we have to check
+ * this.
+ */
+ DRM_ERROR("Rendering requires BOs to validate\n");
+ return -EINVAL;
+ }
+
+ exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
+ GFP_KERNEL);
+ if (!exec->bo) {
+ DRM_ERROR("Failed to allocate validated BO pointers\n");
+ return -ENOMEM;
+ }
+
+ handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
+ if (!handles) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed to allocate incoming GEM handles\n");
+ goto fail;
+ }
+
+ if (copy_from_user(handles,
+ (void __user *)(uintptr_t)args->bo_handles,
+ exec->bo_count * sizeof(uint32_t))) {
+ ret = -EFAULT;
+ DRM_ERROR("Failed to copy in GEM handles\n");
+ goto fail;
+ }
+
+ spin_lock(&file_priv->table_lock);
+ for (i = 0; i < exec->bo_count; i++) {
+ struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
+ handles[i]);
+ if (!bo) {
+ DRM_ERROR("Failed to look up GEM BO %d: %d\n",
+ i, handles[i]);
+ ret = -EINVAL;
+ spin_unlock(&file_priv->table_lock);
+ goto fail;
+ }
+ drm_gem_object_reference(bo);
+ exec->bo[i] = (struct drm_gem_cma_object *)bo;
+ }
+ spin_unlock(&file_priv->table_lock);
+
+fail:
+ drm_free_large(handles);
+ return ret;
+}
+
+static int
+vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+ struct drm_vc4_submit_cl *args = exec->args;
+ void *temp = NULL;
+ void *bin;
+ int ret = 0;
+ uint32_t bin_offset = 0;
+ uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
+ 16);
+ uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
+ uint32_t exec_size = uniforms_offset + args->uniforms_size;
+ uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
+ args->shader_rec_count);
+ struct vc4_bo *bo;
+
+ if (uniforms_offset < shader_rec_offset ||
+ exec_size < uniforms_offset ||
+ args->shader_rec_count >= (UINT_MAX /
+ sizeof(struct vc4_shader_state)) ||
+ temp_size < exec_size) {
+ DRM_ERROR("overflow in exec arguments\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
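+ /* Resulting layout (sketch): temp holds the user's copies, and
+ * exec_bo will hold the validated versions at the same offsets:
+ *
+ * [0, bin_cl_size): binner command list
+ * [shader_rec_offset, uniforms_offset): shader records
+ * [uniforms_offset, exec_size): uniforms
+ * [exec_size, temp_size): vc4_shader_state array (temp only)
+ */
+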
+ /* Allocate space where we'll store the copied in user command lists
+ * and shader records.
+ *
+ * We don't just copy directly into the BOs because we need to
+ * read the contents back for validation, and I think the
+ * bo->vaddr is uncached access.
+ */
+ temp = kmalloc(temp_size, GFP_KERNEL);
+ if (!temp) {
+ DRM_ERROR("Failed to allocate storage for copying "
+ "in bin/render CLs.\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ bin = temp + bin_offset;
+ exec->shader_rec_u = temp + shader_rec_offset;
+ exec->uniforms_u = temp + uniforms_offset;
+ exec->shader_state = temp + exec_size;
+ exec->shader_state_size = args->shader_rec_count;
+
+ if (copy_from_user(bin,
+ (void __user *)(uintptr_t)args->bin_cl,
+ args->bin_cl_size)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ if (copy_from_user(exec->shader_rec_u,
+ (void __user *)(uintptr_t)args->shader_rec,
+ args->shader_rec_size)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ if (copy_from_user(exec->uniforms_u,
+ (void __user *)(uintptr_t)args->uniforms,
+ args->uniforms_size)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ bo = vc4_bo_create(dev, exec_size, true);
+ if (!bo) {
+ DRM_ERROR("Couldn't allocate BO for binning\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ exec->exec_bo = &bo->base;
+
+ list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
+ &exec->unref_list);
+
+ exec->ct0ca = exec->exec_bo->paddr + bin_offset;
+
+ exec->bin_u = bin;
+
+ exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
+ exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
+ exec->shader_rec_size = args->shader_rec_size;
+
+ exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
+ exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
+ exec->uniforms_size = args->uniforms_size;
+
+ ret = vc4_validate_bin_cl(dev,
+ exec->exec_bo->vaddr + bin_offset,
+ bin,
+ exec);
+ if (ret)
+ goto fail;
+
+ ret = vc4_validate_shader_recs(dev, exec);
+
+fail:
+ kfree(temp);
+ return ret;
+}
+
+static void
+vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+ unsigned i;
+
+ /* Need the struct lock for drm_gem_object_unreference(). */
+ mutex_lock(&dev->struct_mutex);
+ if (exec->bo) {
+ for (i = 0; i < exec->bo_count; i++)
+ drm_gem_object_unreference(&exec->bo[i]->base);
+ kfree(exec->bo);
+ }
+
+ while (!list_empty(&exec->unref_list)) {
+ struct vc4_bo *bo = list_first_entry(&exec->unref_list,
+ struct vc4_bo, unref_head);
+ list_del(&bo->unref_head);
+ drm_gem_object_unreference(&bo->base.base);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(exec);
+}
+
+void
+vc4_job_handle_completed(struct vc4_dev *vc4)
+{
+ unsigned long irqflags;
+ struct vc4_seqno_cb *cb, *cb_temp;
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ while (!list_empty(&vc4->job_done_list)) {
+ struct vc4_exec_info *exec =
+ list_first_entry(&vc4->job_done_list,
+ struct vc4_exec_info, head);
+ list_del(&exec->head);
+
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ vc4_complete_exec(vc4->dev, exec);
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ }
+
+ list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
+ if (cb->seqno <= vc4->finished_seqno) {
+ list_del_init(&cb->work.entry);
+ schedule_work(&cb->work);
+ }
+ }
+
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
+
+static void vc4_seqno_cb_work(struct work_struct *work)
+{
+ struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
+
+ cb->func(cb);
+}
+
+int vc4_queue_seqno_cb(struct drm_device *dev,
+ struct vc4_seqno_cb *cb, uint64_t seqno,
+ void (*func)(struct vc4_seqno_cb *cb))
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret = 0;
+ unsigned long irqflags;
+
+ cb->func = func;
+ INIT_WORK(&cb->work, vc4_seqno_cb_work);
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ if (seqno > vc4->finished_seqno) {
+ cb->seqno = seqno;
+ list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
+ } else {
+ schedule_work(&cb->work);
+ }
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+
+ return ret;
+}
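+
+/* Sketch of a consumer of vc4_queue_seqno_cb() (my_flip_done() is a
+ * hypothetical callback; the async modeset code later in this series
+ * follows this pattern):
+ *
+ * static void my_flip_done(struct vc4_seqno_cb *cb)
+ * {
+ *         struct my_state *s = container_of(cb, struct my_state, cb);
+ *         ...finish the flip using s...
+ * }
+ *
+ * vc4_queue_seqno_cb(dev, &s->cb, bo->seqno, my_flip_done);
+ */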
+
+/* Scheduled when any job has been completed, this walks the list of
+ * jobs that have completed, unrefs their BOs and frees their exec
+ * structs.
+ */
+static void
+vc4_job_done_work(struct work_struct *work)
+{
+ struct vc4_dev *vc4 =
+ container_of(work, struct vc4_dev, job_done_work);
+
+ vc4_job_handle_completed(vc4);
+}
+
+static int
+vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
+ uint64_t seqno,
+ uint64_t *timeout_ns)
+{
+ unsigned long start = jiffies;
+ int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);
+
+ if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
+ uint64_t delta = jiffies_to_nsecs(jiffies - start);
+
+ if (*timeout_ns >= delta)
+ *timeout_ns -= delta;
+ }
+
+ return ret;
+}
+
+int
+vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vc4_wait_seqno *args = data;
+
+ return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
+ &args->timeout_ns);
+}
+
+int
+vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_vc4_wait_bo *args = data;
+ struct drm_gem_object *gem_obj;
+ struct vc4_bo *bo;
+
+ gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+ bo = to_vc4_bo(gem_obj);
+
+ ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
+ &args->timeout_ns);
+
+ drm_gem_object_unreference_unlocked(gem_obj);
+ return ret;
+}
+
+/**
+ * Submits a command list to the VC4.
+ *
+ * This is what is called batchbuffer emitting on other hardware.
+ */
+int
+vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_vc4_submit_cl *args = data;
+ struct vc4_exec_info *exec;
+ int ret;
+
+ if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
+ DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
+ return -EINVAL;
+ }
+
+ exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
+ if (!exec) {
+ DRM_ERROR("malloc failure on exec struct\n");
+ return -ENOMEM;
+ }
+
+ exec->args = args;
+ INIT_LIST_HEAD(&exec->unref_list);
+
+ ret = vc4_cl_lookup_bos(dev, file_priv, exec);
+ if (ret)
+ goto fail;
+
+ if (exec->args->bin_cl_size != 0) {
+ ret = vc4_get_bcl(dev, exec);
+ if (ret)
+ goto fail;
+ } else {
+ exec->ct0ca = 0;
+ exec->ct0ea = 0;
+ }
+
+ ret = vc4_get_rcl(dev, exec);
+ if (ret)
+ goto fail;
+
+ /* Clear this out of the struct we'll be putting in the queue,
+ * since it's part of our stack.
+ */
+ exec->args = NULL;
+
+ vc4_queue_submit(dev, exec);
+
+ /* Return the seqno for our job. */
+ args->seqno = vc4->emit_seqno;
+
+ return 0;
+
+fail:
+ vc4_complete_exec(vc4->dev, exec);
+
+ return ret;
+}
+
+void
+vc4_gem_init(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ INIT_LIST_HEAD(&vc4->job_list);
+ INIT_LIST_HEAD(&vc4->job_done_list);
+ INIT_LIST_HEAD(&vc4->seqno_cb_list);
+ spin_lock_init(&vc4->job_lock);
+
+ INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
+ setup_timer(&vc4->hangcheck.timer,
+ vc4_hangcheck_elapsed,
+ (unsigned long)dev);
+
+ INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
+}
+
+void
+vc4_gem_destroy(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ /* Waiting for exec to finish would need to be done before
+ * unregistering V3D.
+ */
+ WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
+
+ /* V3D should already have disabled its interrupt and cleared
+ * the overflow allocation registers. Now free the object.
+ */
+ if (vc4->overflow_mem) {
+ drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
+ vc4->overflow_mem = NULL;
+ }
+
+ vc4_bo_cache_destroy(dev);
+
+ if (vc4->hang_state)
+ vc4_free_hang_state(dev, vc4->hang_state);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index da9a36d6e1d1..c69c0460196b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -519,7 +519,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
WARN_ON_ONCE((HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE) == 0);
drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS);
+ DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
hdmi->connector = vc4_hdmi_connector_init(drm, hdmi->encoder);
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
new file mode 100644
index 000000000000..b68060e758db
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/** DOC: Interrupt management for the V3D engine.
+ *
+ * We have an interrupt status register (V3D_INTCTL) which reports
+ * interrupts, and where writing 1 bits clears those interrupts.
+ * There are also a pair of interrupt registers
+ * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
+ * disables that specific interrupt, and 0s written are ignored
+ * (reading either one returns the set of enabled interrupts).
+ *
+ * When we take a render frame interrupt, we need to wake the
+ * processes waiting for some frame to be done, and get the next frame
+ * submitted ASAP (so the hardware doesn't sit idle when there's work
+ * to do).
+ *
+ * When we take the binner out of memory interrupt, we need to
+ * allocate some new memory and pass it to the binner so that the
+ * current job can make progress.
+ */
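+
+/* For example (mirroring the handlers below), the out-of-memory
+ * interrupt is masked while the refill work runs, then acked and
+ * re-enabled once fresh overflow memory is in place:
+ *
+ * V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
+ * ...allocate and point the binner at new overflow memory...
+ * V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
+ * V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
+ */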
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
+ V3D_INT_FRDONE)
+
+DECLARE_WAIT_QUEUE_HEAD(render_wait);
+
+static void
+vc4_overflow_mem_work(struct work_struct *work)
+{
+ struct vc4_dev *vc4 =
+ container_of(work, struct vc4_dev, overflow_mem_work);
+ struct drm_device *dev = vc4->dev;
+ struct vc4_bo *bo;
+
+ bo = vc4_bo_create(dev, 256 * 1024, true);
+ if (!bo) {
+ DRM_ERROR("Couldn't allocate binner overflow mem\n");
+ return;
+ }
+
+ /* If there's a job executing currently, then our previous
+ * overflow allocation is getting used in that job and we need
+ * to queue it to be released when the job is done. But if no
+ * job is executing at all, then we can free the old overflow
+ * object directly.
+ *
+ * No lock necessary for this pointer since we're the only
+ * ones that update the pointer, and our workqueue won't
+ * reenter.
+ */
+ if (vc4->overflow_mem) {
+ struct vc4_exec_info *current_exec;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ current_exec = vc4_first_job(vc4);
+ if (current_exec) {
+ vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
+ list_add_tail(&vc4->overflow_mem->unref_head,
+ &current_exec->unref_list);
+ vc4->overflow_mem = NULL;
+ }
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ }
+
+ if (vc4->overflow_mem)
+ drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
+ vc4->overflow_mem = bo;
+
+ V3D_WRITE(V3D_BPOA, bo->base.paddr);
+ V3D_WRITE(V3D_BPOS, bo->base.base.size);
+ V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
+ V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
+}
+
+static void
+vc4_irq_finish_job(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_exec_info *exec = vc4_first_job(vc4);
+
+ if (!exec)
+ return;
+
+ vc4->finished_seqno++;
+ list_move_tail(&exec->head, &vc4->job_done_list);
+ vc4_submit_next_job(dev);
+
+ wake_up_all(&vc4->job_wait_queue);
+ schedule_work(&vc4->job_done_work);
+}
+
+irqreturn_t
+vc4_irq(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ uint32_t intctl;
+ irqreturn_t status = IRQ_NONE;
+
+ barrier();
+ intctl = V3D_READ(V3D_INTCTL);
+
+ /* Acknowledge the interrupts we're handling here. The render
+ * frame done interrupt will be cleared, while OUTOMEM will
+ * stay high until the underlying cause is cleared.
+ */
+ V3D_WRITE(V3D_INTCTL, intctl);
+
+ if (intctl & V3D_INT_OUTOMEM) {
+ /* Disable OUTOMEM until the work is done. */
+ V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
+ schedule_work(&vc4->overflow_mem_work);
+ status = IRQ_HANDLED;
+ }
+
+ if (intctl & V3D_INT_FRDONE) {
+ spin_lock(&vc4->job_lock);
+ vc4_irq_finish_job(dev);
+ spin_unlock(&vc4->job_lock);
+ status = IRQ_HANDLED;
+ }
+
+ return status;
+}
+
+void
+vc4_irq_preinstall(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ init_waitqueue_head(&vc4->job_wait_queue);
+ INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
+
+ /* Clear any pending interrupts someone might have left around
+ * for us.
+ */
+ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+}
+
+int
+vc4_irq_postinstall(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ /* Enable both the render done and out of memory interrupts. */
+ V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
+
+ return 0;
+}
+
+void
+vc4_irq_uninstall(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ /* Disable sending interrupts for our driver's IRQs. */
+ V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
+
+ /* Clear any pending interrupts we might have left. */
+ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+
+ cancel_work_sync(&vc4->overflow_mem_work);
+}
+
+/** Reinitializes interrupt registers when a GPU reset is performed. */
+void vc4_irq_reset(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ unsigned long irqflags;
+
+ /* Acknowledge any stale IRQs. */
+ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+
+ /*
+ * Turn all our interrupts on. Binner out of memory is the
+ * only one we expect to trigger at this point, since we've
+ * just come from poweron and haven't supplied any overflow
+ * memory yet.
+ */
+ V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ vc4_irq_finish_job(dev);
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 2e5597d10cc6..f95f2df5f8d1 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -15,6 +15,7 @@
*/
#include "drm_crtc.h"
+#include "drm_atomic.h"
#include "drm_atomic_helper.h"
#include "drm_crtc_helper.h"
#include "drm_plane_helper.h"
@@ -29,10 +30,152 @@ static void vc4_output_poll_changed(struct drm_device *dev)
drm_fbdev_cma_hotplug_event(vc4->fbdev);
}
+struct vc4_commit {
+ struct drm_device *dev;
+ struct drm_atomic_state *state;
+ struct vc4_seqno_cb cb;
+};
+
+static void
+vc4_atomic_complete_commit(struct vc4_commit *c)
+{
+ struct drm_atomic_state *state = c->state;
+ struct drm_device *dev = state->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state, false);
+
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ drm_atomic_state_free(state);
+
+ up(&vc4->async_modeset);
+
+ kfree(c);
+}
+
+static void
+vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
+{
+ struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);
+
+ vc4_atomic_complete_commit(c);
+}
+
+static struct vc4_commit *commit_init(struct drm_atomic_state *state)
+{
+ struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
+
+ if (!c)
+ return NULL;
+ c->dev = state->dev;
+ c->state = state;
+
+ return c;
+}
+
+/**
+ * vc4_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a state object that has been pre-validated with
+ * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
+ * reservation fails. Asynchronous commits are completed from a seqno
+ * callback once rendering to the new framebuffers has finished.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int vc4_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret;
+ int i;
+ uint64_t wait_seqno = 0;
+ struct vc4_commit *c;
+
+ c = commit_init(state);
+ if (!c)
+ return -ENOMEM;
+
+ /* Make sure that any outstanding modesets have finished. */
+ ret = down_interruptible(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return ret;
+ }
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret) {
+ kfree(c);
+ up(&vc4->async_modeset);
+ return ret;
+ }
+
+ for (i = 0; i < dev->mode_config.num_total_plane; i++) {
+ struct drm_plane *plane = state->planes[i];
+ struct drm_plane_state *new_state = state->plane_states[i];
+
+ if (!plane)
+ continue;
+
+ if ((plane->state->fb != new_state->fb) && new_state->fb) {
+ struct drm_gem_cma_object *cma_bo =
+ drm_fb_cma_get_gem_obj(new_state->fb, 0);
+ struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+
+ wait_seqno = max(bo->seqno, wait_seqno);
+ }
+ }
+
+ /*
+ * This is the point of no return - everything below never fails except
+ * when the hw goes bonghits. Which means we can commit the new state on
+ * the software side now.
+ */
+
+ drm_atomic_helper_swap_state(dev, state);
+
+ /*
+ * Everything below can be run asynchronously without the need to grab
+ * any modeset locks at all under one condition: It must be guaranteed
+ * that the asynchronous work has either been cancelled (if the driver
+ * supports it, which at least requires that the framebuffers get
+ * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+ * before the new state gets committed on the software side with
+ * drm_atomic_helper_swap_state().
+ *
+ * This scheme allows new atomic state updates to be prepared and
+ * checked in parallel to the asynchronous completion of the previous
+ * update. Which is important since compositors need to figure out the
+ * composition of the next frame right after having submitted the
+ * current layout.
+ */
+
+ if (async) {
+ vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
+ vc4_atomic_complete_commit_seqno_cb);
+ } else {
+ vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
+ vc4_atomic_complete_commit(c);
+ }
+
+ return 0;
+}
+
static const struct drm_mode_config_funcs vc4_mode_funcs = {
.output_poll_changed = vc4_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ .atomic_commit = vc4_atomic_commit,
.fb_create = drm_fb_cma_create,
};
@@ -41,6 +184,8 @@ int vc4_kms_load(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
+ sema_init(&vc4->async_modeset, 1);
+
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");
@@ -51,6 +196,8 @@ int vc4_kms_load(struct drm_device *dev)
dev->mode_config.max_height = 2048;
dev->mode_config.funcs = &vc4_mode_funcs;
dev->mode_config.preferred_depth = 24;
+ dev->mode_config.async_page_flip = true;
+
dev->vblank_disable_allowed = true;
drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_packet.h b/drivers/gpu/drm/vc4/vc4_packet.h
new file mode 100644
index 000000000000..0f31cc06500f
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_packet.h
@@ -0,0 +1,399 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VC4_PACKET_H
+#define VC4_PACKET_H
+
+#include "vc4_regs.h" /* for VC4_MASK, VC4_GET_FIELD, VC4_SET_FIELD */
+
+enum vc4_packet {
+ VC4_PACKET_HALT = 0,
+ VC4_PACKET_NOP = 1,
+
+ VC4_PACKET_FLUSH = 4,
+ VC4_PACKET_FLUSH_ALL = 5,
+ VC4_PACKET_START_TILE_BINNING = 6,
+ VC4_PACKET_INCREMENT_SEMAPHORE = 7,
+ VC4_PACKET_WAIT_ON_SEMAPHORE = 8,
+
+ VC4_PACKET_BRANCH = 16,
+ VC4_PACKET_BRANCH_TO_SUB_LIST = 17,
+
+ VC4_PACKET_STORE_MS_TILE_BUFFER = 24,
+ VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF = 25,
+ VC4_PACKET_STORE_FULL_RES_TILE_BUFFER = 26,
+ VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER = 27,
+ VC4_PACKET_STORE_TILE_BUFFER_GENERAL = 28,
+ VC4_PACKET_LOAD_TILE_BUFFER_GENERAL = 29,
+
+ VC4_PACKET_GL_INDEXED_PRIMITIVE = 32,
+ VC4_PACKET_GL_ARRAY_PRIMITIVE = 33,
+
+ VC4_PACKET_COMPRESSED_PRIMITIVE = 48,
+ VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE = 49,
+
+ VC4_PACKET_PRIMITIVE_LIST_FORMAT = 56,
+
+ VC4_PACKET_GL_SHADER_STATE = 64,
+ VC4_PACKET_NV_SHADER_STATE = 65,
+ VC4_PACKET_VG_SHADER_STATE = 66,
+
+ VC4_PACKET_CONFIGURATION_BITS = 96,
+ VC4_PACKET_FLAT_SHADE_FLAGS = 97,
+ VC4_PACKET_POINT_SIZE = 98,
+ VC4_PACKET_LINE_WIDTH = 99,
+ VC4_PACKET_RHT_X_BOUNDARY = 100,
+ VC4_PACKET_DEPTH_OFFSET = 101,
+ VC4_PACKET_CLIP_WINDOW = 102,
+ VC4_PACKET_VIEWPORT_OFFSET = 103,
+ VC4_PACKET_Z_CLIPPING = 104,
+ VC4_PACKET_CLIPPER_XY_SCALING = 105,
+ VC4_PACKET_CLIPPER_Z_SCALING = 106,
+
+ VC4_PACKET_TILE_BINNING_MODE_CONFIG = 112,
+ VC4_PACKET_TILE_RENDERING_MODE_CONFIG = 113,
+ VC4_PACKET_CLEAR_COLORS = 114,
+ VC4_PACKET_TILE_COORDINATES = 115,
+
+ /* Not an actual hardware packet -- this is what we use to put
+ * references to GEM bos in the command stream, since we need the u32
+ * in the actual address packet in order to store the offset from the
+ * start of the BO.
+ */
+ VC4_PACKET_GEM_HANDLES = 254,
+} __attribute__ ((__packed__));
+
+#define VC4_PACKET_HALT_SIZE 1
+#define VC4_PACKET_NOP_SIZE 1
+#define VC4_PACKET_FLUSH_SIZE 1
+#define VC4_PACKET_FLUSH_ALL_SIZE 1
+#define VC4_PACKET_START_TILE_BINNING_SIZE 1
+#define VC4_PACKET_INCREMENT_SEMAPHORE_SIZE 1
+#define VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE 1
+#define VC4_PACKET_BRANCH_SIZE 5
+#define VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE 5
+#define VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE 1
+#define VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF_SIZE 1
+#define VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE 5
+#define VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE 5
+#define VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE 7
+#define VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE 7
+#define VC4_PACKET_GL_INDEXED_PRIMITIVE_SIZE 14
+#define VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE 10
+#define VC4_PACKET_COMPRESSED_PRIMITIVE_SIZE 1
+#define VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE_SIZE 1
+#define VC4_PACKET_PRIMITIVE_LIST_FORMAT_SIZE 2
+#define VC4_PACKET_GL_SHADER_STATE_SIZE 5
+#define VC4_PACKET_NV_SHADER_STATE_SIZE 5
+#define VC4_PACKET_VG_SHADER_STATE_SIZE 5
+#define VC4_PACKET_CONFIGURATION_BITS_SIZE 4
+#define VC4_PACKET_FLAT_SHADE_FLAGS_SIZE 5
+#define VC4_PACKET_POINT_SIZE_SIZE 5
+#define VC4_PACKET_LINE_WIDTH_SIZE 5
+#define VC4_PACKET_RHT_X_BOUNDARY_SIZE 3
+#define VC4_PACKET_DEPTH_OFFSET_SIZE 5
+#define VC4_PACKET_CLIP_WINDOW_SIZE 9
+#define VC4_PACKET_VIEWPORT_OFFSET_SIZE 5
+#define VC4_PACKET_Z_CLIPPING_SIZE 9
+#define VC4_PACKET_CLIPPER_XY_SCALING_SIZE 9
+#define VC4_PACKET_CLIPPER_Z_SCALING_SIZE 9
+#define VC4_PACKET_TILE_BINNING_MODE_CONFIG_SIZE 16
+#define VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE 11
+#define VC4_PACKET_CLEAR_COLORS_SIZE 14
+#define VC4_PACKET_TILE_COORDINATES_SIZE 3
+#define VC4_PACKET_GEM_HANDLES_SIZE 9
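+
+/* The *_SIZE values above count the whole packet including its one-byte
+ * opcode, so a command list parser advances like this (sketch):
+ *
+ * u8 cmd = *(u8 *)pkt;
+ * if (cmd == VC4_PACKET_BRANCH_TO_SUB_LIST)
+ *         pkt += VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE;
+ */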
+
+/* Number of multisamples supported. */
+#define VC4_MAX_SAMPLES 4
+/* Size of a full resolution color or Z tile buffer load/store. */
+#define VC4_TILE_BUFFER_SIZE (64 * 64 * 4)
+
+/** @{
+ * Bits used by packets like VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_TILE_RENDERING_MODE_CONFIG.
+ */
+#define VC4_TILING_FORMAT_LINEAR 0
+#define VC4_TILING_FORMAT_T 1
+#define VC4_TILING_FORMAT_LT 2
+/** @} */
+
+/** @{
+ *
+ * low bits of VC4_PACKET_STORE_FULL_RES_TILE_BUFFER and
+ * VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER.
+ */
+#define VC4_LOADSTORE_FULL_RES_EOF BIT(3)
+#define VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL BIT(2)
+#define VC4_LOADSTORE_FULL_RES_DISABLE_ZS BIT(1)
+#define VC4_LOADSTORE_FULL_RES_DISABLE_COLOR BIT(0)
+/** @} */
+
+/** @{
+ *
+ * byte 2 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL (low bits of the address)
+ */
+
+#define VC4_LOADSTORE_TILE_BUFFER_EOF BIT(3)
+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_VG_MASK BIT(2)
+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_ZS BIT(1)
+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_COLOR BIT(0)
+
+/** @} */
+
+/** @{
+ *
+ * byte 0-1 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
+ */
+#define VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR BIT(15)
+#define VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR BIT(14)
+#define VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR BIT(13)
+#define VC4_STORE_TILE_BUFFER_DISABLE_SWAP BIT(12)
+
+#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK VC4_MASK(9, 8)
+#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT 8
+#define VC4_LOADSTORE_TILE_BUFFER_RGBA8888 0
+#define VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER 1
+#define VC4_LOADSTORE_TILE_BUFFER_BGR565 2
+/** @} */
+
+/** @{
+ *
+ * byte 0 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
+ */
+#define VC4_STORE_TILE_BUFFER_MODE_MASK VC4_MASK(7, 6)
+#define VC4_STORE_TILE_BUFFER_MODE_SHIFT 6
+#define VC4_STORE_TILE_BUFFER_MODE_SAMPLE0 (0 << 6)
+#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X4 (1 << 6)
+#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X16 (2 << 6)
+
+/** The values of the field are VC4_TILING_FORMAT_* */
+#define VC4_LOADSTORE_TILE_BUFFER_TILING_MASK VC4_MASK(5, 4)
+#define VC4_LOADSTORE_TILE_BUFFER_TILING_SHIFT 4
+
+#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK VC4_MASK(2, 0)
+#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_SHIFT 0
+#define VC4_LOADSTORE_TILE_BUFFER_NONE 0
+#define VC4_LOADSTORE_TILE_BUFFER_COLOR 1
+#define VC4_LOADSTORE_TILE_BUFFER_ZS 2
+#define VC4_LOADSTORE_TILE_BUFFER_Z 3
+#define VC4_LOADSTORE_TILE_BUFFER_VG_MASK 4
+#define VC4_LOADSTORE_TILE_BUFFER_FULL 5
+/** @} */
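+
+/* Illustrative sketch (not part of this patch): bytes 0-1 of a
+ * VC4_PACKET_STORE_TILE_BUFFER_GENERAL are assembled from the fields
+ * above with the helpers from vc4_regs.h, e.g. a linear RGBA8888
+ * color store:
+ *
+ * u16 bits = VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_COLOR,
+ *                          VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
+ *            VC4_SET_FIELD(VC4_TILING_FORMAT_LINEAR,
+ *                          VC4_LOADSTORE_TILE_BUFFER_TILING) |
+ *            VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_RGBA8888,
+ *                          VC4_LOADSTORE_TILE_BUFFER_FORMAT);
+ */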
+
+#define VC4_INDEX_BUFFER_U8 (0 << 4)
+#define VC4_INDEX_BUFFER_U16 (1 << 4)
+
+/* This flag is only present in NV shader state. */
+#define VC4_SHADER_FLAG_SHADED_CLIP_COORDS BIT(3)
+#define VC4_SHADER_FLAG_ENABLE_CLIPPING BIT(2)
+#define VC4_SHADER_FLAG_VS_POINT_SIZE BIT(1)
+#define VC4_SHADER_FLAG_FS_SINGLE_THREAD BIT(0)
+
+/** @{ byte 2 of config bits. */
+#define VC4_CONFIG_BITS_EARLY_Z_UPDATE BIT(1)
+#define VC4_CONFIG_BITS_EARLY_Z BIT(0)
+/** @} */
+
+/** @{ byte 1 of config bits. */
+#define VC4_CONFIG_BITS_Z_UPDATE BIT(7)
+/** same values in this 3-bit field as PIPE_FUNC_* */
+#define VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT 4
+#define VC4_CONFIG_BITS_COVERAGE_READ_LEAVE BIT(3)
+
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_NONZERO (0 << 1)
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ODD (1 << 1)
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_OR (2 << 1)
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ZERO (3 << 1)
+
+#define VC4_CONFIG_BITS_COVERAGE_PIPE_SELECT BIT(0)
+/** @} */
+
+/** @{ byte 0 of config bits. */
+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_NONE (0 << 6)
+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X (1 << 6)
+#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_16X (2 << 6)
+
+#define VC4_CONFIG_BITS_AA_POINTS_AND_LINES BIT(4)
+#define VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET BIT(3)
+#define VC4_CONFIG_BITS_CW_PRIMITIVES BIT(2)
+#define VC4_CONFIG_BITS_ENABLE_PRIM_BACK BIT(1)
+#define VC4_CONFIG_BITS_ENABLE_PRIM_FRONT BIT(0)
+/** @} */
+
+/** @{ bits in the last u8 of VC4_PACKET_TILE_BINNING_MODE_CONFIG */
+#define VC4_BIN_CONFIG_DB_NON_MS BIT(7)
+
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK VC4_MASK(6, 5)
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_SHIFT 5
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32 0
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_64 1
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128 2
+#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_256 3
+
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK VC4_MASK(4, 3)
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_SHIFT 3
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32 0
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_64 1
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_128 2
+#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_256 3
+
+#define VC4_BIN_CONFIG_AUTO_INIT_TSDA BIT(2)
+#define VC4_BIN_CONFIG_TILE_BUFFER_64BIT BIT(1)
+#define VC4_BIN_CONFIG_MS_MODE_4X BIT(0)
+/** @} */
+
+/** @{ bits in the last u16 of VC4_PACKET_TILE_RENDERING_MODE_CONFIG */
+#define VC4_RENDER_CONFIG_DB_NON_MS BIT(12)
+#define VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE BIT(11)
+#define VC4_RENDER_CONFIG_EARLY_Z_DIRECTION_G BIT(10)
+#define VC4_RENDER_CONFIG_COVERAGE_MODE BIT(9)
+#define VC4_RENDER_CONFIG_ENABLE_VG_MASK BIT(8)
+
+/** The values of the field are VC4_TILING_FORMAT_* */
+#define VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK VC4_MASK(7, 6)
+#define VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT 6
+
+#define VC4_RENDER_CONFIG_DECIMATE_MODE_1X (0 << 4)
+#define VC4_RENDER_CONFIG_DECIMATE_MODE_4X (1 << 4)
+#define VC4_RENDER_CONFIG_DECIMATE_MODE_16X (2 << 4)
+
+#define VC4_RENDER_CONFIG_FORMAT_MASK VC4_MASK(3, 2)
+#define VC4_RENDER_CONFIG_FORMAT_SHIFT 2
+#define VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED 0
+#define VC4_RENDER_CONFIG_FORMAT_RGBA8888 1
+#define VC4_RENDER_CONFIG_FORMAT_BGR565 2
+
+#define VC4_RENDER_CONFIG_TILE_BUFFER_64BIT BIT(1)
+#define VC4_RENDER_CONFIG_MS_MODE_4X BIT(0)
+
+#define VC4_PRIMITIVE_LIST_FORMAT_16_INDEX (1 << 4)
+#define VC4_PRIMITIVE_LIST_FORMAT_32_XY (3 << 4)
+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_POINTS (0 << 0)
+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_LINES (1 << 0)
+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES (2 << 0)
+#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_RHT (3 << 0)
+
+enum vc4_texture_data_type {
+ VC4_TEXTURE_TYPE_RGBA8888 = 0,
+ VC4_TEXTURE_TYPE_RGBX8888 = 1,
+ VC4_TEXTURE_TYPE_RGBA4444 = 2,
+ VC4_TEXTURE_TYPE_RGBA5551 = 3,
+ VC4_TEXTURE_TYPE_RGB565 = 4,
+ VC4_TEXTURE_TYPE_LUMINANCE = 5,
+ VC4_TEXTURE_TYPE_ALPHA = 6,
+ VC4_TEXTURE_TYPE_LUMALPHA = 7,
+ VC4_TEXTURE_TYPE_ETC1 = 8,
+ VC4_TEXTURE_TYPE_S16F = 9,
+ VC4_TEXTURE_TYPE_S8 = 10,
+ VC4_TEXTURE_TYPE_S16 = 11,
+ VC4_TEXTURE_TYPE_BW1 = 12,
+ VC4_TEXTURE_TYPE_A4 = 13,
+ VC4_TEXTURE_TYPE_A1 = 14,
+ VC4_TEXTURE_TYPE_RGBA64 = 15,
+ VC4_TEXTURE_TYPE_RGBA32R = 16,
+ VC4_TEXTURE_TYPE_YUV422R = 17,
+};
+
+#define VC4_TEX_P0_OFFSET_MASK VC4_MASK(31, 12)
+#define VC4_TEX_P0_OFFSET_SHIFT 12
+#define VC4_TEX_P0_CSWIZ_MASK VC4_MASK(11, 10)
+#define VC4_TEX_P0_CSWIZ_SHIFT 10
+#define VC4_TEX_P0_CMMODE_MASK VC4_MASK(9, 9)
+#define VC4_TEX_P0_CMMODE_SHIFT 9
+#define VC4_TEX_P0_FLIPY_MASK VC4_MASK(8, 8)
+#define VC4_TEX_P0_FLIPY_SHIFT 8
+#define VC4_TEX_P0_TYPE_MASK VC4_MASK(7, 4)
+#define VC4_TEX_P0_TYPE_SHIFT 4
+#define VC4_TEX_P0_MIPLVLS_MASK VC4_MASK(3, 0)
+#define VC4_TEX_P0_MIPLVLS_SHIFT 0
+
+#define VC4_TEX_P1_TYPE4_MASK VC4_MASK(31, 31)
+#define VC4_TEX_P1_TYPE4_SHIFT 31
+#define VC4_TEX_P1_HEIGHT_MASK VC4_MASK(30, 20)
+#define VC4_TEX_P1_HEIGHT_SHIFT 20
+#define VC4_TEX_P1_ETCFLIP_MASK VC4_MASK(19, 19)
+#define VC4_TEX_P1_ETCFLIP_SHIFT 19
+#define VC4_TEX_P1_WIDTH_MASK VC4_MASK(18, 8)
+#define VC4_TEX_P1_WIDTH_SHIFT 8
+
+#define VC4_TEX_P1_MAGFILT_MASK VC4_MASK(7, 7)
+#define VC4_TEX_P1_MAGFILT_SHIFT 7
+# define VC4_TEX_P1_MAGFILT_LINEAR 0
+# define VC4_TEX_P1_MAGFILT_NEAREST 1
+
+#define VC4_TEX_P1_MINFILT_MASK VC4_MASK(6, 4)
+#define VC4_TEX_P1_MINFILT_SHIFT 4
+# define VC4_TEX_P1_MINFILT_LINEAR 0
+# define VC4_TEX_P1_MINFILT_NEAREST 1
+# define VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR 2
+# define VC4_TEX_P1_MINFILT_NEAR_MIP_LIN 3
+# define VC4_TEX_P1_MINFILT_LIN_MIP_NEAR 4
+# define VC4_TEX_P1_MINFILT_LIN_MIP_LIN 5
+
+#define VC4_TEX_P1_WRAP_T_MASK VC4_MASK(3, 2)
+#define VC4_TEX_P1_WRAP_T_SHIFT 2
+#define VC4_TEX_P1_WRAP_S_MASK VC4_MASK(1, 0)
+#define VC4_TEX_P1_WRAP_S_SHIFT 0
+# define VC4_TEX_P1_WRAP_REPEAT 0
+# define VC4_TEX_P1_WRAP_CLAMP 1
+# define VC4_TEX_P1_WRAP_MIRROR 2
+# define VC4_TEX_P1_WRAP_BORDER 3
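+
+/* Sketch (not from this patch): the texture type is split across the
+ * first two parameter words, with VC4_TEX_P1_TYPE4 supplying bit 4 of
+ * the type, so decoding looks like:
+ *
+ * u32 type = VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) |
+ *            (VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4);
+ * if (type == VC4_TEXTURE_TYPE_RGBA32R) ...
+ */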
+
+#define VC4_TEX_P2_PTYPE_MASK VC4_MASK(31, 30)
+#define VC4_TEX_P2_PTYPE_SHIFT 30
+# define VC4_TEX_P2_PTYPE_IGNORED 0
+# define VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE 1
+# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS 2
+# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS 3
+
+/* VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE bits */
+#define VC4_TEX_P2_CMST_MASK VC4_MASK(29, 12)
+#define VC4_TEX_P2_CMST_SHIFT 12
+#define VC4_TEX_P2_BSLOD_MASK VC4_MASK(0, 0)
+#define VC4_TEX_P2_BSLOD_SHIFT 0
+
+/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS */
+#define VC4_TEX_P2_CHEIGHT_MASK VC4_MASK(22, 12)
+#define VC4_TEX_P2_CHEIGHT_SHIFT 12
+#define VC4_TEX_P2_CWIDTH_MASK VC4_MASK(10, 0)
+#define VC4_TEX_P2_CWIDTH_SHIFT 0
+
+/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS */
+#define VC4_TEX_P2_CYOFF_MASK VC4_MASK(22, 12)
+#define VC4_TEX_P2_CYOFF_SHIFT 12
+#define VC4_TEX_P2_CXOFF_MASK VC4_MASK(10, 0)
+#define VC4_TEX_P2_CXOFF_SHIFT 0
+
+#endif /* VC4_PACKET_H */
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 887f3caad0be..0addbad15832 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -29,6 +29,14 @@ struct vc4_plane_state {
u32 *dlist;
u32 dlist_size; /* Number of dwords allocated for the display list. */
u32 dlist_count; /* Number of used dwords in the display list. */
+
+ /* Offset in the dlist to pointer word 0. */
+ u32 pw0_offset;
+
+ /* Offset where the plane's dlist was last stored in the
+ * hardware at vc4_crtc_atomic_flush() time.
+ */
+ u32 *hw_dlist;
};
static inline struct vc4_plane_state *
@@ -207,6 +215,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Position Word 3: Context. Written by the HVS. */
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+ vc4_state->pw0_offset = vc4_state->dlist_count;
+
/* Pointer Word 0: RGB / Y Pointer */
vc4_dlist_write(vc4_state, bo->paddr + offset);
@@ -258,6 +268,8 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
int i;
+ vc4_state->hw_dlist = dlist;
+
/* Can't memcpy_toio() because it needs to be 32-bit writes. */
for (i = 0; i < vc4_state->dlist_count; i++)
writel(vc4_state->dlist[i], &dlist[i]);
@@ -272,6 +284,34 @@ u32 vc4_plane_dlist_size(struct drm_plane_state *state)
return vc4_state->dlist_count;
}
+/* Updates the plane to immediately (well, once the FIFO needs
+ * refilling) scan out from a new framebuffer.
+ */
+void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
+ struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
+ uint32_t addr;
+
+ /* We're skipping the address adjustment for negative origin,
+ * because this is only called on the primary plane.
+ */
+ WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
+ addr = bo->paddr + fb->offsets[0];
+
+ /* Write the new address into the hardware immediately. The
+ * scanout will start from this address as soon as the FIFO
+ * needs to refill with pixels.
+ */
+ writel(addr, &vc4_state->hw_dlist[vc4_state->pw0_offset]);
+
+ /* Also update the CPU-side dlist copy, so that any later
+ * atomic updates that don't do a new modeset on our plane
+ * also use our updated address.
+ */
+ vc4_state->dlist[vc4_state->pw0_offset] = addr;
+}
+
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
.prepare_fb = NULL,
.cleanup_fb = NULL,
@@ -317,7 +357,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
ret = drm_universal_plane_init(dev, plane, 0xff,
&vc4_plane_funcs,
formats, ARRAY_SIZE(formats),
- type);
+ type, NULL);
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
diff --git a/drivers/gpu/drm/vc4/vc4_qpu_defines.h b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
new file mode 100644
index 000000000000..d5c2f3c85ebb
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VC4_QPU_DEFINES_H
+#define VC4_QPU_DEFINES_H
+
+enum qpu_op_add {
+ QPU_A_NOP,
+ QPU_A_FADD,
+ QPU_A_FSUB,
+ QPU_A_FMIN,
+ QPU_A_FMAX,
+ QPU_A_FMINABS,
+ QPU_A_FMAXABS,
+ QPU_A_FTOI,
+ QPU_A_ITOF,
+ QPU_A_ADD = 12,
+ QPU_A_SUB,
+ QPU_A_SHR,
+ QPU_A_ASR,
+ QPU_A_ROR,
+ QPU_A_SHL,
+ QPU_A_MIN,
+ QPU_A_MAX,
+ QPU_A_AND,
+ QPU_A_OR,
+ QPU_A_XOR,
+ QPU_A_NOT,
+ QPU_A_CLZ,
+ QPU_A_V8ADDS = 30,
+ QPU_A_V8SUBS = 31,
+};
+
+enum qpu_op_mul {
+ QPU_M_NOP,
+ QPU_M_FMUL,
+ QPU_M_MUL24,
+ QPU_M_V8MULD,
+ QPU_M_V8MIN,
+ QPU_M_V8MAX,
+ QPU_M_V8ADDS,
+ QPU_M_V8SUBS,
+};
+
+enum qpu_raddr {
+ QPU_R_FRAG_PAYLOAD_ZW = 15, /* W for A file, Z for B file */
+ /* 0-31 are the plain regfile a or b fields */
+ QPU_R_UNIF = 32,
+ QPU_R_VARY = 35,
+ QPU_R_ELEM_QPU = 38,
+ QPU_R_NOP,
+ QPU_R_XY_PIXEL_COORD = 41,
+ QPU_R_MS_REV_FLAGS = 42,
+ QPU_R_VPM = 48,
+ QPU_R_VPM_LD_BUSY,
+ QPU_R_VPM_LD_WAIT,
+ QPU_R_MUTEX_ACQUIRE,
+};
+
+enum qpu_waddr {
+ /* 0-31 are the plain regfile a or b fields */
+ QPU_W_ACC0 = 32, /* aka r0 */
+ QPU_W_ACC1,
+ QPU_W_ACC2,
+ QPU_W_ACC3,
+ QPU_W_TMU_NOSWAP,
+ QPU_W_ACC5,
+ QPU_W_HOST_INT,
+ QPU_W_NOP,
+ QPU_W_UNIFORMS_ADDRESS,
+ QPU_W_QUAD_XY, /* X for regfile a, Y for regfile b */
+ QPU_W_MS_FLAGS = 42,
+ QPU_W_REV_FLAG = 42,
+ QPU_W_TLB_STENCIL_SETUP = 43,
+ QPU_W_TLB_Z,
+ QPU_W_TLB_COLOR_MS,
+ QPU_W_TLB_COLOR_ALL,
+ QPU_W_TLB_ALPHA_MASK,
+ QPU_W_VPM,
+ QPU_W_VPMVCD_SETUP, /* LD for regfile a, ST for regfile b */
+ QPU_W_VPM_ADDR, /* LD for regfile a, ST for regfile b */
+ QPU_W_MUTEX_RELEASE,
+ QPU_W_SFU_RECIP,
+ QPU_W_SFU_RECIPSQRT,
+ QPU_W_SFU_EXP,
+ QPU_W_SFU_LOG,
+ QPU_W_TMU0_S,
+ QPU_W_TMU0_T,
+ QPU_W_TMU0_R,
+ QPU_W_TMU0_B,
+ QPU_W_TMU1_S,
+ QPU_W_TMU1_T,
+ QPU_W_TMU1_R,
+ QPU_W_TMU1_B,
+};
+
+enum qpu_sig_bits {
+ QPU_SIG_SW_BREAKPOINT,
+ QPU_SIG_NONE,
+ QPU_SIG_THREAD_SWITCH,
+ QPU_SIG_PROG_END,
+ QPU_SIG_WAIT_FOR_SCOREBOARD,
+ QPU_SIG_SCOREBOARD_UNLOCK,
+ QPU_SIG_LAST_THREAD_SWITCH,
+ QPU_SIG_COVERAGE_LOAD,
+ QPU_SIG_COLOR_LOAD,
+ QPU_SIG_COLOR_LOAD_END,
+ QPU_SIG_LOAD_TMU0,
+ QPU_SIG_LOAD_TMU1,
+ QPU_SIG_ALPHA_MASK_LOAD,
+ QPU_SIG_SMALL_IMM,
+ QPU_SIG_LOAD_IMM,
+ QPU_SIG_BRANCH
+};
+
+enum qpu_mux {
+ /* hardware mux values */
+ QPU_MUX_R0,
+ QPU_MUX_R1,
+ QPU_MUX_R2,
+ QPU_MUX_R3,
+ QPU_MUX_R4,
+ QPU_MUX_R5,
+ QPU_MUX_A,
+ QPU_MUX_B,
+
+ /* non-hardware mux values */
+ QPU_MUX_IMM,
+};
+
+enum qpu_cond {
+ QPU_COND_NEVER,
+ QPU_COND_ALWAYS,
+ QPU_COND_ZS,
+ QPU_COND_ZC,
+ QPU_COND_NS,
+ QPU_COND_NC,
+ QPU_COND_CS,
+ QPU_COND_CC,
+};
+
+enum qpu_pack_mul {
+ QPU_PACK_MUL_NOP,
+ /* replicated to each 8 bits of the 32-bit dst. */
+ QPU_PACK_MUL_8888 = 3,
+ QPU_PACK_MUL_8A,
+ QPU_PACK_MUL_8B,
+ QPU_PACK_MUL_8C,
+ QPU_PACK_MUL_8D,
+};
+
+enum qpu_pack_a {
+ QPU_PACK_A_NOP,
+ /* convert to 16 bit float if float input, or to int16. */
+ QPU_PACK_A_16A,
+ QPU_PACK_A_16B,
+ /* replicated to each 8 bits of the 32-bit dst. */
+ QPU_PACK_A_8888,
+ /* Convert to 8-bit unsigned int. */
+ QPU_PACK_A_8A,
+ QPU_PACK_A_8B,
+ QPU_PACK_A_8C,
+ QPU_PACK_A_8D,
+
+ /* Saturating variants of the previous instructions. */
+ QPU_PACK_A_32_SAT, /* int-only */
+ QPU_PACK_A_16A_SAT, /* int or float */
+ QPU_PACK_A_16B_SAT,
+ QPU_PACK_A_8888_SAT,
+ QPU_PACK_A_8A_SAT,
+ QPU_PACK_A_8B_SAT,
+ QPU_PACK_A_8C_SAT,
+ QPU_PACK_A_8D_SAT,
+};
+
+enum qpu_unpack_r4 {
+ QPU_UNPACK_R4_NOP,
+ QPU_UNPACK_R4_F16A_TO_F32,
+ QPU_UNPACK_R4_F16B_TO_F32,
+ QPU_UNPACK_R4_8D_REP,
+ QPU_UNPACK_R4_8A,
+ QPU_UNPACK_R4_8B,
+ QPU_UNPACK_R4_8C,
+ QPU_UNPACK_R4_8D,
+};
+
+#define QPU_MASK(high, low) \
+ ((((uint64_t)1 << ((high) - (low) + 1)) - 1) << (low))
+
+#define QPU_GET_FIELD(word, field) \
+ ((uint32_t)(((word) & field ## _MASK) >> field ## _SHIFT))
+
+#define QPU_SIG_SHIFT 60
+#define QPU_SIG_MASK QPU_MASK(63, 60)
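+
+/* Illustrative sketch (not part of this patch): QPU_GET_FIELD pairs a
+ * *_MASK/*_SHIFT definition to extract a field from a 64-bit
+ * instruction, e.g. spotting the end of a shader:
+ *
+ * if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_PROG_END)
+ *         ...this is the shader's last instruction...
+ */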
+
+#define QPU_UNPACK_SHIFT 57
+#define QPU_UNPACK_MASK QPU_MASK(59, 57)
+
+/**
+ * If set, the pack field means PACK_MUL or R4 packing, instead of normal
+ * regfile a packing.
+ */
+#define QPU_PM ((uint64_t)1 << 56)
+
+#define QPU_PACK_SHIFT 52
+#define QPU_PACK_MASK QPU_MASK(55, 52)
+
+#define QPU_COND_ADD_SHIFT 49
+#define QPU_COND_ADD_MASK QPU_MASK(51, 49)
+#define QPU_COND_MUL_SHIFT 46
+#define QPU_COND_MUL_MASK QPU_MASK(48, 46)
+
+#define QPU_SF ((uint64_t)1 << 45)
+
+#define QPU_WADDR_ADD_SHIFT 38
+#define QPU_WADDR_ADD_MASK QPU_MASK(43, 38)
+#define QPU_WADDR_MUL_SHIFT 32
+#define QPU_WADDR_MUL_MASK QPU_MASK(37, 32)
+
+#define QPU_OP_MUL_SHIFT 29
+#define QPU_OP_MUL_MASK QPU_MASK(31, 29)
+
+#define QPU_RADDR_A_SHIFT 18
+#define QPU_RADDR_A_MASK QPU_MASK(23, 18)
+#define QPU_RADDR_B_SHIFT 12
+#define QPU_RADDR_B_MASK QPU_MASK(17, 12)
+#define QPU_SMALL_IMM_SHIFT 12
+#define QPU_SMALL_IMM_MASK QPU_MASK(17, 12)
+
+#define QPU_ADD_A_SHIFT 9
+#define QPU_ADD_A_MASK QPU_MASK(11, 9)
+#define QPU_ADD_B_SHIFT 6
+#define QPU_ADD_B_MASK QPU_MASK(8, 6)
+#define QPU_MUL_A_SHIFT 3
+#define QPU_MUL_A_MASK QPU_MASK(5, 3)
+#define QPU_MUL_B_SHIFT 0
+#define QPU_MUL_B_MASK QPU_MASK(2, 0)
+
+#define QPU_WS ((uint64_t)1 << 44)
+
+#define QPU_OP_ADD_SHIFT 24
+#define QPU_OP_ADD_MASK QPU_MASK(28, 24)
+
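+/*
+ * Illustrative sketch (an editor's example, not from the hardware docs;
+ * qpu_code/ip are hypothetical names): decoding fields of a 64-bit QPU
+ * instruction with the helpers above looks like
+ *
+ *     uint64_t inst = qpu_code[ip];
+ *     uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ *     uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
+ *
+ * after which sig can be compared against the qpu_sig_bits enum and
+ * waddr_add against the qpu_waddr enum.
+ */
+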
+#endif /* VC4_QPU_DEFINES_H */
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 9e4e904c668e..4e52a0a88551 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -154,7 +154,7 @@
#define V3D_PCTRS14 0x006f4
#define V3D_PCTR15 0x006f8
#define V3D_PCTRS15 0x006fc
-#define V3D_BGE 0x00f00
+#define V3D_DBGE 0x00f00
#define V3D_FDBGO 0x00f04
#define V3D_FDBGB 0x00f08
#define V3D_FDBGR 0x00f0c
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
new file mode 100644
index 000000000000..8a2a312e2c1b
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -0,0 +1,634 @@
+/*
+ * Copyright © 2014-2015 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Render command list generation
+ *
+ * In the VC4 driver, render command list generation is performed by the
+ * kernel instead of userspace. We do this because validating a
+ * user-submitted command list is hard to get right and has high CPU overhead,
+ * while the number of valid configurations for render command lists is
+ * actually fairly low.
+ */
+
+#include "uapi/drm/vc4_drm.h"
+#include "vc4_drv.h"
+#include "vc4_packet.h"
+
+struct vc4_rcl_setup {
+ struct drm_gem_cma_object *color_read;
+ struct drm_gem_cma_object *color_write;
+ struct drm_gem_cma_object *zs_read;
+ struct drm_gem_cma_object *zs_write;
+ struct drm_gem_cma_object *msaa_color_write;
+ struct drm_gem_cma_object *msaa_zs_write;
+
+ struct drm_gem_cma_object *rcl;
+ u32 next_offset;
+};
+
+static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
+{
+ *(u8 *)(setup->rcl->vaddr + setup->next_offset) = val;
+ setup->next_offset += 1;
+}
+
+static inline void rcl_u16(struct vc4_rcl_setup *setup, u16 val)
+{
+ *(u16 *)(setup->rcl->vaddr + setup->next_offset) = val;
+ setup->next_offset += 2;
+}
+
+static inline void rcl_u32(struct vc4_rcl_setup *setup, u32 val)
+{
+ *(u32 *)(setup->rcl->vaddr + setup->next_offset) = val;
+ setup->next_offset += 4;
+}
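+
+/*
+ * Note: these helpers do no bounds checking; vc4_create_rcl_bo() below
+ * computes the exact size of everything it will emit before allocating the
+ * BO, and BUG_ONs at the end if the two ever disagree.  A typical emission
+ * looks like:
+ *
+ *     rcl_u8(setup, VC4_PACKET_TILE_COORDINATES);
+ *     rcl_u8(setup, x);
+ *     rcl_u8(setup, y);
+ */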
+
+/*
+ * Emits a no-op STORE_TILE_BUFFER_GENERAL.
+ *
+ * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of
+ * some sort before another load is triggered.
+ */
+static void vc4_store_before_load(struct vc4_rcl_setup *setup)
+{
+ rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
+ rcl_u16(setup,
+ VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_NONE,
+ VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
+ VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR |
+ VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR |
+ VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR);
+ rcl_u32(setup, 0); /* no address, since we're in None mode */
+}
+
+/*
+ * Calculates the physical address of the start of a tile in an RCL surface.
+ *
+ * Unlike the other load/store packets,
+ * VC4_PACKET_LOAD/STORE_FULL_RES_TILE_BUFFER don't look at the tile
+ * coordinates packet, and instead just use the address given.
+ */
+static uint32_t vc4_full_res_offset(struct vc4_exec_info *exec,
+ struct drm_gem_cma_object *bo,
+ struct drm_vc4_submit_rcl_surface *surf,
+ uint8_t x, uint8_t y)
+{
+ return bo->paddr + surf->offset + VC4_TILE_BUFFER_SIZE *
+ (DIV_ROUND_UP(exec->args->width, 32) * y + x);
+}
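+
+/*
+ * Worked example (illustrative numbers only): with args->width == 256, the
+ * surface is DIV_ROUND_UP(256, 32) == 8 full-res tiles wide, so tile
+ * (x = 2, y = 1) lives VC4_TILE_BUFFER_SIZE * (8 * 1 + 2) bytes past
+ * bo->paddr + surf->offset.
+ */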
+
+/*
+ * Emits a PACKET_TILE_COORDINATES.
+ *
+ * The tile coordinates packet triggers a pending load if there is one, is
+ * used for clipping during rendering, and determines where loads/stores
+ * happen relative to their base address.
+ */
+static void vc4_tile_coordinates(struct vc4_rcl_setup *setup,
+ uint32_t x, uint32_t y)
+{
+ rcl_u8(setup, VC4_PACKET_TILE_COORDINATES);
+ rcl_u8(setup, x);
+ rcl_u8(setup, y);
+}
+
+static void emit_tile(struct vc4_exec_info *exec,
+ struct vc4_rcl_setup *setup,
+ uint8_t x, uint8_t y, bool first, bool last)
+{
+ struct drm_vc4_submit_cl *args = exec->args;
+ bool has_bin = args->bin_cl_size != 0;
+
+ /* Note that the load doesn't actually occur until the
+ * tile coords packet is processed, and only one load
+ * may be outstanding at a time.
+ */
+ if (setup->color_read) {
+ if (args->color_read.flags &
+ VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
+ rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER);
+ rcl_u32(setup,
+ vc4_full_res_offset(exec, setup->color_read,
+ &args->color_read, x, y) |
+ VC4_LOADSTORE_FULL_RES_DISABLE_ZS);
+ } else {
+ rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
+ rcl_u16(setup, args->color_read.bits);
+ rcl_u32(setup, setup->color_read->paddr +
+ args->color_read.offset);
+ }
+ }
+
+ if (setup->zs_read) {
+ if (args->zs_read.flags &
+ VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
+ rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER);
+ rcl_u32(setup,
+ vc4_full_res_offset(exec, setup->zs_read,
+ &args->zs_read, x, y) |
+ VC4_LOADSTORE_FULL_RES_DISABLE_COLOR);
+ } else {
+ if (setup->color_read) {
+ /* Exec previous load. */
+ vc4_tile_coordinates(setup, x, y);
+ vc4_store_before_load(setup);
+ }
+
+ rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
+ rcl_u16(setup, args->zs_read.bits);
+ rcl_u32(setup, setup->zs_read->paddr +
+ args->zs_read.offset);
+ }
+ }
+
+ /* Clipping depends on tile coordinates having been
+ * emitted, so we always need one here.
+ */
+ vc4_tile_coordinates(setup, x, y);
+
+ /* Wait for the binner before jumping to the first
+ * tile's lists.
+ */
+ if (first && has_bin)
+ rcl_u8(setup, VC4_PACKET_WAIT_ON_SEMAPHORE);
+
+ if (has_bin) {
+ rcl_u8(setup, VC4_PACKET_BRANCH_TO_SUB_LIST);
+ rcl_u32(setup, (exec->tile_bo->paddr +
+ exec->tile_alloc_offset +
+ (y * exec->bin_tiles_x + x) * 32));
+ }
+
+ if (setup->msaa_color_write) {
+ bool last_tile_write = (!setup->msaa_zs_write &&
+ !setup->zs_write &&
+ !setup->color_write);
+ uint32_t bits = VC4_LOADSTORE_FULL_RES_DISABLE_ZS;
+
+ if (!last_tile_write)
+ bits |= VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL;
+ else if (last)
+ bits |= VC4_LOADSTORE_FULL_RES_EOF;
+ rcl_u8(setup, VC4_PACKET_STORE_FULL_RES_TILE_BUFFER);
+ rcl_u32(setup,
+ vc4_full_res_offset(exec, setup->msaa_color_write,
+ &args->msaa_color_write, x, y) |
+ bits);
+ }
+
+ if (setup->msaa_zs_write) {
+ bool last_tile_write = (!setup->zs_write &&
+ !setup->color_write);
+ uint32_t bits = VC4_LOADSTORE_FULL_RES_DISABLE_COLOR;
+
+ if (setup->msaa_color_write)
+ vc4_tile_coordinates(setup, x, y);
+ if (!last_tile_write)
+ bits |= VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL;
+ else if (last)
+ bits |= VC4_LOADSTORE_FULL_RES_EOF;
+ rcl_u8(setup, VC4_PACKET_STORE_FULL_RES_TILE_BUFFER);
+ rcl_u32(setup,
+ vc4_full_res_offset(exec, setup->msaa_zs_write,
+ &args->msaa_zs_write, x, y) |
+ bits);
+ }
+
+ if (setup->zs_write) {
+ bool last_tile_write = !setup->color_write;
+
+ if (setup->msaa_color_write || setup->msaa_zs_write)
+ vc4_tile_coordinates(setup, x, y);
+
+ rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
+ rcl_u16(setup, args->zs_write.bits |
+ (last_tile_write ?
+ 0 : VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR));
+ rcl_u32(setup,
+ (setup->zs_write->paddr + args->zs_write.offset) |
+ ((last && last_tile_write) ?
+ VC4_LOADSTORE_TILE_BUFFER_EOF : 0));
+ }
+
+ if (setup->color_write) {
+ if (setup->msaa_color_write || setup->msaa_zs_write ||
+ setup->zs_write) {
+ vc4_tile_coordinates(setup, x, y);
+ }
+
+ if (last)
+ rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF);
+ else
+ rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER);
+ }
+}
+
+static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
+ struct vc4_rcl_setup *setup)
+{
+ struct drm_vc4_submit_cl *args = exec->args;
+ bool has_bin = args->bin_cl_size != 0;
+ uint8_t min_x_tile = args->min_x_tile;
+ uint8_t min_y_tile = args->min_y_tile;
+ uint8_t max_x_tile = args->max_x_tile;
+ uint8_t max_y_tile = args->max_y_tile;
+ uint8_t xtiles = max_x_tile - min_x_tile + 1;
+ uint8_t ytiles = max_y_tile - min_y_tile + 1;
+ uint8_t x, y;
+ uint32_t size, loop_body_size;
+ struct vc4_bo *rcl_bo;
+
+ size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE;
+ loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE;
+
+ if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
+ size += VC4_PACKET_CLEAR_COLORS_SIZE +
+ VC4_PACKET_TILE_COORDINATES_SIZE +
+ VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
+ }
+
+ if (setup->color_read) {
+ if (args->color_read.flags &
+ VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
+ loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE;
+ } else {
+ loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
+ }
+ }
+ if (setup->zs_read) {
+ if (args->zs_read.flags &
+ VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
+ loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE;
+ } else {
+ if (setup->color_read &&
+ !(args->color_read.flags &
+ VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES)) {
+ loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
+ loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
+ }
+ loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
+ }
+ }
+
+ if (has_bin) {
+ size += VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE;
+ loop_body_size += VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE;
+ }
+
+ if (setup->msaa_color_write)
+ loop_body_size += VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE;
+ if (setup->msaa_zs_write)
+ loop_body_size += VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE;
+
+ if (setup->zs_write)
+ loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
+ if (setup->color_write)
+ loop_body_size += VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE;
+
+ /* We need a VC4_PACKET_TILE_COORDINATES in between each store. */
+ loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE *
+ ((setup->msaa_color_write != NULL) +
+ (setup->msaa_zs_write != NULL) +
+ (setup->color_write != NULL) +
+ (setup->zs_write != NULL) - 1);
+
+ size += xtiles * ytiles * loop_body_size;
+
+ rcl_bo = vc4_bo_create(dev, size, true);
+ if (!rcl_bo)
+ return -ENOMEM;
+ setup->rcl = &rcl_bo->base;
+ list_add_tail(&rcl_bo->unref_head, &exec->unref_list);
+
+ rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
+ rcl_u32(setup,
+ (setup->color_write ? (setup->color_write->paddr +
+ args->color_write.offset) :
+ 0));
+ rcl_u16(setup, args->width);
+ rcl_u16(setup, args->height);
+ rcl_u16(setup, args->color_write.bits);
+
+ /* The tile buffer gets cleared when the previous tile is stored. If
+ * the clear values changed between frames, then the tile buffer has
+ * stale clear values in it, so we have to do a store in None mode (no
+ * writes) so that we trigger the tile buffer clear.
+ */
+ if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
+ rcl_u8(setup, VC4_PACKET_CLEAR_COLORS);
+ rcl_u32(setup, args->clear_color[0]);
+ rcl_u32(setup, args->clear_color[1]);
+ rcl_u32(setup, args->clear_z);
+ rcl_u8(setup, args->clear_s);
+
+ vc4_tile_coordinates(setup, 0, 0);
+
+ rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
+ rcl_u16(setup, VC4_LOADSTORE_TILE_BUFFER_NONE);
+ rcl_u32(setup, 0); /* no address, since we're in None mode */
+ }
+
+ for (y = min_y_tile; y <= max_y_tile; y++) {
+ for (x = min_x_tile; x <= max_x_tile; x++) {
+ bool first = (x == min_x_tile && y == min_y_tile);
+ bool last = (x == max_x_tile && y == max_y_tile);
+
+ emit_tile(exec, setup, x, y, first, last);
+ }
+ }
+
+ BUG_ON(setup->next_offset != size);
+ exec->ct1ca = setup->rcl->paddr;
+ exec->ct1ea = setup->rcl->paddr + setup->next_offset;
+
+ return 0;
+}
+
+static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
+ struct drm_gem_cma_object *obj,
+ struct drm_vc4_submit_rcl_surface *surf)
+{
+ struct drm_vc4_submit_cl *args = exec->args;
+ u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32);
+
+ if (surf->offset > obj->base.size) {
+ DRM_ERROR("surface offset %d > BO size %zd\n",
+ surf->offset, obj->base.size);
+ return -EINVAL;
+ }
+
+ if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE <
+ render_tiles_stride * args->max_y_tile + args->max_x_tile) {
+ DRM_ERROR("MSAA tile %d, %d out of bounds "
+ "(bo size %zd, offset %d).\n",
+ args->max_x_tile, args->max_y_tile,
+ obj->base.size,
+ surf->offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
+ struct drm_gem_cma_object **obj,
+ struct drm_vc4_submit_rcl_surface *surf)
+{
+ if (surf->flags != 0 || surf->bits != 0) {
+ DRM_ERROR("MSAA surface had nonzero flags/bits\n");
+ return -EINVAL;
+ }
+
+ if (surf->hindex == ~0)
+ return 0;
+
+ *obj = vc4_use_bo(exec, surf->hindex);
+ if (!*obj)
+ return -EINVAL;
+
+ if (surf->offset & 0xf) {
+ DRM_ERROR("MSAA write must be 16b aligned.\n");
+ return -EINVAL;
+ }
+
+ return vc4_full_res_bounds_check(exec, *obj, surf);
+}
+
+static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
+ struct drm_gem_cma_object **obj,
+ struct drm_vc4_submit_rcl_surface *surf)
+{
+ uint8_t tiling = VC4_GET_FIELD(surf->bits,
+ VC4_LOADSTORE_TILE_BUFFER_TILING);
+ uint8_t buffer = VC4_GET_FIELD(surf->bits,
+ VC4_LOADSTORE_TILE_BUFFER_BUFFER);
+ uint8_t format = VC4_GET_FIELD(surf->bits,
+ VC4_LOADSTORE_TILE_BUFFER_FORMAT);
+ int cpp;
+ int ret;
+
+ if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
+ DRM_ERROR("Extra flags set\n");
+ return -EINVAL;
+ }
+
+ if (surf->hindex == ~0)
+ return 0;
+
+ *obj = vc4_use_bo(exec, surf->hindex);
+ if (!*obj)
+ return -EINVAL;
+
+ if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
+ if (surf == &exec->args->zs_write) {
+ DRM_ERROR("general zs write may not be a full-res.\n");
+ return -EINVAL;
+ }
+
+ if (surf->bits != 0) {
+ DRM_ERROR("load/store general bits set with "
+ "full res load/store.\n");
+ return -EINVAL;
+ }
+
+ ret = vc4_full_res_bounds_check(exec, *obj, surf);
+ if (ret)
+ return ret;
+
+ return 0;
+ }
+
+ if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
+ VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
+ VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
+ DRM_ERROR("Unknown bits in load/store: 0x%04x\n",
+ surf->bits);
+ return -EINVAL;
+ }
+
+ if (tiling > VC4_TILING_FORMAT_LT) {
+ DRM_ERROR("Bad tiling format\n");
+ return -EINVAL;
+ }
+
+ if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
+ if (format != 0) {
+ DRM_ERROR("No color format should be set for ZS\n");
+ return -EINVAL;
+ }
+ cpp = 4;
+ } else if (buffer == VC4_LOADSTORE_TILE_BUFFER_COLOR) {
+ switch (format) {
+ case VC4_LOADSTORE_TILE_BUFFER_BGR565:
+ case VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER:
+ cpp = 2;
+ break;
+ case VC4_LOADSTORE_TILE_BUFFER_RGBA8888:
+ cpp = 4;
+ break;
+ default:
+ DRM_ERROR("Bad tile buffer format\n");
+ return -EINVAL;
+ }
+ } else {
+ DRM_ERROR("Bad load/store buffer %d.\n", buffer);
+ return -EINVAL;
+ }
+
+ if (surf->offset & 0xf) {
+ DRM_ERROR("load/store buffer must be 16b aligned.\n");
+ return -EINVAL;
+ }
+
+ if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
+ exec->args->width, exec->args->height, cpp)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
+ struct vc4_rcl_setup *setup,
+ struct drm_gem_cma_object **obj,
+ struct drm_vc4_submit_rcl_surface *surf)
+{
+ uint8_t tiling = VC4_GET_FIELD(surf->bits,
+ VC4_RENDER_CONFIG_MEMORY_FORMAT);
+ uint8_t format = VC4_GET_FIELD(surf->bits,
+ VC4_RENDER_CONFIG_FORMAT);
+ int cpp;
+
+ if (surf->flags != 0) {
+ DRM_ERROR("No flags supported on render config.\n");
+ return -EINVAL;
+ }
+
+ if (surf->bits & ~(VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK |
+ VC4_RENDER_CONFIG_FORMAT_MASK |
+ VC4_RENDER_CONFIG_MS_MODE_4X |
+ VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) {
+ DRM_ERROR("Unknown bits in render config: 0x%04x\n",
+ surf->bits);
+ return -EINVAL;
+ }
+
+ if (surf->hindex == ~0)
+ return 0;
+
+ *obj = vc4_use_bo(exec, surf->hindex);
+ if (!*obj)
+ return -EINVAL;
+
+ if (tiling > VC4_TILING_FORMAT_LT) {
+ DRM_ERROR("Bad tiling format\n");
+ return -EINVAL;
+ }
+
+ switch (format) {
+ case VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED:
+ case VC4_RENDER_CONFIG_FORMAT_BGR565:
+ cpp = 2;
+ break;
+ case VC4_RENDER_CONFIG_FORMAT_RGBA8888:
+ cpp = 4;
+ break;
+ default:
+ DRM_ERROR("Bad tile buffer format\n");
+ return -EINVAL;
+ }
+
+ if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
+ exec->args->width, exec->args->height, cpp)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+ struct vc4_rcl_setup setup = {0};
+ struct drm_vc4_submit_cl *args = exec->args;
+ bool has_bin = args->bin_cl_size != 0;
+ int ret;
+
+ if (args->min_x_tile > args->max_x_tile ||
+ args->min_y_tile > args->max_y_tile) {
+ DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n",
+ args->min_x_tile, args->min_y_tile,
+ args->max_x_tile, args->max_y_tile);
+ return -EINVAL;
+ }
+
+ if (has_bin &&
+ (args->max_x_tile > exec->bin_tiles_x ||
+ args->max_y_tile > exec->bin_tiles_y)) {
+ DRM_ERROR("Render tiles (%d,%d) outside of bin config "
+ "(%d,%d)\n",
+ args->max_x_tile, args->max_y_tile,
+ exec->bin_tiles_x, exec->bin_tiles_y);
+ return -EINVAL;
+ }
+
+ ret = vc4_rcl_render_config_surface_setup(exec, &setup,
+ &setup.color_write,
+ &args->color_write);
+ if (ret)
+ return ret;
+
+ ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read);
+ if (ret)
+ return ret;
+
+ ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read);
+ if (ret)
+ return ret;
+
+ ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write);
+ if (ret)
+ return ret;
+
+ ret = vc4_rcl_msaa_surface_setup(exec, &setup.msaa_color_write,
+ &args->msaa_color_write);
+ if (ret)
+ return ret;
+
+ ret = vc4_rcl_msaa_surface_setup(exec, &setup.msaa_zs_write,
+ &args->msaa_zs_write);
+ if (ret)
+ return ret;
+
+ /* We shouldn't even have the job submitted to us if there's no
+ * surface to write out.
+ */
+ if (!setup.color_write && !setup.zs_write &&
+ !setup.msaa_color_write && !setup.msaa_zs_write) {
+ DRM_ERROR("RCL requires color or Z/S write\n");
+ return -EINVAL;
+ }
+
+ return vc4_create_rcl_bo(dev, exec, &setup);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_trace.h b/drivers/gpu/drm/vc4/vc4_trace.h
new file mode 100644
index 000000000000..ad7b1ea720c2
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_trace.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(_VC4_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VC4_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vc4
+#define TRACE_INCLUDE_FILE vc4_trace
+
+TRACE_EVENT(vc4_wait_for_seqno_begin,
+ TP_PROTO(struct drm_device *dev, uint64_t seqno, uint64_t timeout),
+ TP_ARGS(dev, seqno, timeout),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ __field(u64, timeout)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ __entry->timeout = timeout;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu, timeout=%llu",
+ __entry->dev, __entry->seqno, __entry->timeout)
+);
+
+TRACE_EVENT(vc4_wait_for_seqno_end,
+ TP_PROTO(struct drm_device *dev, uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev, __entry->seqno)
+);
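+
+/*
+ * Each TRACE_EVENT() above generates a trace_<name>() helper.  The expected
+ * usage (a sketch; the actual call sites live in the GEM code, not in this
+ * header) brackets a seqno wait like so:
+ *
+ *     trace_vc4_wait_for_seqno_begin(dev, seqno, timeout);
+ *     ret = wait_event_interruptible_timeout(...);
+ *     trace_vc4_wait_for_seqno_end(dev, seqno);
+ */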
+
+#endif /* _VC4_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/vc4/vc4_trace_points.c b/drivers/gpu/drm/vc4/vc4_trace_points.c
new file mode 100644
index 000000000000..e6278f25716b
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_trace_points.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vc4_drv.h"
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "vc4_trace.h"
+#endif
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
new file mode 100644
index 000000000000..424d515ffcda
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "linux/component.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#ifdef CONFIG_DEBUG_FS
+#define REGDEF(reg) { reg, #reg }
+static const struct {
+ uint32_t reg;
+ const char *name;
+} vc4_reg_defs[] = {
+ REGDEF(V3D_IDENT0),
+ REGDEF(V3D_IDENT1),
+ REGDEF(V3D_IDENT2),
+ REGDEF(V3D_SCRATCH),
+ REGDEF(V3D_L2CACTL),
+ REGDEF(V3D_SLCACTL),
+ REGDEF(V3D_INTCTL),
+ REGDEF(V3D_INTENA),
+ REGDEF(V3D_INTDIS),
+ REGDEF(V3D_CT0CS),
+ REGDEF(V3D_CT1CS),
+ REGDEF(V3D_CT0EA),
+ REGDEF(V3D_CT1EA),
+ REGDEF(V3D_CT0CA),
+ REGDEF(V3D_CT1CA),
+ REGDEF(V3D_CT00RA0),
+ REGDEF(V3D_CT01RA0),
+ REGDEF(V3D_CT0LC),
+ REGDEF(V3D_CT1LC),
+ REGDEF(V3D_CT0PC),
+ REGDEF(V3D_CT1PC),
+ REGDEF(V3D_PCS),
+ REGDEF(V3D_BFC),
+ REGDEF(V3D_RFC),
+ REGDEF(V3D_BPCA),
+ REGDEF(V3D_BPCS),
+ REGDEF(V3D_BPOA),
+ REGDEF(V3D_BPOS),
+ REGDEF(V3D_BXCF),
+ REGDEF(V3D_SQRSV0),
+ REGDEF(V3D_SQRSV1),
+ REGDEF(V3D_SQCNTL),
+ REGDEF(V3D_SRQPC),
+ REGDEF(V3D_SRQUA),
+ REGDEF(V3D_SRQUL),
+ REGDEF(V3D_SRQCS),
+ REGDEF(V3D_VPACNTL),
+ REGDEF(V3D_VPMBASE),
+ REGDEF(V3D_PCTRC),
+ REGDEF(V3D_PCTRE),
+ REGDEF(V3D_PCTR0),
+ REGDEF(V3D_PCTRS0),
+ REGDEF(V3D_PCTR1),
+ REGDEF(V3D_PCTRS1),
+ REGDEF(V3D_PCTR2),
+ REGDEF(V3D_PCTRS2),
+ REGDEF(V3D_PCTR3),
+ REGDEF(V3D_PCTRS3),
+ REGDEF(V3D_PCTR4),
+ REGDEF(V3D_PCTRS4),
+ REGDEF(V3D_PCTR5),
+ REGDEF(V3D_PCTRS5),
+ REGDEF(V3D_PCTR6),
+ REGDEF(V3D_PCTRS6),
+ REGDEF(V3D_PCTR7),
+ REGDEF(V3D_PCTRS7),
+ REGDEF(V3D_PCTR8),
+ REGDEF(V3D_PCTRS8),
+ REGDEF(V3D_PCTR9),
+ REGDEF(V3D_PCTRS9),
+ REGDEF(V3D_PCTR10),
+ REGDEF(V3D_PCTRS10),
+ REGDEF(V3D_PCTR11),
+ REGDEF(V3D_PCTRS11),
+ REGDEF(V3D_PCTR12),
+ REGDEF(V3D_PCTRS12),
+ REGDEF(V3D_PCTR13),
+ REGDEF(V3D_PCTRS13),
+ REGDEF(V3D_PCTR14),
+ REGDEF(V3D_PCTRS14),
+ REGDEF(V3D_PCTR15),
+ REGDEF(V3D_PCTRS15),
+ REGDEF(V3D_DBGE),
+ REGDEF(V3D_FDBGO),
+ REGDEF(V3D_FDBGB),
+ REGDEF(V3D_FDBGR),
+ REGDEF(V3D_FDBGS),
+ REGDEF(V3D_ERRSTAT),
+};
+
+int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ vc4_reg_defs[i].name, vc4_reg_defs[i].reg,
+ V3D_READ(vc4_reg_defs[i].reg));
+ }
+
+ return 0;
+}
+
+int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ uint32_t ident1 = V3D_READ(V3D_IDENT1);
+ uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
+ uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
+ uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
+
+ seq_printf(m, "Revision: %d\n",
+ VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
+ seq_printf(m, "Slices: %d\n", nslc);
+ seq_printf(m, "TMUs: %d\n", nslc * tups);
+ seq_printf(m, "QPUs: %d\n", nslc * qups);
+ seq_printf(m, "Semaphores: %d\n",
+ VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
+
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Asks the firmware to turn on power to the V3D engine.
+ *
+ * This may be doable with just the clocks interface, though this
+ * packet does some other register setup from the firmware, too.
+ */
+int
+vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
+{
+ if (on)
+ return pm_generic_resume(&vc4->v3d->pdev->dev);
+ else
+ return pm_generic_poweroff(&vc4->v3d->pdev->dev);
+}
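+
+/*
+ * A likely usage pattern (an assumption on the editor's part, since the
+ * callers are outside this file): power-cycle the domain to get the V3D
+ * block back to a clean state,
+ *
+ *     vc4_v3d_set_power(vc4, false);
+ *     vc4_v3d_set_power(vc4, true);
+ */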
+
+static void vc4_v3d_init_hw(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ /* Take all the memory that would have been reserved for user
+ * QPU programs, since we don't have an interface for running
+ * them, anyway.
+ */
+ V3D_WRITE(V3D_VPMBASE, 0);
+}
+
+static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_v3d *v3d = NULL;
+ int ret;
+
+ v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL);
+ if (!v3d)
+ return -ENOMEM;
+
+ v3d->pdev = pdev;
+
+ v3d->regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(v3d->regs))
+ return PTR_ERR(v3d->regs);
+
+ vc4->v3d = v3d;
+
+ if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
+ DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
+ V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
+ return -EINVAL;
+ }
+
+ /* Reset the binner overflow address/size at setup, to be sure
+ * we don't reuse an old one.
+ */
+ V3D_WRITE(V3D_BPOA, 0);
+ V3D_WRITE(V3D_BPOS, 0);
+
+ vc4_v3d_init_hw(drm);
+
+ ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
+ if (ret) {
+ DRM_ERROR("Failed to install IRQ handler\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vc4_v3d_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+
+ drm_irq_uninstall(drm);
+
+ /* Disable the binner's overflow memory address, so the next
+ * driver probe (if any) doesn't try to reuse our old
+ * allocation.
+ */
+ V3D_WRITE(V3D_BPOA, 0);
+ V3D_WRITE(V3D_BPOS, 0);
+
+ vc4->v3d = NULL;
+}
+
+static const struct component_ops vc4_v3d_ops = {
+ .bind = vc4_v3d_bind,
+ .unbind = vc4_v3d_unbind,
+};
+
+static int vc4_v3d_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vc4_v3d_ops);
+}
+
+static int vc4_v3d_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vc4_v3d_ops);
+ return 0;
+}
+
+static const struct of_device_id vc4_v3d_dt_match[] = {
+ { .compatible = "brcm,vc4-v3d" },
+ {}
+};
+
+struct platform_driver vc4_v3d_driver = {
+ .probe = vc4_v3d_dev_probe,
+ .remove = vc4_v3d_dev_remove,
+ .driver = {
+ .name = "vc4_v3d",
+ .of_match_table = vc4_v3d_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
new file mode 100644
index 000000000000..0fb5b994b9dd
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -0,0 +1,900 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/**
+ * Command list validator for VC4.
+ *
+ * The VC4 has no IOMMU between it and system memory. So, a user with
+ * access to execute command lists could escalate privilege by
+ * overwriting system memory (drawing to it as a framebuffer) or
+ * reading system memory it shouldn't (reading it as a texture, or
+ * uniform data, or vertex data).
+ *
+ * This validates command lists to ensure that all accesses are within
+ * the bounds of the GEM objects referenced. It explicitly whitelists
+ * packets, and looks at the offsets in any address fields to make
+ * sure they're constrained within the BOs they reference.
+ *
+ * Note that because of the validation that's happening anyway, this
+ * is where GEM relocation processing happens.
+ */
+
+#include "uapi/drm/vc4_drm.h"
+#include "vc4_drv.h"
+#include "vc4_packet.h"
+
+#define VALIDATE_ARGS \
+ struct vc4_exec_info *exec, \
+ void *validated, \
+ void *untrusted
+
+/** Return the width in pixels of a 64-byte microtile. */
+static uint32_t
+utile_width(int cpp)
+{
+ switch (cpp) {
+ case 1:
+ case 2:
+ return 8;
+ case 4:
+ return 4;
+ case 8:
+ return 2;
+ default:
+ DRM_ERROR("unknown cpp: %d\n", cpp);
+ return 1;
+ }
+}
+
+/** Return the height in pixels of a 64-byte microtile. */
+static uint32_t
+utile_height(int cpp)
+{
+ switch (cpp) {
+ case 1:
+ return 8;
+ case 2:
+ case 4:
+ case 8:
+ return 4;
+ default:
+ DRM_ERROR("unknown cpp: %d\n", cpp);
+ return 1;
+ }
+}
+
+/**
+ * The texture unit decides what tiling format a particular miplevel is
+ * using based on its dimensions; this function makes the same decision,
+ * so we can lay out our miptrees accordingly.
+ */
+static bool
+size_is_lt(uint32_t width, uint32_t height, int cpp)
+{
+ return (width <= 4 * utile_width(cpp) ||
+ height <= 4 * utile_height(cpp));
+}
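+
+/*
+ * Example: for cpp == 4 (RGBA8888) a utile is 4x4 pixels, so any miplevel
+ * with width <= 16 or height <= 16 uses the LT (simple utile-order) layout
+ * rather than full T-format tiling.
+ */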
+
+struct drm_gem_cma_object *
+vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
+{
+ struct drm_gem_cma_object *obj;
+ struct vc4_bo *bo;
+
+ if (hindex >= exec->bo_count) {
+ DRM_ERROR("BO index %d greater than BO count %d\n",
+ hindex, exec->bo_count);
+ return NULL;
+ }
+ obj = exec->bo[hindex];
+ bo = to_vc4_bo(&obj->base);
+
+ if (bo->validated_shader) {
+ DRM_ERROR("Trying to use shader BO as something other than "
+ "a shader\n");
+ return NULL;
+ }
+
+ return obj;
+}
+
+static struct drm_gem_cma_object *
+vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index)
+{
+ return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]);
+}
+
+static bool
+validate_bin_pos(struct vc4_exec_info *exec, void *untrusted, uint32_t pos)
+{
+ /* Note that the untrusted pointer passed to these functions is
+ * incremented past the packet byte.
+ */
+ return (untrusted - 1 == exec->bin_u + pos);
+}
+
+static uint32_t
+gl_shader_rec_size(uint32_t pointer_bits)
+{
+ uint32_t attribute_count = pointer_bits & 7;
+ bool extended = pointer_bits & 8;
+
+ if (attribute_count == 0)
+ attribute_count = 8;
+
+ if (extended)
+ return 100 + attribute_count * 4;
+ else
+ return 36 + attribute_count * 8;
+}
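+
+/*
+ * Example: pointer_bits == 0x3 encodes 3 attributes, non-extended, giving a
+ * 36 + 3 * 8 == 60 byte record; 0xb (same attribute count, extended bit
+ * set) would give 100 + 3 * 4 == 112 bytes.
+ */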
+
+bool
+vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
+ uint32_t offset, uint8_t tiling_format,
+ uint32_t width, uint32_t height, uint8_t cpp)
+{
+ uint32_t aligned_width, aligned_height, stride, size;
+ uint32_t utile_w = utile_width(cpp);
+ uint32_t utile_h = utile_height(cpp);
+
+ /* The shaded vertex format stores signed 12.4 fixed point
+ * (-2048,2047) offsets from the viewport center, so we should
+ * never have a render target larger than 4096. The texture
+ * unit can only sample from 2048x2048, so it's even more
+ * restricted. This lets us avoid worrying about overflow in
+ * our math.
+ */
+ if (width > 4096 || height > 4096) {
+ DRM_ERROR("Surface dimesions (%d,%d) too large", width, height);
+ return false;
+ }
+
+ switch (tiling_format) {
+ case VC4_TILING_FORMAT_LINEAR:
+ aligned_width = round_up(width, utile_w);
+ aligned_height = height;
+ break;
+ case VC4_TILING_FORMAT_T:
+ aligned_width = round_up(width, utile_w * 8);
+ aligned_height = round_up(height, utile_h * 8);
+ break;
+ case VC4_TILING_FORMAT_LT:
+ aligned_width = round_up(width, utile_w);
+ aligned_height = round_up(height, utile_h);
+ break;
+ default:
+ DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
+ return false;
+ }
+
+ stride = aligned_width * cpp;
+ size = stride * aligned_height;
+
+ if (size + offset < size ||
+ size + offset > fbo->base.size) {
+ DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
+ width, height,
+ aligned_width, aligned_height,
+ size, offset, fbo->base.size);
+ return false;
+ }
+
+ return true;
+}
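+
+/*
+ * For instance (numbers for illustration): a 100x100 RGBA8888 (cpp == 4)
+ * T-format surface rounds up to 128x128, since a T tile is 8x8 utiles of
+ * 4x4 pixels == 32x32; that yields stride == 512 and size == 65536 bytes,
+ * which must fit below fbo->base.size at the given offset.
+ */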
+
+static int
+validate_flush(VALIDATE_ARGS)
+{
+ if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) {
+ DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n");
+ return -EINVAL;
+ }
+ exec->found_flush = true;
+
+ return 0;
+}
+
+static int
+validate_start_tile_binning(VALIDATE_ARGS)
+{
+ if (exec->found_start_tile_binning_packet) {
+ DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
+ return -EINVAL;
+ }
+ exec->found_start_tile_binning_packet = true;
+
+ if (!exec->found_tile_binning_mode_config_packet) {
+ DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+validate_increment_semaphore(VALIDATE_ARGS)
+{
+ if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) {
+ DRM_ERROR("Bin CL must end with "
+ "VC4_PACKET_INCREMENT_SEMAPHORE\n");
+ return -EINVAL;
+ }
+ exec->found_increment_semaphore_packet = true;
+
+ return 0;
+}
+
+static int
+validate_indexed_prim_list(VALIDATE_ARGS)
+{
+ struct drm_gem_cma_object *ib;
+ uint32_t length = *(uint32_t *)(untrusted + 1);
+ uint32_t offset = *(uint32_t *)(untrusted + 5);
+ uint32_t max_index = *(uint32_t *)(untrusted + 9);
+ uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 2 : 1;
+ struct vc4_shader_state *shader_state;
+
+ /* Check overflow condition */
+ if (exec->shader_state_count == 0) {
+ DRM_ERROR("shader state must precede primitives\n");
+ return -EINVAL;
+ }
+ shader_state = &exec->shader_state[exec->shader_state_count - 1];
+
+ if (max_index > shader_state->max_index)
+ shader_state->max_index = max_index;
+
+ ib = vc4_use_handle(exec, 0);
+ if (!ib)
+ return -EINVAL;
+
+ if (offset > ib->base.size ||
+ (ib->base.size - offset) / index_size < length) {
+ DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n",
+ offset, length, index_size, ib->base.size);
+ return -EINVAL;
+ }
+
+ *(uint32_t *)(validated + 5) = ib->paddr + offset;
+
+ return 0;
+}
+
+static int
+validate_gl_array_primitive(VALIDATE_ARGS)
+{
+ uint32_t length = *(uint32_t *)(untrusted + 1);
+ uint32_t base_index = *(uint32_t *)(untrusted + 5);
+ uint32_t max_index;
+ struct vc4_shader_state *shader_state;
+
+ /* Check overflow condition */
+ if (exec->shader_state_count == 0) {
+ DRM_ERROR("shader state must precede primitives\n");
+ return -EINVAL;
+ }
+ shader_state = &exec->shader_state[exec->shader_state_count - 1];
+
+ if (length + base_index < length) {
+ DRM_ERROR("primitive vertex count overflow\n");
+ return -EINVAL;
+ }
+ max_index = length + base_index - 1;
+
+ if (max_index > shader_state->max_index)
+ shader_state->max_index = max_index;
+
+ return 0;
+}
+
+static int
+validate_gl_shader_state(VALIDATE_ARGS)
+{
+ uint32_t i = exec->shader_state_count++;
+
+ if (i >= exec->shader_state_size) {
+ DRM_ERROR("More requests for shader states than declared\n");
+ return -EINVAL;
+ }
+
+ exec->shader_state[i].addr = *(uint32_t *)untrusted;
+ exec->shader_state[i].max_index = 0;
+
+ if (exec->shader_state[i].addr & ~0xf) {
+ DRM_ERROR("high bits set in GL shader rec reference\n");
+ return -EINVAL;
+ }
+
+ *(uint32_t *)validated = (exec->shader_rec_p +
+ exec->shader_state[i].addr);
+
+ exec->shader_rec_p +=
+ roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16);
+
+ return 0;
+}
+
+static int
+validate_tile_binning_config(VALIDATE_ARGS)
+{
+ struct drm_device *dev = exec->exec_bo->base.dev;
+ struct vc4_bo *tile_bo;
+ uint8_t flags;
+ uint32_t tile_state_size, tile_alloc_size;
+ uint32_t tile_count;
+
+ if (exec->found_tile_binning_mode_config_packet) {
+ DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
+ return -EINVAL;
+ }
+ exec->found_tile_binning_mode_config_packet = true;
+
+ exec->bin_tiles_x = *(uint8_t *)(untrusted + 12);
+ exec->bin_tiles_y = *(uint8_t *)(untrusted + 13);
+ tile_count = exec->bin_tiles_x * exec->bin_tiles_y;
+ flags = *(uint8_t *)(untrusted + 14);
+
+ if (exec->bin_tiles_x == 0 ||
+ exec->bin_tiles_y == 0) {
+ DRM_ERROR("Tile binning config of %dx%d too small\n",
+ exec->bin_tiles_x, exec->bin_tiles_y);
+ return -EINVAL;
+ }
+
+ if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
+ VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) {
+ DRM_ERROR("unsupported binning config flags 0x%02x\n", flags);
+ return -EINVAL;
+ }
+
+ /* The tile state data array is 48 bytes per tile, and we put it at
+ * the start of a BO containing both it and the tile alloc.
+ */
+ tile_state_size = 48 * tile_count;
+
+ /* Since the tile alloc array will follow us, align. */
+ exec->tile_alloc_offset = roundup(tile_state_size, 4096);
+
+ *(uint8_t *)(validated + 14) =
+ ((flags & ~(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK |
+ VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK)) |
+ VC4_BIN_CONFIG_AUTO_INIT_TSDA |
+ VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32,
+ VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE) |
+ VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128,
+ VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE));
+
+ /* Initial block size. */
+ tile_alloc_size = 32 * tile_count;
+
+ /*
+ * The initial allocation gets rounded to the next 256 bytes before
+ * the hardware starts fulfilling further allocations.
+ */
+ tile_alloc_size = roundup(tile_alloc_size, 256);
+
+ /* Add space for the extra allocations. This is what gets used first,
+ * before overflow memory. It must have at least 4096 bytes, but we
+ * want to avoid overflow memory usage if possible.
+ */
+ tile_alloc_size += 1024 * 1024;
+
+ tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
+ true);
+ if (!tile_bo)
+ return -ENOMEM;
+ exec->tile_bo = &tile_bo->base;
+ list_add_tail(&tile_bo->unref_head, &exec->unref_list);
+
+ /* tile alloc address. */
+ *(uint32_t *)(validated + 0) = (exec->tile_bo->paddr +
+ exec->tile_alloc_offset);
+ /* tile alloc size. */
+ *(uint32_t *)(validated + 4) = tile_alloc_size;
+ /* tile state address. */
+ *(uint32_t *)(validated + 8) = exec->tile_bo->paddr;
+
+ return 0;
+}
+
+static int
+validate_gem_handles(VALIDATE_ARGS)
+{
+ memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index));
+ return 0;
+}
+
+#define VC4_DEFINE_PACKET(packet, func) \
+ [packet] = { packet ## _SIZE, #packet, func }
+
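+/*
+ * For example, VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, validate_flush) expands
+ * to
+ *
+ *     [VC4_PACKET_FLUSH] = { VC4_PACKET_FLUSH_SIZE, "VC4_PACKET_FLUSH",
+ *                            validate_flush },
+ *
+ * so the table below is indexed directly by the packet's command byte.
+ */
+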
+static const struct cmd_info {
+ uint16_t len;
+ const char *name;
+ int (*func)(struct vc4_exec_info *exec, void *validated,
+ void *untrusted);
+} cmd_info[] = {
+ VC4_DEFINE_PACKET(VC4_PACKET_HALT, NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_NOP, NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, validate_flush),
+ VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING,
+ validate_start_tile_binning),
+ VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE,
+ validate_increment_semaphore),
+
+ VC4_DEFINE_PACKET(VC4_PACKET_GL_INDEXED_PRIMITIVE,
+ validate_indexed_prim_list),
+ VC4_DEFINE_PACKET(VC4_PACKET_GL_ARRAY_PRIMITIVE,
+ validate_gl_array_primitive),
+
+ VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, NULL),
+
+ VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, validate_gl_shader_state),
+
+ VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_POINT_SIZE, NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_LINE_WIDTH, NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_RHT_X_BOUNDARY, NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_DEPTH_OFFSET, NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_CLIP_WINDOW, NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_VIEWPORT_OFFSET, NULL),
+ VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_XY_SCALING, NULL),
+ /* Note: The docs say this was also 105, but it was 106 in the
+ * initial userland code drop.
+ */
+ VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_Z_SCALING, NULL),
+
+ VC4_DEFINE_PACKET(VC4_PACKET_TILE_BINNING_MODE_CONFIG,
+ validate_tile_binning_config),
+
+ VC4_DEFINE_PACKET(VC4_PACKET_GEM_HANDLES, validate_gem_handles),
+};
+
+int
+vc4_validate_bin_cl(struct drm_device *dev,
+ void *validated,
+ void *unvalidated,
+ struct vc4_exec_info *exec)
+{
+ uint32_t len = exec->args->bin_cl_size;
+ uint32_t dst_offset = 0;
+ uint32_t src_offset = 0;
+
+ while (src_offset < len) {
+ void *dst_pkt = validated + dst_offset;
+ void *src_pkt = unvalidated + src_offset;
+ u8 cmd = *(uint8_t *)src_pkt;
+ const struct cmd_info *info;
+
+ if (cmd >= ARRAY_SIZE(cmd_info)) {
+ DRM_ERROR("0x%08x: packet %d out of bounds\n",
+ src_offset, cmd);
+ return -EINVAL;
+ }
+
+ info = &cmd_info[cmd];
+ if (!info->name) {
+ DRM_ERROR("0x%08x: packet %d invalid\n",
+ src_offset, cmd);
+ return -EINVAL;
+ }
+
+ if (src_offset + info->len > len) {
+ DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
+ "exceeds bounds (0x%08x)\n",
+ src_offset, cmd, info->name, info->len,
+ src_offset + len);
+ return -EINVAL;
+ }
+
+ if (cmd != VC4_PACKET_GEM_HANDLES)
+ memcpy(dst_pkt, src_pkt, info->len);
+
+ if (info->func && info->func(exec,
+ dst_pkt + 1,
+ src_pkt + 1)) {
+ DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n",
+ src_offset, cmd, info->name);
+ return -EINVAL;
+ }
+
+ src_offset += info->len;
+ /* GEM handle loading doesn't produce HW packets. */
+ if (cmd != VC4_PACKET_GEM_HANDLES)
+ dst_offset += info->len;
+
+ /* When the CL hits halt, it'll stop reading anything else. */
+ if (cmd == VC4_PACKET_HALT)
+ break;
+ }
+
+ exec->ct0ea = exec->ct0ca + dst_offset;
+
+ if (!exec->found_start_tile_binning_packet) {
+ DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
+ return -EINVAL;
+ }
+
+ /* The bin CL must be ended with INCREMENT_SEMAPHORE and FLUSH. The
+ * semaphore is used to trigger the render CL to start up, and the
+ * FLUSH is what caps the bin lists with
+ * VC4_PACKET_RETURN_FROM_SUB_LIST (so they jump back to the main
+ * render CL when they get called to) and actually triggers the queued
+ * semaphore increment.
+ */
+ if (!exec->found_increment_semaphore_packet || !exec->found_flush) {
+ DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
+ "VC4_PACKET_FLUSH\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool
+reloc_tex(struct vc4_exec_info *exec,
+ void *uniform_data_u,
+ struct vc4_texture_sample_info *sample,
+ uint32_t texture_handle_index)
+
+{
+ struct drm_gem_cma_object *tex;
+ uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
+ uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
+ uint32_t p2 = (sample->p_offset[2] != ~0 ?
+ *(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0);
+ uint32_t p3 = (sample->p_offset[3] != ~0 ?
+ *(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0);
+ uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0];
+ uint32_t offset = p0 & VC4_TEX_P0_OFFSET_MASK;
+ uint32_t miplevels = VC4_GET_FIELD(p0, VC4_TEX_P0_MIPLVLS);
+ uint32_t width = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH);
+ uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT);
+ uint32_t cpp, tiling_format, utile_w, utile_h;
+ uint32_t i;
+ uint32_t cube_map_stride = 0;
+ enum vc4_texture_data_type type;
+
+ tex = vc4_use_bo(exec, texture_handle_index);
+ if (!tex)
+ return false;
+
+ if (sample->is_direct) {
+ uint32_t remaining_size = tex->base.size - p0;
+
+ if (p0 > tex->base.size - 4) {
+ DRM_ERROR("UBO offset greater than UBO size\n");
+ goto fail;
+ }
+ if (p1 > remaining_size - 4) {
+ DRM_ERROR("UBO clamp would allow reads "
+ "outside of UBO\n");
+ goto fail;
+ }
+ *validated_p0 = tex->paddr + p0;
+ return true;
+ }
+
+ if (width == 0)
+ width = 2048;
+ if (height == 0)
+ height = 2048;
+
+ if (p0 & VC4_TEX_P0_CMMODE_MASK) {
+ if (VC4_GET_FIELD(p2, VC4_TEX_P2_PTYPE) ==
+ VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE)
+ cube_map_stride = p2 & VC4_TEX_P2_CMST_MASK;
+ if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
+ VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
+ if (cube_map_stride) {
+ DRM_ERROR("Cube map stride set twice\n");
+ goto fail;
+ }
+
+ cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
+ }
+ if (!cube_map_stride) {
+ DRM_ERROR("Cube map stride not set\n");
+ goto fail;
+ }
+ }
+
+ type = (VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) |
+ (VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4));
+
+ switch (type) {
+ case VC4_TEXTURE_TYPE_RGBA8888:
+ case VC4_TEXTURE_TYPE_RGBX8888:
+ case VC4_TEXTURE_TYPE_RGBA32R:
+ cpp = 4;
+ break;
+ case VC4_TEXTURE_TYPE_RGBA4444:
+ case VC4_TEXTURE_TYPE_RGBA5551:
+ case VC4_TEXTURE_TYPE_RGB565:
+ case VC4_TEXTURE_TYPE_LUMALPHA:
+ case VC4_TEXTURE_TYPE_S16F:
+ case VC4_TEXTURE_TYPE_S16:
+ cpp = 2;
+ break;
+ case VC4_TEXTURE_TYPE_LUMINANCE:
+ case VC4_TEXTURE_TYPE_ALPHA:
+ case VC4_TEXTURE_TYPE_S8:
+ cpp = 1;
+ break;
+ case VC4_TEXTURE_TYPE_ETC1:
+ case VC4_TEXTURE_TYPE_BW1:
+ case VC4_TEXTURE_TYPE_A4:
+ case VC4_TEXTURE_TYPE_A1:
+ case VC4_TEXTURE_TYPE_RGBA64:
+ case VC4_TEXTURE_TYPE_YUV422R:
+ default:
+ DRM_ERROR("Texture format %d unsupported\n", type);
+ goto fail;
+ }
+ utile_w = utile_width(cpp);
+ utile_h = utile_height(cpp);
+
+ if (type == VC4_TEXTURE_TYPE_RGBA32R) {
+ tiling_format = VC4_TILING_FORMAT_LINEAR;
+ } else {
+ if (size_is_lt(width, height, cpp))
+ tiling_format = VC4_TILING_FORMAT_LT;
+ else
+ tiling_format = VC4_TILING_FORMAT_T;
+ }
+
+ if (!vc4_check_tex_size(exec, tex, offset + cube_map_stride * 5,
+ tiling_format, width, height, cpp)) {
+ goto fail;
+ }
+
+ /* The mipmap levels are stored before the base of the texture. Make
+ * sure there is actually space in the BO.
+ */
+ for (i = 1; i <= miplevels; i++) {
+ uint32_t level_width = max(width >> i, 1u);
+ uint32_t level_height = max(height >> i, 1u);
+ uint32_t aligned_width, aligned_height;
+ uint32_t level_size;
+
+ /* Once the levels get small enough, they drop from T to LT. */
+ if (tiling_format == VC4_TILING_FORMAT_T &&
+ size_is_lt(level_width, level_height, cpp)) {
+ tiling_format = VC4_TILING_FORMAT_LT;
+ }
+
+ switch (tiling_format) {
+ case VC4_TILING_FORMAT_T:
+ aligned_width = round_up(level_width, utile_w * 8);
+ aligned_height = round_up(level_height, utile_h * 8);
+ break;
+ case VC4_TILING_FORMAT_LT:
+ aligned_width = round_up(level_width, utile_w);
+ aligned_height = round_up(level_height, utile_h);
+ break;
+ default:
+ aligned_width = round_up(level_width, utile_w);
+ aligned_height = level_height;
+ break;
+ }
+
+ level_size = aligned_width * cpp * aligned_height;
+
+ if (offset < level_size) {
+ DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db "
+ "overflowed buffer bounds (offset %d)\n",
+ i, level_width, level_height,
+ aligned_width, aligned_height,
+ level_size, offset);
+ goto fail;
+ }
+
+ offset -= level_size;
+ }
+
+ *validated_p0 = tex->paddr + p0;
+
+ return true;
+ fail:
+ DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
+ DRM_INFO("Texture p1 at %d: 0x%08x\n", sample->p_offset[1], p1);
+ DRM_INFO("Texture p2 at %d: 0x%08x\n", sample->p_offset[2], p2);
+ DRM_INFO("Texture p3 at %d: 0x%08x\n", sample->p_offset[3], p3);
+ return false;
+}
+
+static int
+validate_gl_shader_rec(struct drm_device *dev,
+ struct vc4_exec_info *exec,
+ struct vc4_shader_state *state)
+{
+ uint32_t *src_handles;
+ void *pkt_u, *pkt_v;
+ static const uint32_t shader_reloc_offsets[] = {
+ 4, /* fs */
+ 16, /* vs */
+ 28, /* cs */
+ };
+ uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
+ struct drm_gem_cma_object *bo[shader_reloc_count + 8];
+ uint32_t nr_attributes, nr_relocs, packet_size;
+ int i;
+
+ nr_attributes = state->addr & 0x7;
+ if (nr_attributes == 0)
+ nr_attributes = 8;
+ packet_size = gl_shader_rec_size(state->addr);
+
+ nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes;
+ if (nr_relocs * 4 > exec->shader_rec_size) {
+ DRM_ERROR("overflowed shader recs reading %d handles "
+ "from %d bytes left\n",
+ nr_relocs, exec->shader_rec_size);
+ return -EINVAL;
+ }
+ src_handles = exec->shader_rec_u;
+ exec->shader_rec_u += nr_relocs * 4;
+ exec->shader_rec_size -= nr_relocs * 4;
+
+ if (packet_size > exec->shader_rec_size) {
+ DRM_ERROR("overflowed shader recs copying %db packet "
+ "from %d bytes left\n",
+ packet_size, exec->shader_rec_size);
+ return -EINVAL;
+ }
+ pkt_u = exec->shader_rec_u;
+ pkt_v = exec->shader_rec_v;
+ memcpy(pkt_v, pkt_u, packet_size);
+ exec->shader_rec_u += packet_size;
+ /* Shader recs have to be aligned to 16 bytes (due to the attribute
+ * flags being in the low bytes), so round the next validated shader
+ * rec address up. This should be safe, since we've got so many
+ * relocations in a shader rec packet.
+ */
+ BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4);
+ exec->shader_rec_v += roundup(packet_size, 16);
+ exec->shader_rec_size -= packet_size;
+
+ if (!(*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD)) {
+ DRM_ERROR("Multi-threaded fragment shaders not supported.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < shader_reloc_count; i++) {
+ if (src_handles[i] >= exec->bo_count) {
+ DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
+ return -EINVAL;
+ }
+
+ bo[i] = exec->bo[src_handles[i]];
+ if (!bo[i])
+ return -EINVAL;
+ }
+ for (i = shader_reloc_count; i < nr_relocs; i++) {
+ bo[i] = vc4_use_bo(exec, src_handles[i]);
+ if (!bo[i])
+ return -EINVAL;
+ }
+
+ for (i = 0; i < shader_reloc_count; i++) {
+ struct vc4_validated_shader_info *validated_shader;
+ uint32_t o = shader_reloc_offsets[i];
+ uint32_t src_offset = *(uint32_t *)(pkt_u + o);
+ uint32_t *texture_handles_u;
+ void *uniform_data_u;
+ uint32_t tex;
+
+ *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
+
+ if (src_offset != 0) {
+ DRM_ERROR("Shaders must be at offset 0 of "
+ "the BO.\n");
+ return -EINVAL;
+ }
+
+ validated_shader = to_vc4_bo(&bo[i]->base)->validated_shader;
+ if (!validated_shader)
+ return -EINVAL;
+
+ if (validated_shader->uniforms_src_size >
+ exec->uniforms_size) {
+ DRM_ERROR("Uniforms src buffer overflow\n");
+ return -EINVAL;
+ }
+
+ texture_handles_u = exec->uniforms_u;
+ uniform_data_u = (texture_handles_u +
+ validated_shader->num_texture_samples);
+
+ memcpy(exec->uniforms_v, uniform_data_u,
+ validated_shader->uniforms_size);
+
+ for (tex = 0;
+ tex < validated_shader->num_texture_samples;
+ tex++) {
+ if (!reloc_tex(exec,
+ uniform_data_u,
+ &validated_shader->texture_samples[tex],
+ texture_handles_u[tex])) {
+ return -EINVAL;
+ }
+ }
+
+ *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
+
+ exec->uniforms_u += validated_shader->uniforms_src_size;
+ exec->uniforms_v += validated_shader->uniforms_size;
+ exec->uniforms_p += validated_shader->uniforms_size;
+ }
+
+ for (i = 0; i < nr_attributes; i++) {
+ struct drm_gem_cma_object *vbo =
+ bo[ARRAY_SIZE(shader_reloc_offsets) + i];
+ uint32_t o = 36 + i * 8;
+ uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
+ uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1;
+ uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
+ uint32_t max_index;
+
+ if (state->addr & 0x8)
+ stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
+
+ if (vbo->base.size < offset ||
+ vbo->base.size - offset < attr_size) {
+ DRM_ERROR("BO offset overflow (%d + %d > %d)\n",
+ offset, attr_size, vbo->base.size);
+ return -EINVAL;
+ }
+
+ if (stride != 0) {
+ max_index = ((vbo->base.size - offset - attr_size) /
+ stride);
+ if (state->max_index > max_index) {
+ DRM_ERROR("primitives use index %d out of "
+ "supplied %d\n",
+ state->max_index, max_index);
+ return -EINVAL;
+ }
+ }
+
+ *(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
+ }
+
+ return 0;
+}
+
+int
+vc4_validate_shader_recs(struct drm_device *dev,
+ struct vc4_exec_info *exec)
+{
+ uint32_t i;
+ int ret = 0;
+
+ for (i = 0; i < exec->shader_state_count; i++) {
+ ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
new file mode 100644
index 000000000000..f67124b4c534
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -0,0 +1,513 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Shader validator for VC4.
+ *
+ * The VC4 has no IOMMU between it and system memory, so a user with
+ * access to execute shaders could escalate privilege by overwriting
+ * system memory (using the VPM write address register in the
+ * general-purpose DMA mode) or reading system memory it shouldn't
+ * (reading it as a texture, or uniform data, or vertex data).
+ *
+ * This walks over a shader BO, ensuring that its accesses are
+ * appropriately bounded, and recording how many texture accesses are
+ * made and where so that we can do relocations for them in the
+ * uniform stream.
+ */
+
+#include "vc4_drv.h"
+#include "vc4_qpu_defines.h"
+
+struct vc4_shader_validation_state {
+ struct vc4_texture_sample_info tmu_setup[2];
+ int tmu_write_count[2];
+
+ /* For registers that were last written to by a MIN instruction with
+ * one argument being a uniform, the address of the uniform.
+ * Otherwise, ~0.
+ *
+ * This is used for the validation of direct address memory reads.
+ */
+ uint32_t live_min_clamp_offsets[32 + 32 + 4];
+ bool live_max_clamp_regs[32 + 32 + 4];
+};
+
+static uint32_t
+waddr_to_live_reg_index(uint32_t waddr, bool is_b)
+{
+ if (waddr < 32) {
+ if (is_b)
+ return 32 + waddr;
+ else
+ return waddr;
+ } else if (waddr <= QPU_W_ACC3) {
+ return 64 + waddr - QPU_W_ACC0;
+ } else {
+ return ~0;
+ }
+}
+
+static uint32_t
+raddr_add_a_to_live_reg_index(uint64_t inst)
+{
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
+ uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+ uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+
+ if (add_a == QPU_MUX_A)
+ return raddr_a;
+ else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM)
+ return 32 + raddr_b;
+ else if (add_a <= QPU_MUX_R3)
+ return 64 + add_a;
+ else
+ return ~0;
+}
+
+static bool
+is_tmu_submit(uint32_t waddr)
+{
+ return (waddr == QPU_W_TMU0_S ||
+ waddr == QPU_W_TMU1_S);
+}
+
+static bool
+is_tmu_write(uint32_t waddr)
+{
+ return (waddr >= QPU_W_TMU0_S &&
+ waddr <= QPU_W_TMU1_B);
+}
+
+static bool
+record_texture_sample(struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state,
+ int tmu)
+{
+ uint32_t s = validated_shader->num_texture_samples;
+ int i;
+ struct vc4_texture_sample_info *temp_samples;
+
+ temp_samples = krealloc(validated_shader->texture_samples,
+ (s + 1) * sizeof(*temp_samples),
+ GFP_KERNEL);
+ if (!temp_samples)
+ return false;
+
+ memcpy(&temp_samples[s],
+ &validation_state->tmu_setup[tmu],
+ sizeof(*temp_samples));
+
+ validated_shader->num_texture_samples = s + 1;
+ validated_shader->texture_samples = temp_samples;
+
+ for (i = 0; i < 4; i++)
+ validation_state->tmu_setup[tmu].p_offset[i] = ~0;
+
+ return true;
+}
+
+static bool
+check_tmu_write(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state,
+ bool is_mul)
+{
+ uint32_t waddr = (is_mul ?
+ QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
+ QPU_GET_FIELD(inst, QPU_WADDR_ADD));
+ uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+ uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
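+	/* TMU1's waddrs sit directly above TMU0's in the register map, so
+	 * anything past QPU_W_TMU0_B belongs to TMU1.
+	 */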
+ int tmu = waddr > QPU_W_TMU0_B;
+ bool submit = is_tmu_submit(waddr);
+ bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+
+ if (is_direct) {
+ uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
+ uint32_t clamp_reg, clamp_offset;
+
+ if (sig == QPU_SIG_SMALL_IMM) {
+ DRM_ERROR("direct TMU read used small immediate\n");
+ return false;
+ }
+
+ /* Make sure that this texture load is an add of the base
+ * address of the UBO to a clamped offset within the UBO.
+ */
+ if (is_mul ||
+ QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
+ DRM_ERROR("direct TMU load wasn't an add\n");
+ return false;
+ }
+
+	/* We assert that the clamped address is the first
+ * argument, and the UBO base address is the second argument.
+ * This is arbitrary, but simpler than supporting flipping the
+ * two either way.
+ */
+ clamp_reg = raddr_add_a_to_live_reg_index(inst);
+ if (clamp_reg == ~0) {
+ DRM_ERROR("direct TMU load wasn't clamped\n");
+ return false;
+ }
+
+ clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
+ if (clamp_offset == ~0) {
+ DRM_ERROR("direct TMU load wasn't clamped\n");
+ return false;
+ }
+
+ /* Store the clamp value's offset in p1 (see reloc_tex() in
+ * vc4_validate.c).
+ */
+ validation_state->tmu_setup[tmu].p_offset[1] =
+ clamp_offset;
+
+ if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
+ !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
+ DRM_ERROR("direct TMU load didn't add to a uniform\n");
+ return false;
+ }
+
+ validation_state->tmu_setup[tmu].is_direct = true;
+ } else {
+ if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
+ raddr_b == QPU_R_UNIF)) {
+ DRM_ERROR("uniform read in the same instruction as "
+ "texture setup.\n");
+ return false;
+ }
+ }
+
+ if (validation_state->tmu_write_count[tmu] >= 4) {
+ DRM_ERROR("TMU%d got too many parameters before dispatch\n",
+ tmu);
+ return false;
+ }
+ validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
+ validated_shader->uniforms_size;
+ validation_state->tmu_write_count[tmu]++;
+ /* Since direct uses a RADDR uniform reference, it will get counted in
+ * check_instruction_reads()
+ */
+ if (!is_direct)
+ validated_shader->uniforms_size += 4;
+
+ if (submit) {
+ if (!record_texture_sample(validated_shader,
+ validation_state, tmu)) {
+ return false;
+ }
+
+ validation_state->tmu_write_count[tmu] = 0;
+ }
+
+ return true;
+}
+
+static bool
+check_reg_write(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state,
+ bool is_mul)
+{
+ uint32_t waddr = (is_mul ?
+ QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
+ QPU_GET_FIELD(inst, QPU_WADDR_ADD));
+
+ switch (waddr) {
+ case QPU_W_UNIFORMS_ADDRESS:
+		/* XXX: We'll probably need to support this for reladdr, but
+		 * it's definitely security-sensitive, since it controls where
+		 * uniform data gets read from.
+ */
+ DRM_ERROR("uniforms address load unsupported\n");
+ return false;
+
+ case QPU_W_TLB_COLOR_MS:
+ case QPU_W_TLB_COLOR_ALL:
+ case QPU_W_TLB_Z:
+ /* These only interact with the tile buffer, not main memory,
+ * so they're safe.
+ */
+ return true;
+
+ case QPU_W_TMU0_S:
+ case QPU_W_TMU0_T:
+ case QPU_W_TMU0_R:
+ case QPU_W_TMU0_B:
+ case QPU_W_TMU1_S:
+ case QPU_W_TMU1_T:
+ case QPU_W_TMU1_R:
+ case QPU_W_TMU1_B:
+ return check_tmu_write(inst, validated_shader, validation_state,
+ is_mul);
+
+ case QPU_W_HOST_INT:
+ case QPU_W_TMU_NOSWAP:
+ case QPU_W_TLB_ALPHA_MASK:
+ case QPU_W_MUTEX_RELEASE:
+ /* XXX: I haven't thought about these, so don't support them
+ * for now.
+ */
+ DRM_ERROR("Unsupported waddr %d\n", waddr);
+ return false;
+
+ case QPU_W_VPM_ADDR:
+ DRM_ERROR("General VPM DMA unsupported\n");
+ return false;
+
+ case QPU_W_VPM:
+ case QPU_W_VPMVCD_SETUP:
+ /* We allow VPM setup in general, even including VPM DMA
+ * configuration setup, because the (unsafe) DMA can only be
+ * triggered by QPU_W_VPM_ADDR writes.
+ */
+ return true;
+
+ case QPU_W_TLB_STENCIL_SETUP:
+ return true;
+ }
+
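+	/* Everything else is a plain register-file, accumulator, or other
+	 * non-memory destination, so it's safe to allow.
+	 */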
+ return true;
+}
+
+static void
+track_live_clamps(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state)
+{
+ uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
+ uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
+ uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
+ uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
+ uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
+ uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
+ uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+ uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ bool ws = inst & QPU_WS;
+ uint32_t lri_add_a, lri_add, lri_mul;
+ bool add_a_is_min_0;
+
+	/* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
+ * before we clear previous live state.
+ */
+ lri_add_a = raddr_add_a_to_live_reg_index(inst);
+ add_a_is_min_0 = (lri_add_a != ~0 &&
+ validation_state->live_max_clamp_regs[lri_add_a]);
+
+ /* Clear live state for registers written by our instruction. */
+ lri_add = waddr_to_live_reg_index(waddr_add, ws);
+ lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
+ if (lri_mul != ~0) {
+ validation_state->live_max_clamp_regs[lri_mul] = false;
+ validation_state->live_min_clamp_offsets[lri_mul] = ~0;
+ }
+ if (lri_add != ~0) {
+ validation_state->live_max_clamp_regs[lri_add] = false;
+ validation_state->live_min_clamp_offsets[lri_add] = ~0;
+ } else {
+ /* Nothing further to do for live tracking, since only ADDs
+ * generate new live clamp registers.
+ */
+ return;
+ }
+
+ /* Now, handle remaining live clamp tracking for the ADD operation. */
+
+ if (cond_add != QPU_COND_ALWAYS)
+ return;
+
+ if (op_add == QPU_A_MAX) {
+ /* Track live clamps of a value to a minimum of 0 (in either
+ * arg).
+ */
+ if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
+ (add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
+ return;
+ }
+
+ validation_state->live_max_clamp_regs[lri_add] = true;
+ } else if (op_add == QPU_A_MIN) {
+ /* Track live clamps of a value clamped to a minimum of 0 and
+ * a maximum of some uniform's offset.
+ */
+ if (!add_a_is_min_0)
+ return;
+
+ if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
+ !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF &&
+ sig != QPU_SIG_SMALL_IMM)) {
+ return;
+ }
+
+ validation_state->live_min_clamp_offsets[lri_add] =
+ validated_shader->uniforms_size;
+ }
+}
+
+static bool
+check_instruction_writes(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state)
+{
+ uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
+ uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
+ bool ok;
+
+ if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
+ DRM_ERROR("ADD and MUL both set up textures\n");
+ return false;
+ }
+
+ ok = (check_reg_write(inst, validated_shader, validation_state,
+ false) &&
+ check_reg_write(inst, validated_shader, validation_state,
+ true));
+
+ track_live_clamps(inst, validated_shader, validation_state);
+
+ return ok;
+}
+
+static bool
+check_instruction_reads(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader)
+{
+ uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+ uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+
+ if (raddr_a == QPU_R_UNIF ||
+ (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) {
+		/* This can't overflow the uint32_t, because every 4 bytes
+		 * added here is paid for by 8 bytes of shader instruction,
+		 * so we'd run out of memory long before it wrapped.
+		 */
+ validated_shader->uniforms_size += 4;
+ }
+
+ return true;
+}
+
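+/* Validates an entire shader BO.  On success, returns a
+ * vc4_validated_shader_info recording how many bytes of uniforms the
+ * shader consumes and where its texture samples are, so the caller can
+ * relocate the uniform stream; on failure, returns NULL and the submit
+ * must be rejected.  (A summary of the behavior below, not an exhaustive
+ * contract.)
+ */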
+struct vc4_validated_shader_info *
+vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
+{
+ bool found_shader_end = false;
+ int shader_end_ip = 0;
+ uint32_t ip, max_ip;
+ uint64_t *shader;
+ struct vc4_validated_shader_info *validated_shader;
+ struct vc4_shader_validation_state validation_state;
+ int i;
+
+ memset(&validation_state, 0, sizeof(validation_state));
+
+ for (i = 0; i < 8; i++)
+ validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
+ for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
+ validation_state.live_min_clamp_offsets[i] = ~0;
+
+ shader = shader_obj->vaddr;
+ max_ip = shader_obj->base.size / sizeof(uint64_t);
+
+	validated_shader = kzalloc(sizeof(*validated_shader), GFP_KERNEL);
+ if (!validated_shader)
+ return NULL;
+
+ for (ip = 0; ip < max_ip; ip++) {
+ uint64_t inst = shader[ip];
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+
+ switch (sig) {
+ case QPU_SIG_NONE:
+ case QPU_SIG_WAIT_FOR_SCOREBOARD:
+ case QPU_SIG_SCOREBOARD_UNLOCK:
+ case QPU_SIG_COLOR_LOAD:
+ case QPU_SIG_LOAD_TMU0:
+ case QPU_SIG_LOAD_TMU1:
+ case QPU_SIG_PROG_END:
+ case QPU_SIG_SMALL_IMM:
+ if (!check_instruction_writes(inst, validated_shader,
+ &validation_state)) {
+ DRM_ERROR("Bad write at ip %d\n", ip);
+ goto fail;
+ }
+
+ if (!check_instruction_reads(inst, validated_shader))
+ goto fail;
+
+ if (sig == QPU_SIG_PROG_END) {
+ found_shader_end = true;
+ shader_end_ip = ip;
+ }
+
+ break;
+
+ case QPU_SIG_LOAD_IMM:
+ if (!check_instruction_writes(inst, validated_shader,
+ &validation_state)) {
+ DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
+ goto fail;
+ }
+ break;
+
+ default:
+ DRM_ERROR("Unsupported QPU signal %d at "
+ "instruction %d\n", sig, ip);
+ goto fail;
+ }
+
+ /* There are two delay slots after program end is signaled
+ * that are still executed, then we're finished.
+ */
+ if (found_shader_end && ip == shader_end_ip + 2)
+ break;
+ }
+
+ if (ip == max_ip) {
+ DRM_ERROR("shader failed to terminate before "
+ "shader BO end at %zd\n",
+ shader_obj->base.size);
+ goto fail;
+ }
+
+	/* Again, no chance of integer overflow here because the worst-case
+	 * scenario is 8 bytes of uniforms plus handles per 8-byte
+ * instruction.
+ */
+ validated_shader->uniforms_src_size =
+ (validated_shader->uniforms_size +
+ 4 * validated_shader->num_texture_samples);
+
+ return validated_shader;
+
+fail:
+ if (validated_shader) {
+ kfree(validated_shader->texture_samples);
+ kfree(validated_shader);
+ }
+ return NULL;
+}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 306a7df7d013..a165f03eaa79 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -374,16 +374,6 @@ static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
.best_encoder = virtio_gpu_best_encoder,
};
-static void virtio_gpu_conn_save(struct drm_connector *connector)
-{
- DRM_DEBUG("\n");
-}
-
-static void virtio_gpu_conn_restore(struct drm_connector *connector)
-{
- DRM_DEBUG("\n");
-}
-
static enum drm_connector_status virtio_gpu_conn_detect(
struct drm_connector *connector,
bool force)
@@ -409,8 +399,6 @@ static void virtio_gpu_conn_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .save = virtio_gpu_conn_save,
- .restore = virtio_gpu_conn_restore,
.detect = virtio_gpu_conn_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = virtio_gpu_conn_destroy,
@@ -443,7 +431,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
if (IS_ERR(plane))
return PTR_ERR(plane);
drm_crtc_init_with_planes(dev, crtc, plane, NULL,
- &virtio_gpu_crtc_funcs);
+ &virtio_gpu_crtc_funcs, NULL);
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
plane->crtc = crtc;
@@ -453,7 +441,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
- DRM_MODE_ENCODER_VIRTUAL);
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
encoder->possible_crtcs = 1 << index;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 4a74129c5708..572fb351feab 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -107,7 +107,7 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
&virtio_gpu_plane_funcs,
virtio_gpu_formats,
ARRAY_SIZE(virtio_gpu_formats),
- DRM_PLANE_TYPE_PRIMARY);
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
goto err_plane_init;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e38db35132ed..162f188969a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -470,7 +470,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
}
-static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
+static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
.dirty = vmw_framebuffer_surface_dirty,
};
@@ -647,7 +647,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
return ret;
}
-static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
+static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
.destroy = vmw_framebuffer_dmabuf_destroy,
.dirty = vmw_framebuffer_dmabuf_dirty,
};
@@ -1331,14 +1331,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
return 0;
}
-void vmw_du_crtc_save(struct drm_crtc *crtc)
-{
-}
-
-void vmw_du_crtc_restore(struct drm_crtc *crtc)
-{
-}
-
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t start, uint32_t size)
@@ -1360,14 +1352,6 @@ int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
return 0;
}
-void vmw_du_connector_save(struct drm_connector *connector)
-{
-}
-
-void vmw_du_connector_restore(struct drm_connector *connector)
-{
-}
-
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
@@ -1554,7 +1538,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
drm_mode_probed_add(connector, mode);
}
- drm_mode_connector_list_update(connector, true);
+ drm_mode_connector_list_update(connector);
/* Move the prefered mode first, help apps pick the right mode. */
drm_mode_sort(&connector->modes);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index bb63e4d795fa..2def684e61a4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -294,9 +294,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
return vmw_ldu_commit_list(dev_priv);
}
-static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
- .save = vmw_du_crtc_save,
- .restore = vmw_du_crtc_restore,
+static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.cursor_set = vmw_du_crtc_cursor_set,
.cursor_move = vmw_du_crtc_cursor_move,
.gamma_set = vmw_du_crtc_gamma_set,
@@ -314,7 +312,7 @@ static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
}
-static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
+static const struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
.destroy = vmw_ldu_encoder_destroy,
};
@@ -327,10 +325,8 @@ static void vmw_ldu_connector_destroy(struct drm_connector *connector)
vmw_ldu_destroy(vmw_connector_to_ldu(connector));
}
-static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.dpms = vmw_du_connector_dpms,
- .save = vmw_du_connector_save,
- .restore = vmw_du_connector_restore,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
.set_property = vmw_du_connector_set_property,
@@ -367,7 +363,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
connector->status = vmw_du_connector_detect(connector, true);
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL);
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
drm_mode_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b96d1ab610c5..ecac70af032a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -530,9 +530,7 @@ out_no_fence:
return ret;
}
-static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
- .save = vmw_du_crtc_save,
- .restore = vmw_du_crtc_restore,
+static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.cursor_set = vmw_du_crtc_cursor_set,
.cursor_move = vmw_du_crtc_cursor_move,
.gamma_set = vmw_du_crtc_gamma_set,
@@ -550,7 +548,7 @@ static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}
-static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
+static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
.destroy = vmw_sou_encoder_destroy,
};
@@ -563,12 +561,10 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
vmw_sou_destroy(vmw_connector_to_sou(connector));
}
-static struct drm_connector_funcs vmw_sou_connector_funcs = {
+static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
- .save = vmw_du_connector_save,
- .restore = vmw_du_connector_restore,
	.detect = vmw_du_connector_detect,
	.fill_modes = vmw_du_connector_fill_modes,
.set_property = vmw_du_connector_set_property,
.destroy = vmw_sou_connector_destroy,
};
@@ -603,7 +597,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
connector->status = vmw_du_connector_detect(connector, true);
drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL);
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
drm_mode_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index b1fc1c02792d..87fc00af8d28 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1040,9 +1040,7 @@ out_finish:
/*
* Screen Target CRTC dispatch table
*/
-static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
- .save = vmw_du_crtc_save,
- .restore = vmw_du_crtc_restore,
+static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.cursor_set = vmw_du_crtc_cursor_set,
.cursor_move = vmw_du_crtc_cursor_move,
.gamma_set = vmw_du_crtc_gamma_set,
@@ -1072,7 +1070,7 @@ static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder)
vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
}
-static struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
+static const struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
.destroy = vmw_stdu_encoder_destroy,
};
@@ -1099,10 +1097,8 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
-static struct drm_connector_funcs vmw_stdu_connector_funcs = {
+static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
- .save = vmw_du_connector_save,
- .restore = vmw_du_connector_restore,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
.set_property = vmw_du_connector_set_property,
@@ -1149,7 +1145,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
connector->status = vmw_du_connector_detect(connector, false);
drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL);
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
drm_mode_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7d620e82e000..c2a721a8cef9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -771,7 +771,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
+ if (unlikely(srf->offsets == NULL)) {
ret = -ENOMEM;
goto out_no_offsets;
}
@@ -815,11 +815,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->sizes[0].height == 64 &&
srf->format == SVGA3D_A8R8G8B8) {
- srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
- /* clear the image */
- if (srf->snooper.image) {
- memset(srf->snooper.image, 0x00, 64 * 64 * 4);
- } else {
+ srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
+ if (!srf->snooper.image) {
DRM_ERROR("Failed to allocate cursor_image\n");
ret = -ENOMEM;
goto out_no_copy;