author     Maxime Ripard <maxime.ripard@bootlin.com>   2019-01-11 16:32:10 +0100
committer  Maxime Ripard <maxime.ripard@bootlin.com>   2019-01-11 16:32:10 +0100
commit     23d19ba06b9c5614d6457f5fed349ec8f6d4dac9 (patch)
tree       39f0b657e5b1b5b958780cae4ae6360f69548d50 /drivers/gpu/drm
parent     7d0250ed8e69fb6a66caecf60b8753a21224cc1a (diff)
parent     e3d093070eb0b5e3df668d3eb04100ea79343c65 (diff)
download   linux-23d19ba06b9c5614d6457f5fed349ec8f6d4dac9.tar.bz2
Merge drm/drm-next into drm-misc-next
drm-next has been forwarded to 5.0-rc1, and we need it to apply the damage
helper for dirtyfb series from Noralf Trønnes.

Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
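
The series this merge unblocks adds the generic damage helpers (drivers/gpu/drm/drm_damage_helper.c in the diffstat below). As orientation only, and not part of this commit, a driver typically wires them up roughly as in the sketch below; all foo_* identifiers are hypothetical placeholders.

/*
 * Hedged sketch only -- not code from this merge. Shows the usual shape of
 * a driver using the new damage helpers; foo_* names are placeholders.
 */
#include <drm/drm_damage_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

static const struct drm_framebuffer_funcs foo_fb_funcs = {
	.destroy       = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
	/* Turn DIRTYFB ioctls into atomic commits carrying damage clips. */
	.dirty         = drm_atomic_helper_dirtyfb,
};

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	/* Iterate the damage on this plane; the iterator degrades to a
	 * single full-plane rectangle when userspace supplied no clips.
	 */
	drm_atomic_helper_damage_iter_init(&iter, old_state, plane->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		foo_flush_rect(plane, &clip);	/* hypothetical flush helper */
}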
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h154
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c140
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c313
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c439
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h243
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c166
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c142
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c52
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c35
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c22
-rw-r--r--drivers/gpu/drm/amd/display/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c854
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h46
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c122
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c84
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c265
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c818
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c91
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c96
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c98
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_event_log.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/compressor.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c32
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c10
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h14
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_shared.h27
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c15
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/Makefile31
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c326
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h47
-rw-r--r--drivers/gpu/drm/amd/include/amd_acpi.h175
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h32
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h35
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h4
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c134
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c49
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c36
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c31
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c99
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c77
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c1
-rw-r--r--drivers/gpu/drm/ast/ast_main.c3
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c36
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c2
-rw-r--r--drivers/gpu/drm/drm_atomic.c22
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c3
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c3
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c17
-rw-r--r--drivers/gpu/drm/drm_auth.c2
-rw-r--r--drivers/gpu/drm/drm_connector.c117
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c335
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c14
-rw-r--r--drivers/gpu/drm/drm_dsc.c228
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_file.c2
-rw-r--r--drivers/gpu/drm/drm_internal.h2
-rw-r--r--drivers/gpu/drm/drm_ioctl.c10
-rw-r--r--drivers/gpu/drm/drm_lease.c2
-rw-r--r--drivers/gpu/drm/drm_mode_config.c12
-rw-r--r--drivers/gpu/drm/drm_sysfs.c10
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c20
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c9
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c37
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h12
-rw-r--r--drivers/gpu/drm/exynos/Kconfig5
-rw-r--r--drivers/gpu/drm/exynos/Makefile3
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c87
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c157
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c55
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c128
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c111
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h134
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c30
-rw-r--r--drivers/gpu/drm/exynos/regs-decon5433.h22
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h9
-rw-r--r--drivers/gpu/drm/i915/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/Makefile7
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c33
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c68
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c27
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h62
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c120
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c31
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c17
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c337
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h28
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c2
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c117
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/i915_query.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h15
-rw-r--r--drivers/gpu/drm/i915/i915_request.c3
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c7
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h5
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c31
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h11
-rw-r--r--drivers/gpu/drm/i915/icl_dsi.c493
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c1
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c102
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c2
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c6
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c228
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c8
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h32
-rw-r--r--drivers/gpu/drm/i915/intel_display.c332
-rw-r--r--drivers/gpu/drm/i915/intel_display.h21
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c303
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c3
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h52
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h5
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_vbt.c22
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c46
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c21
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c90
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.h6
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c589
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c118
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c87
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h25
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c4
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c138
-rw-r--r--drivers/gpu/drm/i915/intel_vdsc.c1088
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds.c999
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds.h36
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_reset.c44
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_reset.h15
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c199
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.h37
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c63
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_lrc.c301
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_workarounds.c247
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c5
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c11
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c10
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c12
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c10
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c18
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c10
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c11
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c27
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c1
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c4
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c12
-rw-r--r--drivers/gpu/drm/msm/Kconfig4
-rw-r--r--drivers/gpu/drm/msm/Makefile11
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h298
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c492
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.h21
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c45
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c15
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c20
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h78
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c140
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c89
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h8
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c1165
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h430
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h19
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c77
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c159
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h21
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c45
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c169
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h29
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c402
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h68
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c2393
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h103
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c199
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h30
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c50
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c36
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c66
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h59
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c374
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h45
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c130
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c240
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h217
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h101
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c28
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h359
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c8
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c12
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c70
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c43
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c8
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c90
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c12
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c16
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c30
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c8
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c10
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c28
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c28
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c20
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c16
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c10
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c40
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c10
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c20
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c12
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c4
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c8
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c23
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c219
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h28
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c14
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c10
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c221
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h5
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c44
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c118
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c137
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_trace.h90
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_tracepoints.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gpummu.c123
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c5
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h3
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h6
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c18
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c14
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h16
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/atom.h7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base907c.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c61
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c44
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head507d.c19
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c206
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/lut.c50
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/lut.h5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c133
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cla06f.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/clc36f.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/memory.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c37
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c46
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c152
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c90
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c116
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c89
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c167
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c145
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c20
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c58
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c21
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c6
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c135
-rw-r--r--drivers/gpu/drm/selftests/Makefile3
-rw-r--r--drivers/gpu/drm/selftests/drm_modeset_selftests.h21
-rw-r--r--drivers/gpu/drm/selftests/test-drm_damage_helper.c811
-rw-r--r--drivers/gpu/drm/selftests/test-drm_modeset_common.h21
-rw-r--r--drivers/gpu/drm/tegra/dc.c38
-rw-r--r--drivers/gpu/drm/tegra/drm.c1
-rw-r--r--drivers/gpu/drm/tegra/falcon.c14
-rw-r--r--drivers/gpu/drm/tegra/hub.c48
-rw-r--r--drivers/gpu/drm/tegra/hub.h3
-rw-r--r--drivers/gpu/drm/tegra/sor.c254
-rw-r--r--drivers/gpu/drm/tegra/sor.h68
-rw-r--r--drivers/gpu/drm/tegra/vic.c46
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c34
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c33
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c569
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h150
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c359
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c561
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c36
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h37
-rw-r--r--drivers/gpu/drm/xen/Kconfig1
-rw-r--r--drivers/gpu/drm/xen/Makefile1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c65
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_shbuf.c414
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_shbuf.h64
599 files changed, 20872 insertions, 12282 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1fafc2f8e8f9..ce8d1d384319 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -32,12 +32,12 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
drm_scdc_helper.o drm_gem_framebuffer_helper.o \
- drm_atomic_state_helper.o
+ drm_atomic_state_helper.o drm_damage_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 42f882c633ee..bcef6ea4bcf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -81,6 +81,8 @@
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
+#include "amdgpu_doorbell.h"
+#include "amdgpu_amdkfd.h"
#define MAX_GPU_INSTANCE 16
@@ -162,6 +164,7 @@ extern int amdgpu_si_support;
extern int amdgpu_cik_support;
#endif
+#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
@@ -234,7 +237,7 @@ enum amdgpu_kiq_irq {
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 20
+#define MAX_KIQ_REG_TRY 80
int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,
@@ -361,123 +364,6 @@ int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
/*
- * GPU doorbell structures, functions & helpers
- */
-typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
-{
- AMDGPU_DOORBELL_KIQ = 0x000,
- AMDGPU_DOORBELL_HIQ = 0x001,
- AMDGPU_DOORBELL_DIQ = 0x002,
- AMDGPU_DOORBELL_MEC_RING0 = 0x010,
- AMDGPU_DOORBELL_MEC_RING1 = 0x011,
- AMDGPU_DOORBELL_MEC_RING2 = 0x012,
- AMDGPU_DOORBELL_MEC_RING3 = 0x013,
- AMDGPU_DOORBELL_MEC_RING4 = 0x014,
- AMDGPU_DOORBELL_MEC_RING5 = 0x015,
- AMDGPU_DOORBELL_MEC_RING6 = 0x016,
- AMDGPU_DOORBELL_MEC_RING7 = 0x017,
- AMDGPU_DOORBELL_GFX_RING0 = 0x020,
- AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
- AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
- AMDGPU_DOORBELL_IH = 0x1E8,
- AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
- AMDGPU_DOORBELL_INVALID = 0xFFFF
-} AMDGPU_DOORBELL_ASSIGNMENT;
-
-struct amdgpu_doorbell {
- /* doorbell mmio */
- resource_size_t base;
- resource_size_t size;
- u32 __iomem *ptr;
- u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
-};
-
-/*
- * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space
- */
-typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
-{
- /*
- * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in
- * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
- * Compute related doorbells are allocated from 0x00 to 0x8a
- */
-
-
- /* kernel scheduling */
- AMDGPU_DOORBELL64_KIQ = 0x00,
-
- /* HSA interface queue and debug queue */
- AMDGPU_DOORBELL64_HIQ = 0x01,
- AMDGPU_DOORBELL64_DIQ = 0x02,
-
- /* Compute engines */
- AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
- AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
- AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
- AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
- AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
- AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
- AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
- AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
-
- /* User queue doorbell range (128 doorbells) */
- AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
- AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
-
- /* Graphics engine */
- AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
-
- /*
- * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
- * Graphics voltage island aperture 1
- * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
- */
-
- /* sDMA engines reserved from 0xe0 -0xef */
- AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1,
- AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xE9,
-
- /* For vega10 sriov, the sdma doorbell must be fixed as follow
- * to keep the same setting with host driver, or it will
- * happen conflicts
- */
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 = 0xF0,
- AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 = 0xF2,
- AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
-
- /* Interrupt handler */
- AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
- AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
- AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
-
- /* VCN engine use 32 bits doorbell */
- AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */
- AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
- AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
- AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
-
- /* overlap the doorbell assignment with VCN as they are mutually exclusive
- * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD
- */
- AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
- AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
- AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
- AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
-
- AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
- AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
- AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
- AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
-
- AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
- AMDGPU_DOORBELL64_INVALID = 0xFFFF
-} AMDGPU_DOORBELL64_ASSIGNMENT;
-
-/*
* IRQS.
*/
@@ -654,6 +540,8 @@ struct amdgpu_asic_funcs {
struct amdgpu_ring *ring);
/* check if the asic needs a full reset of if soft reset will work */
bool (*need_full_reset)(struct amdgpu_device *adev);
+ /* initialize doorbell layout for specific asic*/
+ void (*init_doorbell_index)(struct amdgpu_device *adev);
};
/*
@@ -976,6 +864,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* KFD */
+ struct amdgpu_kfd_dev kfd;
+
/* display related functionality */
struct amdgpu_display_manager dm;
@@ -989,9 +880,6 @@ struct amdgpu_device {
atomic64_t visible_pin_size;
atomic64_t gart_pin_size;
- /* amdkfd interface */
- struct kfd_dev *kfd;
-
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
@@ -1023,6 +911,10 @@ struct amdgpu_device {
unsigned long last_mm_index;
bool in_gpu_reset;
struct mutex lock_reset;
+ struct amdgpu_doorbell_index doorbell_index;
+
+ int asic_reset_res;
+ struct work_struct xgmi_reset_work;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1047,11 +939,6 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
-u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
-u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
-
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
@@ -1113,11 +1000,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
-#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
-#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
-#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
-#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
-
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
@@ -1159,6 +1041,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
+#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
/* Common functions */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@@ -1219,12 +1102,6 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-
-/*
- * functions used by amdgpu_xgmi.c
- */
-int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
-
/*
* functions used by amdgpu_encoder.c
*/
@@ -1252,6 +1129,9 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+ struct amdgpu_dm_backlight_caps *caps);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 7f0afc526419..4376b17ca594 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -41,28 +41,21 @@ struct amdgpu_atif_notification_cfg {
};
struct amdgpu_atif_notifications {
- bool display_switch;
- bool expansion_mode_change;
bool thermal_state;
bool forced_power_state;
bool system_power_state;
- bool display_conf_change;
- bool px_gfx_switch;
bool brightness_change;
bool dgpu_display_event;
+ bool gpu_package_power_limit;
};
struct amdgpu_atif_functions {
bool system_params;
bool sbios_requests;
- bool select_active_disp;
- bool lid_state;
- bool get_tv_standard;
- bool set_tv_standard;
- bool get_panel_expansion_mode;
- bool set_panel_expansion_mode;
bool temperature_change;
- bool graphics_device_types;
+ bool query_backlight_transfer_characteristics;
+ bool ready_to_undock;
+ bool external_gpu_information;
};
struct amdgpu_atif {
@@ -72,6 +65,7 @@ struct amdgpu_atif {
struct amdgpu_atif_functions functions;
struct amdgpu_atif_notification_cfg notification_cfg;
struct amdgpu_encoder *encoder_for_bl;
+ struct amdgpu_dm_backlight_caps backlight_caps;
};
/* Call the ATIF method
@@ -137,15 +131,12 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
*/
static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
{
- n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
- n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
- n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
- n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+ n->gpu_package_power_limit = mask & ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED;
}
/**
@@ -162,14 +153,11 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
{
f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
- f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
- f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
- f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
- f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
- f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
- f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
- f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+ f->query_backlight_transfer_characteristics =
+ mask & ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED;
+ f->ready_to_undock = mask & ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED;
+ f->external_gpu_information = mask & ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED;
}
/**
@@ -311,6 +299,65 @@ out:
}
/**
+ * amdgpu_atif_query_backlight_caps - get min and max backlight input signal
+ *
+ * @handle: acpi handle
+ *
+ * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function
+ * to determine the acceptable range of backlight values
+ *
+ * Backlight_caps.caps_valid will be set to true if the query is successful
+ *
+ * The input signals are in range 0-255
+ *
+ * This function assumes the display with backlight is the first LCD
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
+{
+ union acpi_object *info;
+ struct atif_qbtc_output characteristics;
+ struct atif_qbtc_arguments arguments;
+ struct acpi_buffer params;
+ size_t size;
+ int err = 0;
+
+ arguments.size = sizeof(arguments);
+ arguments.requested_display = ATIF_QBTC_REQUEST_LCD1;
+
+ params.length = sizeof(arguments);
+ params.pointer = (void *)&arguments;
+
+ info = amdgpu_atif_call(atif,
+ ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS,
+ &params);
+ if (!info) {
+ err = -EIO;
+ goto out;
+ }
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(&characteristics, 0, sizeof(characteristics));
+ size = min(sizeof(characteristics), size);
+ memcpy(&characteristics, info->buffer.pointer, size);
+
+ atif->backlight_caps.caps_valid = true;
+ atif->backlight_caps.min_input_signal =
+ characteristics.min_input_signal;
+ atif->backlight_caps.max_input_signal =
+ characteristics.max_input_signal;
+out:
+ kfree(info);
+ return err;
+}
+
+/**
* amdgpu_atif_get_sbios_requests - get requested sbios event
*
* @handle: acpi handle
@@ -799,6 +846,17 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
}
+ if (atif->functions.query_backlight_transfer_characteristics) {
+ ret = amdgpu_atif_query_backlight_caps(atif);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS failed: %d\n",
+ ret);
+ atif->backlight_caps.caps_valid = false;
+ }
+ } else {
+ atif->backlight_caps.caps_valid = false;
+ }
+
out:
adev->acpi_nb.notifier_call = amdgpu_acpi_event;
register_acpi_notifier(&adev->acpi_nb);
@@ -806,6 +864,18 @@ out:
return ret;
}
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+ struct amdgpu_dm_backlight_caps *caps)
+{
+ if (!adev->atif) {
+ caps->caps_valid = false;
+ return;
+ }
+ caps->caps_valid = adev->atif->backlight_caps.caps_valid;
+ caps->min_input_signal = adev->atif->backlight_caps.min_input_signal;
+ caps->max_input_signal = adev->atif->backlight_caps.max_input_signal;
+}
+
/**
* amdgpu_acpi_fini - tear down driver acpi support
*
@@ -816,6 +886,5 @@ out:
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
unregister_acpi_notifier(&adev->acpi_nb);
- if (adev->atif)
- kfree(adev->atif);
+ kfree(adev->atif);
}
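
The ATIF plumbing above ends in amdgpu_acpi_get_backlight_caps(), which hands the panel's valid backlight input-signal range to callers. The real consumer is the display manager (amdgpu_dm.c, also in this diffstat); purely to illustrate the contract, a caller could scale a 0-255 user brightness level like the following hypothetical helper (foo_* name is a placeholder):

/* Illustrative sketch, not from this commit: map a 0-255 brightness level
 * onto the ACPI-reported input-signal range, falling back to pass-through
 * when no ATIF data is available.
 */
static u32 foo_scale_brightness(struct amdgpu_device *adev, u32 user_level)
{
	struct amdgpu_dm_backlight_caps caps = {};

	amdgpu_acpi_get_backlight_caps(adev, &caps);
	if (!caps.caps_valid)
		return user_level;

	/* Linear map of 0..255 onto [min_input_signal, max_input_signal]. */
	return caps.min_input_signal +
	       user_level * (caps.max_input_signal - caps.min_input_signal) / 255;
}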
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index bcf1666fb31d..2dfaf158ef07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -26,15 +26,26 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>
+#include <linux/dma-buf.h>
const struct kgd2kfd_calls *kgd2kfd;
static const unsigned int compute_vmid_bitmap = 0xFF00;
+/* Total memory size in system memory and all GPU VRAM. Used to
+ * estimate worst case amount of memory to reserve for page tables
+ */
+uint64_t amdgpu_amdkfd_total_mem_size;
+
int amdgpu_amdkfd_init(void)
{
+ struct sysinfo si;
int ret;
+ si_meminfo(&si);
+ amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
+ amdgpu_amdkfd_total_mem_size *= si.mem_unit;
+
#ifdef CONFIG_HSA_AMD
ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
if (ret)
@@ -73,9 +84,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
@@ -85,8 +98,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
return;
}
- adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev,
+ adev->pdev, kfd2kgd);
+
+ if (adev->kfd.dev)
+ amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}
/**
@@ -126,7 +142,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i, n;
int last_valid_bit;
- if (adev->kfd) {
+
+ if (adev->kfd.dev) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = compute_vmid_bitmap,
.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
@@ -165,7 +182,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
&gpu_resources.doorbell_start_offset);
if (adev->asic_type < CHIP_VEGA10) {
- kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
return;
}
@@ -179,25 +196,14 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
* process in case of 64-bit doorbells so we
* can use each doorbell assignment twice.
*/
- if (adev->asic_type == CHIP_VEGA10) {
- gpu_resources.sdma_doorbell[0][i] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
- gpu_resources.sdma_doorbell[0][i+1] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i+1] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
- } else {
- gpu_resources.sdma_doorbell[0][i] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
- gpu_resources.sdma_doorbell[0][i+1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i+1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
- }
+ gpu_resources.sdma_doorbell[0][i] =
+ adev->doorbell_index.sdma_engine0 + (i >> 1);
+ gpu_resources.sdma_doorbell[0][i+1] =
+ adev->doorbell_index.sdma_engine0 + 0x200 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i] =
+ adev->doorbell_index.sdma_engine1 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i+1] =
+ adev->doorbell_index.sdma_engine1 + 0x200 + (i >> 1);
}
/* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
* SDMA, IH and VCN. So don't use them for the CP.
@@ -205,37 +211,37 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
gpu_resources.reserved_doorbell_mask = 0x1e0;
gpu_resources.reserved_doorbell_val = 0x0e0;
- kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
}
}
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
- if (adev->kfd) {
- kgd2kfd->device_exit(adev->kfd);
- adev->kfd = NULL;
+ if (adev->kfd.dev) {
+ kgd2kfd->device_exit(adev->kfd.dev);
+ adev->kfd.dev = NULL;
}
}
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry)
{
- if (adev->kfd)
- kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
+ if (adev->kfd.dev)
+ kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry);
}
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
- if (adev->kfd)
- kgd2kfd->suspend(adev->kfd);
+ if (adev->kfd.dev)
+ kgd2kfd->suspend(adev->kfd.dev);
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->resume(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->resume(adev->kfd.dev);
return r;
}
@@ -244,8 +250,8 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->pre_reset(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->pre_reset(adev->kfd.dev);
return r;
}
@@ -254,8 +260,8 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->post_reset(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->post_reset(adev->kfd.dev);
return r;
}
@@ -428,6 +434,62 @@ void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
cu_info->lds_size = acu_info.lds_size;
}
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dma_buf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint64_t metadata_flags;
+ int r = -EINVAL;
+
+ dma_buf = dma_buf_get(dma_buf_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ goto out_put;
+
+ obj = dma_buf->priv;
+ if (obj->dev->driver != adev->ddev->driver)
+ /* Can't handle buffers from different drivers */
+ goto out_put;
+
+ adev = obj->dev->dev_private;
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ goto out_put;
+
+ r = 0;
+ if (dma_buf_kgd)
+ *dma_buf_kgd = (struct kgd_dev *)adev;
+ if (bo_size)
+ *bo_size = amdgpu_bo_size(bo);
+ if (metadata_size)
+ *metadata_size = bo->metadata_size;
+ if (metadata_buffer)
+ r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
+ metadata_size, &metadata_flags);
+ if (flags) {
+ *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ *flags |= ALLOC_MEM_FLAGS_PUBLIC;
+ }
+
+out_put:
+ dma_buf_put(dma_buf);
+ return r;
+}
+
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -510,7 +572,7 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
- if (adev->kfd) {
+ if (adev->kfd.dev) {
if ((1 << vmid) & compute_vmid_bitmap)
return true;
}
@@ -524,7 +586,7 @@ bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
return false;
}
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index bcf587b4ba98..70429f7aa9a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -35,6 +34,7 @@
#include "amdgpu_vm.h"
extern const struct kgd2kfd_calls *kgd2kfd;
+extern uint64_t amdgpu_amdkfd_total_mem_size;
struct amdgpu_device;
@@ -77,6 +77,11 @@ struct amdgpu_amdkfd_fence {
char timeline_name[TASK_COMM_LEN];
};
+struct amdgpu_kfd_dev {
+ struct kfd_dev *dev;
+ uint64_t vram_used;
+};
+
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
@@ -144,6 +149,11 @@ uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dmabuf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
@@ -195,7 +205,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
struct kfd_vm_fault_info *info);
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dmabuf,
+ uint64_t va, void *vm,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset);
+
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 72a357dae070..ff7fac7df34b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -23,6 +23,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 0e2a56b6a9b6..56ea929f524b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -24,6 +24,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 03b604c96d94..5c51d4910650 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -26,6 +26,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index df0a059565f9..be1ab43473c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
+#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
@@ -46,9 +47,9 @@
/* Impose limit on how much memory KFD can use */
static struct {
uint64_t max_system_mem_limit;
- uint64_t max_userptr_mem_limit;
+ uint64_t max_ttm_mem_limit;
int64_t system_mem_used;
- int64_t userptr_mem_used;
+ int64_t ttm_mem_used;
spinlock_t mem_limit_lock;
} kfd_mem_limit;
@@ -90,8 +91,8 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
}
/* Set memory usage limits. Currently, limits are
- * System (kernel) memory - 3/8th System RAM
- * Userptr memory - 3/4th System RAM
+ * System (TTM + userptr) memory - 3/4th System RAM
+ * TTM memory - 3/8th System RAM
*/
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
@@ -103,48 +104,61 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
mem *= si.mem_unit;
spin_lock_init(&kfd_mem_limit.mem_limit_lock);
- kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
- kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
- pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
+ kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
+ kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
+ pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
(kfd_mem_limit.max_system_mem_limit >> 20),
- (kfd_mem_limit.max_userptr_mem_limit >> 20));
+ (kfd_mem_limit.max_ttm_mem_limit >> 20));
}
-static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain)
+static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 domain, bool sg)
{
- size_t acc_size;
+ size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
+ uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
int ret = 0;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
sizeof(struct amdgpu_bo));
- spin_lock(&kfd_mem_limit.mem_limit_lock);
+ vram_needed = 0;
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- if (kfd_mem_limit.system_mem_used + (acc_size + size) >
- kfd_mem_limit.max_system_mem_limit) {
- ret = -ENOMEM;
- goto err_no_mem;
- }
- kfd_mem_limit.system_mem_used += (acc_size + size);
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
- if ((kfd_mem_limit.system_mem_used + acc_size >
- kfd_mem_limit.max_system_mem_limit) ||
- (kfd_mem_limit.userptr_mem_used + (size + acc_size) >
- kfd_mem_limit.max_userptr_mem_limit)) {
- ret = -ENOMEM;
- goto err_no_mem;
- }
- kfd_mem_limit.system_mem_used += acc_size;
- kfd_mem_limit.userptr_mem_used += size;
+ /* TTM GTT memory */
+ system_mem_needed = acc_size + size;
+ ttm_mem_needed = acc_size + size;
+ } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+ /* Userptr */
+ system_mem_needed = acc_size + size;
+ ttm_mem_needed = acc_size;
+ } else {
+ /* VRAM and SG */
+ system_mem_needed = acc_size;
+ ttm_mem_needed = acc_size;
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+ vram_needed = size;
+ }
+
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+
+ if ((kfd_mem_limit.system_mem_used + system_mem_needed >
+ kfd_mem_limit.max_system_mem_limit) ||
+ (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+ kfd_mem_limit.max_ttm_mem_limit) ||
+ (adev->kfd.vram_used + vram_needed >
+ adev->gmc.real_vram_size - reserved_for_pt)) {
+ ret = -ENOMEM;
+ } else {
+ kfd_mem_limit.system_mem_used += system_mem_needed;
+ kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
+ adev->kfd.vram_used += vram_needed;
}
-err_no_mem:
+
spin_unlock(&kfd_mem_limit.mem_limit_lock);
return ret;
}
-static void unreserve_system_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain)
+static void unreserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 domain, bool sg)
{
size_t acc_size;
@@ -154,35 +168,39 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
kfd_mem_limit.system_mem_used -= (acc_size + size);
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
+ kfd_mem_limit.ttm_mem_used -= (acc_size + size);
+ } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+ kfd_mem_limit.system_mem_used -= (acc_size + size);
+ kfd_mem_limit.ttm_mem_used -= acc_size;
+ } else {
kfd_mem_limit.system_mem_used -= acc_size;
- kfd_mem_limit.userptr_mem_used -= size;
+ kfd_mem_limit.ttm_mem_used -= acc_size;
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ adev->kfd.vram_used -= size;
+ WARN_ONCE(adev->kfd.vram_used < 0,
+ "kfd VRAM memory accounting unbalanced");
+ }
}
WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
"kfd system memory accounting unbalanced");
- WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
- "kfd userptr memory accounting unbalanced");
+ WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
+ "kfd TTM memory accounting unbalanced");
spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
- spin_lock(&kfd_mem_limit.mem_limit_lock);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ u32 domain = bo->preferred_domains;
+ bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
- kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
- kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
- } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
- kfd_mem_limit.system_mem_used -=
- (bo->tbo.acc_size + amdgpu_bo_size(bo));
+ domain = AMDGPU_GEM_DOMAIN_CPU;
+ sg = false;
}
- WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
- "kfd system memory accounting unbalanced");
- WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
- "kfd userptr memory accounting unbalanced");
- spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}
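
For reference, the limits set in amdgpu_amdkfd_gpuvm_init_mem_limits() above are pure shift arithmetic: (mem >> 1) + (mem >> 2) is 3/4 of system RAM and (mem >> 1) - (mem >> 3) is 3/8. A minimal standalone sketch of the same math, assuming a hypothetical 16 GiB machine (illustration only, not part of the patch):

    /* Reproduces the KFD limit arithmetic for an assumed 16 GiB of RAM. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t mem = 16ULL << 30;                      /* 16 GiB system RAM      */
            uint64_t system_limit = (mem >> 1) + (mem >> 2); /* 3/4 of RAM -> 12288M   */
            uint64_t ttm_limit    = (mem >> 1) - (mem >> 3); /* 3/8 of RAM ->  6144M   */

            printf("system limit %lluM, TTM limit %lluM\n",
                   (unsigned long long)(system_limit >> 20),
                   (unsigned long long)(ttm_limit >> 20));
            return 0;
    }
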
@@ -395,23 +413,6 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
return 0;
}
-static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f)
-{
- int ret = amdgpu_sync_fence(adev, sync, f, false);
-
- /* Sync objects can't handle multiple GPUs (contexts) updating
- * sync->last_vm_update. Fortunately we don't need it for
- * KFD's purposes, so we can just drop that fence.
- */
- if (sync->last_vm_update) {
- dma_fence_put(sync->last_vm_update);
- sync->last_vm_update = NULL;
- }
-
- return ret;
-}
-
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
struct amdgpu_bo *pd = vm->root.base.bo;
@@ -422,7 +423,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
if (ret)
return ret;
- return sync_vm_fence(adev, sync, vm->last_update);
+ return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}
/* add_bo_to_vm - Add a BO to a VM
@@ -536,7 +537,7 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
struct amdgpu_bo *bo = mem->bo;
INIT_LIST_HEAD(&entry->head);
- entry->shared = true;
+ entry->num_shared = 1;
entry->bo = &bo->tbo;
mutex_lock(&process_info->lock);
if (userptr)
@@ -677,7 +678,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.tv.num_shared = 1;
ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
@@ -741,7 +742,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.tv.num_shared = 1;
ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
@@ -826,7 +827,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
/* Add the eviction fence back */
amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
- sync_vm_fence(adev, sync, bo_va->last_pt_update);
+ amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
return 0;
}
@@ -851,7 +852,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
return ret;
}
- return sync_vm_fence(adev, sync, bo_va->last_pt_update);
+ return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
}
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
@@ -886,6 +887,24 @@ update_gpuvm_pte_failed:
return ret;
}
+static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
+{
+ struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+
+ if (!sg)
+ return NULL;
+ if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
+ kfree(sg);
+ return NULL;
+ }
+ sg->sgl->dma_address = addr;
+ sg->sgl->length = size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->sgl->dma_length = size;
+#endif
+ return sg;
+}
+
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
struct amdgpu_vm *peer_vm;
@@ -901,6 +920,26 @@ static int process_validate_vms(struct amdkfd_process_info *process_info)
return 0;
}
+static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
+ struct amdgpu_sync *sync)
+{
+ struct amdgpu_vm *peer_vm;
+ int ret;
+
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ struct amdgpu_bo *pd = peer_vm->root.base.bo;
+
+ ret = amdgpu_sync_resv(NULL,
+ sync, pd->tbo.resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int process_update_pds(struct amdkfd_process_info *process_info,
struct amdgpu_sync *sync)
{
@@ -1149,6 +1188,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
@@ -1177,13 +1218,25 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (!offset || !*offset)
return -EINVAL;
user_addr = *offset;
+ } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+ bo_type = ttm_bo_type_sg;
+ alloc_flags = 0;
+ if (size > UINT_MAX)
+ return -EINVAL;
+ sg = create_doorbell_sg(*offset, size);
+ if (!sg)
+ return -ENOMEM;
} else {
return -EINVAL;
}
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if (!*mem)
- return -ENOMEM;
+ if (!*mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
@@ -1199,7 +1252,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
byte_align = (adev->family == AMDGPU_FAMILY_VI &&
adev->asic_type != CHIP_FIJI &&
adev->asic_type != CHIP_POLARIS10 &&
- adev->asic_type != CHIP_POLARIS11) ?
+ adev->asic_type != CHIP_POLARIS11 &&
+ adev->asic_type != CHIP_POLARIS12) ?
VI_BO_SIZE_ALIGN : 1;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
@@ -1215,10 +1269,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
amdgpu_sync_create(&(*mem)->sync);
- ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
if (ret) {
pr_debug("Insufficient system memory\n");
- goto err_reserve_system_mem;
+ goto err_reserve_limit;
}
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
@@ -1229,7 +1283,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
bp.byte_align = byte_align;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
- bp.type = ttm_bo_type_device;
+ bp.type = bo_type;
bp.resv = NULL;
ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret) {
@@ -1237,6 +1291,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
domain_string(alloc_domain), ret);
goto err_bo_create;
}
+ if (bo_type == ttm_bo_type_sg) {
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
+ }
bo->kfd_bo = *mem;
(*mem)->bo = bo;
if (user_addr)
@@ -1266,12 +1324,17 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
allocate_init_user_pages_failed:
amdgpu_bo_unref(&bo);
/* Don't unreserve system mem limit twice */
- goto err_reserve_system_mem;
+ goto err_reserve_limit;
err_bo_create:
- unreserve_system_mem_limit(adev, size, alloc_domain);
-err_reserve_system_mem:
+ unreserve_mem_limit(adev, size, alloc_domain, !!sg);
+err_reserve_limit:
mutex_destroy(&(*mem)->lock);
kfree(*mem);
+err:
+ if (sg) {
+ sg_free_table(sg);
+ kfree(sg);
+ }
return ret;
}
@@ -1341,6 +1404,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
/* Free the sync object */
amdgpu_sync_free(&mem->sync);
+ /* If the SG is not NULL, it's one we created for a doorbell
+ * BO. We need to free it.
+ */
+ if (mem->bo->tbo.sg) {
+ sg_free_table(mem->bo->tbo.sg);
+ kfree(mem->bo->tbo.sg);
+ }
+
/* Free the BO*/
amdgpu_bo_unref(&mem->bo);
mutex_destroy(&mem->lock);
@@ -1405,7 +1476,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* the queues are still stopped and we can leave mapping for
* the next restore worker
*/
- if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
+ bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
is_invalid_userptr = true;
if (check_if_add_bo_to_vm(avm, mem)) {
@@ -1642,6 +1714,60 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
return 0;
}
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dma_buf,
+ uint64_t va, void *vm,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ return -EINVAL;
+
+ obj = dma_buf->priv;
+ if (obj->dev->dev_private != adev)
+ /* Can't handle buffers from other devices */
+ return -EINVAL;
+
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ return -EINVAL;
+
+ *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if (!*mem)
+ return -ENOMEM;
+
+ if (size)
+ *size = amdgpu_bo_size(bo);
+
+ if (mmap_offset)
+ *mmap_offset = amdgpu_bo_mmap_offset(bo);
+
+ INIT_LIST_HEAD(&(*mem)->bo_va_list);
+ mutex_init(&(*mem)->lock);
+ (*mem)->mapping_flags =
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+
+ (*mem)->bo = amdgpu_bo_ref(bo);
+ (*mem)->va = va;
+ (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+ (*mem)->mapped_to_gpu_memory = 0;
+ (*mem)->process_info = avm->process_info;
+ add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
+ amdgpu_sync_create(&(*mem)->sync);
+
+ return 0;
+}
+
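
For context, a rough sketch of how a caller outside this patch (for example the KFD ioctl layer) might combine dma_buf_get() with the new amdgpu_amdkfd_gpuvm_import_dmabuf() export added above; the wrapper name and error handling here are assumptions, not the actual KFD code:

    /* Hypothetical caller, for illustration only. */
    static int example_import_dmabuf(struct kgd_dev *kgd, void *vm,
                                     int dma_buf_fd, uint64_t va)
    {
            struct dma_buf *dmabuf;
            struct kgd_mem *mem;
            uint64_t size, mmap_offset;
            int r;

            dmabuf = dma_buf_get(dma_buf_fd);
            if (IS_ERR(dmabuf))
                    return PTR_ERR(dmabuf);

            r = amdgpu_amdkfd_gpuvm_import_dmabuf(kgd, dmabuf, va, vm,
                                                  &mem, &size, &mmap_offset);
            /* The import takes its own reference on the BO, so the fd's
             * dma_buf reference can be dropped either way.
             */
            dma_buf_put(dmabuf);
            return r;
    }
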
/* Evict a userptr BO by stopping the queues if necessary
*
* Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1808,7 +1934,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
validate_list.head) {
list_add_tail(&mem->resv_list.head, &resv_list);
mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.shared = mem->validate_list.shared;
+ mem->resv_list.num_shared = mem->validate_list.num_shared;
}
/* Reserve all BOs and page tables for validation */
@@ -2027,7 +2153,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
list_add_tail(&mem->resv_list.head, &ctx.list);
mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.shared = mem->validate_list.shared;
+ mem->resv_list.num_shared = mem->validate_list.num_shared;
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
@@ -2044,13 +2170,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (ret)
goto validate_map_fail;
- /* Wait for PD/PTs validate to finish */
- /* FIXME: I think this isn't needed */
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node) {
- struct amdgpu_bo *bo = peer_vm->root.base.bo;
-
- ttm_bo_wait(&bo->tbo, false, false);
+ ret = process_sync_pds_resv(process_info, &sync_obj);
+ if (ret) {
+ pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
+ goto validate_map_fail;
}
/* Validate BOs and map them to GPUVM (update VM page tables). */
@@ -2066,7 +2189,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
pr_debug("Memory eviction: Validate BOs failed. Try again\n");
goto validate_map_fail;
}
-
+ ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
+ if (ret) {
+ pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+ goto validate_map_fail;
+ }
list_for_each_entry(bo_va_entry, &mem->bo_va_list,
bo_list) {
ret = update_gpuvm_pte((struct amdgpu_device *)
@@ -2087,6 +2214,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
goto validate_map_fail;
}
+ /* Wait for validate and PT updates to finish */
amdgpu_sync_wait(&sync_obj, false);
/* Release old eviction fence and create new one, because fence only
@@ -2105,10 +2233,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
process_info->eviction_fence = new_fence;
*ef = dma_fence_get(&new_fence->base);
- /* Wait for validate to finish and attach new eviction fence */
- list_for_each_entry(mem, &process_info->kfd_bo_list,
- validate_list.head)
- ttm_bo_wait(&mem->bo->tbo, false, false);
+ /* Attach new eviction fence to all BOs */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list.head)
amdgpu_bo_fence(mem->bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 14d2982a47cc..5c79da8e1150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -118,7 +118,6 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &bo->tbo;
- entry->tv.shared = !bo->prime_shared_count;
if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
list->gds_obj = bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index ceadeeadfa56..387f1cf1dc20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -381,7 +381,8 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xe7) ||
(adev->pdev->revision == 0xef))) ||
((adev->pdev->device == 0x6fdf) &&
- (adev->pdev->revision == 0xef))) {
+ ((adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
} else if ((adev->pdev->device == 0x67df) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dc54e9efd910..1c49b8266d69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -50,7 +50,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &bo->tbo;
- p->uf_entry.tv.shared = true;
+ /* One for TTM and one for the CS job */
+ p->uf_entry.tv.num_shared = 2;
p->uf_entry.user_pages = NULL;
drm_gem_object_put_unlocked(gobj);
@@ -124,14 +125,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk;
}
+ mutex_lock(&p->ctx->lock);
+
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED;
goto free_chunk;
}
- mutex_lock(&p->ctx->lock);
-
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
@@ -598,6 +599,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return r;
}
+ /* One for TTM and one for the CS job */
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->tv.num_shared = 2;
+
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
if (p->bo_list->first_userptr != p->bo_list->num_entries)
p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
@@ -717,8 +722,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
gws = p->bo_list->gws_obj;
oa = p->bo_list->oa_obj;
- amdgpu_bo_list_for_each_entry(e, p->bo_list)
- e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+ /* Make sure we use the exclusive slot for shared BOs */
+ if (bo->prime_shared_count)
+ e->tv.num_shared = 0;
+ e->bo_va = amdgpu_vm_bo_find(vm, bo);
+ }
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
@@ -955,10 +966,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
- if (r)
- return r;
-
p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
if (amdgpu_vm_debug) {
@@ -1421,6 +1428,9 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(fence))
return PTR_ERR(fence);
+ if (!fence)
+ fence = dma_fence_get_stub();
+
switch (info->in.what) {
case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
r = drm_syncobj_create(&syncobj, 0, fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 0c590ddf250a..7e22be7ca68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -43,7 +43,7 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo
r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
domain, bo,
NULL, &ptr);
- if (!bo)
+ if (!*bo)
return -ENOMEM;
memset(ptr, 0, size);
@@ -74,7 +74,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&csa_tv.head);
csa_tv.bo = &bo->tbo;
- csa_tv.shared = true;
+ csa_tv.num_shared = 1;
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f9b54236102d..d85184b5b35c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_UVD_ENC] = 1,
[AMDGPU_HW_IP_VCN_DEC] = 1,
[AMDGPU_HW_IP_VCN_ENC] = 1,
+ [AMDGPU_HW_IP_VCN_JPEG] = 1,
};
static int amdgput_ctx_total_num_entities(void)
@@ -247,7 +248,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
return -ENOMEM;
mutex_lock(&mgr->lock);
- r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+ r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
if (r < 0) {
mutex_unlock(&mgr->lock);
kfree(ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 590588a82471..8a078f4ae73d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -59,6 +59,8 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
+#include "amdgpu_xgmi.h"
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -513,6 +515,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
*/
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
+
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
adev->doorbell.base = 0;
@@ -525,15 +528,26 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
return -EINVAL;
+ amdgpu_asic_init_doorbell_index(adev);
+
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
- AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
+ adev->doorbell_index.max_assignment+1);
if (adev->doorbell.num_doorbells == 0)
return -EINVAL;
+ /* For Vega, reserve and map two pages on the doorbell BAR since the SDMA
+ * paging queue doorbell uses the second page. The
+ * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
+ * doorbells are in the first page, so with the paging queue enabled
+ * the max num_doorbells needs one extra page (0x400 in dwords).
+ */
+ if (adev->asic_type >= CHIP_VEGA10)
+ adev->doorbell.num_doorbells += 0x400;
+
adev->doorbell.ptr = ioremap(adev->doorbell.base,
adev->doorbell.num_doorbells *
sizeof(u32));
@@ -1851,6 +1865,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_remove_device(adev);
+
amdgpu_amdkfd_device_fini(adev);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
@@ -2340,6 +2357,19 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
+
+static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
+{
+ struct amdgpu_device *adev =
+ container_of(__work, struct amdgpu_device, xgmi_reset_work);
+
+ adev->asic_reset_res = amdgpu_asic_reset(adev);
+ if (adev->asic_reset_res)
+ DRM_WARN("ASIC reset failed with error %d for drm dev %s",
+ adev->asic_reset_res, adev->ddev->unique);
+}
+
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -2438,6 +2468,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
amdgpu_device_delay_enable_gfx_off);
+ INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+
adev->gfx.gfx_off_req_count = 1;
adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
@@ -2458,9 +2490,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- /* doorbell bar mapping */
- amdgpu_device_doorbell_init(adev);
-
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
@@ -2479,6 +2508,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* doorbell bar mapping and doorbell index init */
+ amdgpu_device_doorbell_init(adev);
+
/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
@@ -3151,86 +3183,6 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
return 0;
}
-/**
- * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
- *
- * @adev: amdgpu device pointer
- *
- * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means succeeded otherwise failed
- */
-static int amdgpu_device_reset(struct amdgpu_device *adev)
-{
- bool need_full_reset, vram_lost = 0;
- int r;
-
- need_full_reset = amdgpu_device_ip_need_full_reset(adev);
-
- if (!need_full_reset) {
- amdgpu_device_ip_pre_soft_reset(adev);
- r = amdgpu_device_ip_soft_reset(adev);
- amdgpu_device_ip_post_soft_reset(adev);
- if (r || amdgpu_device_ip_check_soft_reset(adev)) {
- DRM_INFO("soft reset failed, will fallback to full reset!\n");
- need_full_reset = true;
- }
- }
-
- if (need_full_reset) {
- r = amdgpu_device_ip_suspend(adev);
-
-retry:
- r = amdgpu_asic_reset(adev);
- /* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
-
- if (!r) {
- dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_device_ip_resume_phase1(adev);
- if (r)
- goto out;
-
- vram_lost = amdgpu_device_check_vram_lost(adev);
- if (vram_lost) {
- DRM_ERROR("VRAM is lost!\n");
- atomic_inc(&adev->vram_lost_counter);
- }
-
- r = amdgpu_gtt_mgr_recover(
- &adev->mman.bdev.man[TTM_PL_TT]);
- if (r)
- goto out;
-
- r = amdgpu_device_fw_loading(adev);
- if (r)
- return r;
-
- r = amdgpu_device_ip_resume_phase2(adev);
- if (r)
- goto out;
-
- if (vram_lost)
- amdgpu_device_fill_reset_magic(adev);
- }
- }
-
-out:
- if (!r) {
- amdgpu_irq_gpu_reset_resume_helper(adev);
- r = amdgpu_ib_ring_tests(adev);
- if (r) {
- dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_device_ip_suspend(adev);
- need_full_reset = true;
- goto retry;
- }
- }
-
- if (!r)
- r = amdgpu_device_recover_vram(adev);
-
- return r;
-}
/**
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
@@ -3306,6 +3258,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
if (amdgpu_gpu_recovery == -1) {
switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
case CHIP_TOPAZ:
case CHIP_TONGA:
case CHIP_FIJI:
@@ -3329,31 +3283,13 @@ disabled:
return false;
}
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu device pointer
- * @job: which job trigger hang
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
- struct amdgpu_job *job)
-{
- int i, r, resched;
-
- dev_info(adev->dev, "GPU reset begin!\n");
-
- mutex_lock(&adev->lock_reset);
- atomic_inc(&adev->gpu_reset_counter);
- adev->in_gpu_reset = 1;
-
- /* Block kfd */
- amdgpu_amdkfd_pre_reset(adev);
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ bool *need_full_reset_arg)
+{
+ int i, r = 0;
+ bool need_full_reset = *need_full_reset_arg;
/* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3373,10 +3309,144 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_fence_driver_force_completion(ring);
}
- if (amdgpu_sriov_vf(adev))
- r = amdgpu_device_reset_sriov(adev, job ? false : true);
- else
- r = amdgpu_device_reset(adev);
+
+
+ if (!amdgpu_sriov_vf(adev)) {
+
+ if (!need_full_reset)
+ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
+
+ if (!need_full_reset) {
+ amdgpu_device_ip_pre_soft_reset(adev);
+ r = amdgpu_device_ip_soft_reset(adev);
+ amdgpu_device_ip_post_soft_reset(adev);
+ if (r || amdgpu_device_ip_check_soft_reset(adev)) {
+ DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ need_full_reset = true;
+ }
+ }
+
+ if (need_full_reset)
+ r = amdgpu_device_ip_suspend(adev);
+
+ *need_full_reset_arg = need_full_reset;
+ }
+
+ return r;
+}
+
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
+ struct list_head *device_list_handle,
+ bool *need_full_reset_arg)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+ int r = 0;
+
+ /*
+ * ASIC reset has to be done on all XGMI hive nodes ASAP
+ * to allow proper link negotiation in FW (within 1 sec)
+ */
+ if (need_full_reset) {
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ } else
+ r = amdgpu_asic_reset(tmp_adev);
+
+ if (r) {
+ DRM_ERROR("ASIC reset failed with error %d for drm dev %s",
+ r, tmp_adev->ddev->unique);
+ break;
+ }
+ }
+
+ /* For XGMI wait for all PSP resets to complete before proceed */
+ if (!r) {
+ list_for_each_entry(tmp_adev, device_list_handle,
+ gmc.xgmi.head) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
+ }
+
+
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (need_full_reset) {
+ /* post card */
+ if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
+ DRM_WARN("asic atom init failed!");
+
+ if (!r) {
+ dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_device_ip_resume_phase1(tmp_adev);
+ if (r)
+ goto out;
+
+ vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
+ if (vram_lost) {
+ DRM_ERROR("VRAM is lost!\n");
+ atomic_inc(&tmp_adev->vram_lost_counter);
+ }
+
+ r = amdgpu_gtt_mgr_recover(
+ &tmp_adev->mman.bdev.man[TTM_PL_TT]);
+ if (r)
+ goto out;
+
+ r = amdgpu_device_fw_loading(tmp_adev);
+ if (r)
+ return r;
+
+ r = amdgpu_device_ip_resume_phase2(tmp_adev);
+ if (r)
+ goto out;
+
+ if (vram_lost)
+ amdgpu_device_fill_reset_magic(tmp_adev);
+
+ /* Update PSP FW topology after reset */
+ if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ }
+ }
+
+
+out:
+ if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
+ r = amdgpu_device_ip_suspend(tmp_adev);
+ need_full_reset = true;
+ r = -EAGAIN;
+ goto end;
+ }
+ }
+
+ if (!r)
+ r = amdgpu_device_recover_vram(tmp_adev);
+ else
+ tmp_adev->asic_reset_res = r;
+ }
+
+end:
+ *need_full_reset_arg = need_full_reset;
+ return r;
+}
+
+static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -3388,7 +3458,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* or all rings (in the case @job is NULL)
* after above amdgpu_reset accomplished
*/
- if ((!job || job->base.sched == &ring->sched) && !r)
+ if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res)
drm_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
@@ -3398,21 +3468,144 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
drm_helper_resume_force_mode(adev->ddev);
}
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+ adev->asic_reset_res = 0;
+}
- if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
- } else {
- dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
- }
+static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
+{
+ mutex_lock(&adev->lock_reset);
+ atomic_inc(&adev->gpu_reset_counter);
+ adev->in_gpu_reset = 1;
+ /* Block kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_pre_reset(adev);
+}
- /*unlock kfd */
- amdgpu_amdkfd_post_reset(adev);
+static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
+{
+ /* Unlock kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
+}
+
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job trigger hang
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Attempt to do soft-reset or full-reset and reinitialize ASIC
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int r;
+ struct amdgpu_hive_info *hive = NULL;
+ bool need_full_reset = false;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct list_head device_list, *device_list_handle = NULL;
+
+ INIT_LIST_HEAD(&device_list);
+
+ dev_info(adev->dev, "GPU reset begin!\n");
+
+ /*
+ * In case of an XGMI hive, disallow concurrent resets triggered
+ * by different nodes. There is also no point, since the node already
+ * executing the reset will reset all the other nodes in the hive anyway.
+ */
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
+ !mutex_trylock(&hive->hive_lock))
+ return 0;
+
+ /* Start with adev pre asic reset first for soft reset check.*/
+ amdgpu_device_lock_adev(adev);
+ r = amdgpu_device_pre_asic_reset(adev,
+ job,
+ &need_full_reset);
+ if (r) {
+ /*TODO Should we stop ?*/
+ DRM_ERROR("GPU pre asic reset failed with error %d for drm dev %s",
+ r, adev->ddev->unique);
+ adev->asic_reset_res = r;
+ }
+
+ /* Build list of devices to reset */
+ if (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!hive) {
+ amdgpu_device_unlock_adev(adev);
+ return -ENODEV;
+ }
+
+ /*
+ * In XGMI hive mode the device reset is done for all the
+ * nodes in the hive to retrain all XGMI links, so the reset
+ * sequence is executed in a loop on all nodes.
+ */
+ device_list_handle = &hive->device_list;
+ } else {
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
+
+retry: /* Rest of adevs pre asic reset from XGMI hive. */
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
+ if (tmp_adev == adev)
+ continue;
+
+ amdgpu_device_lock_adev(tmp_adev);
+ r = amdgpu_device_pre_asic_reset(tmp_adev,
+ NULL,
+ &need_full_reset);
+ /*TODO Should we stop ?*/
+ if (r) {
+ DRM_ERROR("GPU pre asic reset failed with error %d for drm dev %s",
+ r, tmp_adev->ddev->unique);
+ tmp_adev->asic_reset_res = r;
+ }
+ }
+
+ /* Actual ASIC resets if needed.*/
+ /* TODO Implement XGMI hive reset logic for SRIOV */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_reset_sriov(adev, job ? false : true);
+ if (r)
+ adev->asic_reset_res = r;
+ } else {
+ r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+ if (r && r == -EAGAIN)
+ goto retry;
+ }
+
+ /* Post ASIC reset for all devs. */
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
+
+ if (r) {
+ /* bad news, how to tell it to userspace ? */
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ } else {
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+ }
+
+ amdgpu_device_unlock_adev(tmp_adev);
+ }
+
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ mutex_unlock(&hive->hive_lock);
+
+ if (r)
+ dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 686a26de50f9..15ce7e681d67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -631,6 +631,11 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
if (!adev->mode_info.max_bpc_property)
return -ENOMEM;
+ adev->mode_info.abm_level_property =
+ drm_property_create_range(adev->ddev, 0,
+ "abm level", 0, 4);
+ if (!adev->mode_info.abm_level_property)
+ return -ENOMEM;
}
return 0;
@@ -857,7 +862,12 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
/* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) {
vtotal = mode->crtc_vtotal;
- *vpos = *vpos - vtotal;
+
+ /* With variable refresh rate displays the vpos can exceed
+ * the vtotal value. Clamp to 0 to return -vbl_end instead
+ * of guessing the remaining number of lines until scanout.
+ */
+ *vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
}
/* Correct for shifted end of vbl at vbl_end. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
new file mode 100644
index 000000000000..be620b29f4aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * GPU doorbell structures, functions & helpers
+ */
+struct amdgpu_doorbell {
+ /* doorbell mmio */
+ resource_size_t base;
+ resource_size_t size;
+ u32 __iomem *ptr;
+ u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
+};
+
+/* Reserved doorbells for amdgpu (including multimedia).
+ * KFD can use all the rest in the 2M doorbell bar.
+ * For ASICs before vega10, the doorbell is 32-bit, so the
+ * index/offset is in dwords. For vega10 and later, the doorbell
+ * can be 64-bit, so the indexes defined here are in qwords.
+ */
+struct amdgpu_doorbell_index {
+ uint32_t kiq;
+ uint32_t mec_ring0;
+ uint32_t mec_ring1;
+ uint32_t mec_ring2;
+ uint32_t mec_ring3;
+ uint32_t mec_ring4;
+ uint32_t mec_ring5;
+ uint32_t mec_ring6;
+ uint32_t mec_ring7;
+ uint32_t userqueue_start;
+ uint32_t userqueue_end;
+ uint32_t gfx_ring0;
+ uint32_t sdma_engine0;
+ uint32_t sdma_engine1;
+ uint32_t sdma_engine2;
+ uint32_t sdma_engine3;
+ uint32_t sdma_engine4;
+ uint32_t sdma_engine5;
+ uint32_t sdma_engine6;
+ uint32_t sdma_engine7;
+ uint32_t ih;
+ union {
+ struct {
+ uint32_t vcn_ring0_1;
+ uint32_t vcn_ring2_3;
+ uint32_t vcn_ring4_5;
+ uint32_t vcn_ring6_7;
+ } vcn;
+ struct {
+ uint32_t uvd_ring0_1;
+ uint32_t uvd_ring2_3;
+ uint32_t uvd_ring4_5;
+ uint32_t uvd_ring6_7;
+ uint32_t vce_ring0_1;
+ uint32_t vce_ring2_3;
+ uint32_t vce_ring4_5;
+ uint32_t vce_ring6_7;
+ } uvd_vce;
+ };
+ uint32_t max_assignment;
+};
+
+typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
+{
+ AMDGPU_DOORBELL_KIQ = 0x000,
+ AMDGPU_DOORBELL_HIQ = 0x001,
+ AMDGPU_DOORBELL_DIQ = 0x002,
+ AMDGPU_DOORBELL_MEC_RING0 = 0x010,
+ AMDGPU_DOORBELL_MEC_RING1 = 0x011,
+ AMDGPU_DOORBELL_MEC_RING2 = 0x012,
+ AMDGPU_DOORBELL_MEC_RING3 = 0x013,
+ AMDGPU_DOORBELL_MEC_RING4 = 0x014,
+ AMDGPU_DOORBELL_MEC_RING5 = 0x015,
+ AMDGPU_DOORBELL_MEC_RING6 = 0x016,
+ AMDGPU_DOORBELL_MEC_RING7 = 0x017,
+ AMDGPU_DOORBELL_GFX_RING0 = 0x020,
+ AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
+ AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
+ AMDGPU_DOORBELL_IH = 0x1E8,
+ AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
+ AMDGPU_DOORBELL_INVALID = 0xFFFF
+} AMDGPU_DOORBELL_ASSIGNMENT;
+
+typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
+{
+ /* Compute + GFX: 0~255 */
+ AMDGPU_VEGA20_DOORBELL_KIQ = 0x000,
+ AMDGPU_VEGA20_DOORBELL_HIQ = 0x001,
+ AMDGPU_VEGA20_DOORBELL_DIQ = 0x002,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING0 = 0x003,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING1 = 0x004,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING2 = 0x005,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING3 = 0x006,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING4 = 0x007,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING5 = 0x008,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING6 = 0x009,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING7 = 0x00A,
+ AMDGPU_VEGA20_DOORBELL_USERQUEUE_START = 0x00B,
+ AMDGPU_VEGA20_DOORBELL_USERQUEUE_END = 0x08A,
+ AMDGPU_VEGA20_DOORBELL_GFX_RING0 = 0x08B,
+ /* SDMA:256~335*/
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0 = 0x100,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1 = 0x10A,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2 = 0x114,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3 = 0x11E,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4 = 0x128,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5 = 0x132,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6 = 0x13C,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7 = 0x146,
+ /* IH: 376~391 */
+ AMDGPU_VEGA20_DOORBELL_IH = 0x178,
+ /* MMSCH: 392~407
+ * overlap the doorbell assignment with VCN as they are mutually exclusive
+ * VCE engine's doorbell is 32 bit and two VCE rings share one QWORD
+ */
+ AMDGPU_VEGA20_DOORBELL64_VCN0_1 = 0x188, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+ AMDGPU_VEGA20_DOORBELL64_VCN2_3 = 0x189,
+ AMDGPU_VEGA20_DOORBELL64_VCN4_5 = 0x18A,
+ AMDGPU_VEGA20_DOORBELL64_VCN6_7 = 0x18B,
+
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1 = 0x188,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3 = 0x189,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5 = 0x18A,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7 = 0x18B,
+
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1 = 0x18C,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3 = 0x18D,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5 = 0x18E,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7 = 0x18F,
+ AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F,
+ AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF
+} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
+
+/*
+ * 64-bit doorbells; offsets are in QWORDs and occupy 2KB of doorbell space
+ */
+typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
+{
+ /*
+ * All compute related doorbells: kiq, hiq, diq, traditional compute queues and user queues should be located in
+ * a contiguous range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
+ * Compute related doorbells are allocated from 0x00 to 0x8a
+ */
+
+
+ /* kernel scheduling */
+ AMDGPU_DOORBELL64_KIQ = 0x00,
+
+ /* HSA interface queue and debug queue */
+ AMDGPU_DOORBELL64_HIQ = 0x01,
+ AMDGPU_DOORBELL64_DIQ = 0x02,
+
+ /* Compute engines */
+ AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
+ AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
+ AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
+ AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
+ AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
+ AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
+ AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
+ AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
+
+ /* User queue doorbell range (128 doorbells) */
+ AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
+ AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
+
+ /* Graphics engine */
+ AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
+
+ /*
+ * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
+ * Graphics voltage island aperture 1
+ * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
+ */
+
+ /* For vega10 SR-IOV, the sdma doorbell must be fixed as follows
+ * to keep the same setting as the host driver, or conflicts
+ * will occur
+ */
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
+
+ /* Interrupt handler */
+ AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
+ AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
+ AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
+
+ /* VCN engine uses 32-bit doorbells */
+ AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+ AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
+ AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
+ AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
+
+ /* overlap the doorbell assignment with VCN as they are mutually exclusive
+ * VCE engine's doorbell is 32 bit and two VCE rings share one QWORD
+ */
+ AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
+ AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
+ AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
+ AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
+
+ AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
+ AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
+ AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
+ AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
+
+ AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
+ AMDGPU_DOORBELL64_INVALID = 0xFFFF
+} AMDGPU_DOORBELL64_ASSIGNMENT;
+
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
+u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+
+#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
+#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
+#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
+#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
+
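
The index values in this new header are no longer meant to be hard-coded at ring-setup sites; instead amdgpu_asic_init_doorbell_index() (see the doorbell-init hunk earlier in this diff) fills adev->doorbell_index per ASIC and rings read their slot from that table, as the amdgpu_gfx.c hunk further down does for the KIQ ring. A minimal sketch of that pattern (the function name here is illustrative, not part of the patch):

    /* Illustrative only: how a ring picks up its doorbell slot from the new
     * per-device table, which amdgpu_asic_init_doorbell_index() filled during
     * amdgpu_device_doorbell_init().
     */
    static void example_assign_kiq_doorbell(struct amdgpu_device *adev,
                                            struct amdgpu_ring *ring)
    {
            ring->use_doorbell = true;
            /* Read from the table rather than hard-coding AMDGPU_DOORBELL_KIQ. */
            ring->doorbell_index = adev->doorbell_index.kiq;
    }
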
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8de55f7f1a3a..c806f984bcc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -454,9 +454,10 @@ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
/**
* DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Pramater Cache per Shader Engine in Byte. The default is 0 (depending on gfx).
+ * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
+ * The default is 0 (depending on gfx).
*/
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
+MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
/**
@@ -864,6 +865,7 @@ static const struct pci_device_id pciidlist[] = {
/* VEGAM */
{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+ {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
/* Vega 10 */
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
@@ -872,7 +874,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Vega 12 */
{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
@@ -885,6 +893,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ {0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
/* Raven */
@@ -1220,9 +1229,6 @@ static struct drm_driver kms_driver = {
.patchlevel = KMS_DRIVER_PATCHLEVEL,
};
-static struct drm_driver *driver;
-static struct pci_driver *pdriver;
-
static struct pci_driver amdgpu_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
@@ -1252,16 +1258,14 @@ static int __init amdgpu_init(void)
goto error_fence;
DRM_INFO("amdgpu kernel modesetting enabled.\n");
- driver = &kms_driver;
- pdriver = &amdgpu_kms_pci_driver;
- driver->num_ioctls = amdgpu_max_kms_ioctl;
+ kms_driver.num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
amdgpu_amdkfd_init();
/* let modprobe override vga console setting */
- return pci_register_driver(pdriver);
+ return pci_register_driver(&amdgpu_kms_pci_driver);
error_fence:
amdgpu_sync_fini();
@@ -1273,7 +1277,7 @@ error_sync:
static void __exit amdgpu_exit(void)
{
amdgpu_amdkfd_fini();
- pci_unregister_driver(pdriver);
+ pci_unregister_driver(&amdgpu_kms_pci_driver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
amdgpu_fence_slab_fini();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7b3d1ebda9df..f4f00217546e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -169,7 +169,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
- tv.shared = true;
+ tv.num_shared = 1;
list_add(&tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -604,7 +604,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
tv.bo = &abo->tbo;
- tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
+ if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+ tv.num_shared = 1;
+ else
+ tv.num_shared = 0;
list_add(&tv.head, &list);
} else {
gobj = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index d63daba9b17c..f1ddfc50bcc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -54,6 +54,8 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+extern const struct dma_buf_ops amdgpu_dmabuf_ops;
+
/*
* GEM objects.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 6a70c0b7105f..97a60da62004 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -250,7 +250,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
ring->adev = NULL;
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
+ ring->doorbell_index = adev->doorbell_index.kiq;
r = amdgpu_gfx_kiq_acquire(adev, ring);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 8c57924c075f..81e6070d255b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -99,6 +99,7 @@ struct amdgpu_xgmi {
unsigned num_physical_nodes;
/* gpu list in the same hive */
struct list_head head;
+ bool supported;
};
struct amdgpu_gmc {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 9ce8c93ec19b..f877bb78d10a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -51,14 +51,12 @@ struct amdgpu_ih_ring {
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev);
- bool (*prescreen_iv)(struct amdgpu_device *adev);
void (*decode_iv)(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
void (*set_rptr)(struct amdgpu_device *adev);
};
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
-#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 6b6524f04ce0..b7968f426862 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -145,13 +145,6 @@ static void amdgpu_irq_callback(struct amdgpu_device *adev,
u32 ring_index = ih->rptr >> 2;
struct amdgpu_iv_entry entry;
- /* Prescreening of high-frequency interrupts */
- if (!amdgpu_ih_prescreen_iv(adev))
- return;
-
- /* Before dispatching irq to IP blocks, send it to amdkfd */
- amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
-
entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
amdgpu_ih_decode_iv(adev, &entry);
@@ -371,39 +364,38 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
unsigned client_id = entry->client_id;
unsigned src_id = entry->src_id;
struct amdgpu_irq_src *src;
+ bool handled = false;
int r;
trace_amdgpu_iv(entry);
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
- return;
- }
- if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
+ } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
- return;
- }
- if (adev->irq.virq[src_id]) {
+ } else if (adev->irq.virq[src_id]) {
generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
- } else {
- if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
- return;
- }
- src = adev->irq.client[client_id].sources[src_id];
- if (!src) {
- DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
- return;
- }
+ } else if (!adev->irq.client[client_id].sources) {
+ DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
+ } else if ((src = adev->irq.client[client_id].sources[src_id])) {
r = src->funcs->process(adev, src, entry);
- if (r)
+ if (r < 0)
DRM_ERROR("error processing interrupt (%d)\n", r);
+ else if (r)
+ handled = true;
+
+ } else {
+ DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
}
+
+ /* Send it to amdkfd as well if it isn't already handled */
+ if (!handled)
+ amdgpu_amdkfd_interrupt(adev, entry->iv_entry);
}
/**
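The dispatch rewrite above folds the early returns into a single if/else-if chain, tracks whether any IP block claimed the entry, and only then forwards it to amdkfd. A standalone sketch of that dispatch-with-fallback shape in plain C; the handler table and the return convention (>0 handled, 0 ignored, <0 error) are assumptions for the example, not the driver's types.

#include <stdbool.h>
#include <stdio.h>

#define MAX_SRC_ID 4

typedef int (*iv_handler_t)(unsigned int data);

static iv_handler_t handlers[MAX_SRC_ID];

static void fallback(unsigned int data)
{
	printf("forwarded %u to the fallback consumer\n", data);
}

static void dispatch(unsigned int src_id, unsigned int data)
{
	bool handled = false;
	int r;

	if (src_id >= MAX_SRC_ID) {
		fprintf(stderr, "invalid src_id %u\n", src_id);
	} else if (handlers[src_id]) {
		r = handlers[src_id](data);
		if (r < 0)
			fprintf(stderr, "error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;
	} else {
		fprintf(stderr, "unhandled src_id %u\n", src_id);
	}

	/* Forward the entry only if nobody claimed it. */
	if (!handled)
		fallback(data);
}

static int echo_handler(unsigned int data)
{
	printf("handled %u\n", data);
	return 1;
}

int main(void)
{
	handlers[1] = echo_handler;
	dispatch(1, 10);	/* claimed by echo_handler */
	dispatch(2, 20);	/* unclaimed, reaches fallback */
	return 0;
}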
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index e0af44fd6a0c..0a17fb1af204 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -32,6 +32,9 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
+ struct amdgpu_task_info ti;
+
+ memset(&ti, 0, sizeof(struct amdgpu_task_info));
if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
@@ -39,9 +42,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
return;
}
+ amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
ring->fence_drv.sync_seq);
+ DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
+ ti.process_name, ti.tgid, ti.task_name, ti.pid);
if (amdgpu_device_should_recover_gpu(ring->adev))
amdgpu_device_gpu_recover(ring->adev, job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 9b3164c0f861..bc62bf41b7e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;
- /* Ensure IB tests are run on ring */
- flush_delayed_work(&adev->late_init_work);
-
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
ui32 = adev->accel_working;
@@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
struct amdgpu_fpriv *fpriv;
int r, pasid;
+ /* Ensure IB tests are run on ring */
+ flush_delayed_work(&adev->late_init_work);
+
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index e55508b39496..3e6823fdd939 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -238,44 +238,40 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
* amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
*
* @mn: our notifier
- * @mm: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @range: mmu notifier context
*
* Block for operations on BOs to finish and mark pages as accessed and
* potentially dirty.
*/
static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
+ unsigned long end;
/* notification is exclusive, but interval is inclusive */
- end -= 1;
+ end = range->end - 1;
/* TODO we should be able to split locking for interval tree and
* amdgpu_mn_invalidate_node
*/
- if (amdgpu_mn_read_lock(amn, blockable))
+ if (amdgpu_mn_read_lock(amn, range->blockable))
return -EAGAIN;
- it = interval_tree_iter_first(&amn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, range->start, end);
while (it) {
struct amdgpu_mn_node *node;
- if (!blockable) {
+ if (!range->blockable) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
+ it = interval_tree_iter_next(it, range->start, end);
- amdgpu_mn_invalidate_node(node, start, end);
+ amdgpu_mn_invalidate_node(node, range->start, end);
}
return 0;
@@ -294,39 +290,38 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 * are restored in amdgpu_mn_invalidate_range_end_hsa.
*/
static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
+ unsigned long end;
/* notification is exclusive, but interval is inclusive */
- end -= 1;
+ end = range->end - 1;
- if (amdgpu_mn_read_lock(amn, blockable))
+ if (amdgpu_mn_read_lock(amn, range->blockable))
return -EAGAIN;
- it = interval_tree_iter_first(&amn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, range->start, end);
while (it) {
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;
- if (!blockable) {
+ if (!range->blockable) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
+ it = interval_tree_iter_next(it, range->start, end);
list_for_each_entry(bo, &node->bos, mn_list) {
struct kgd_mem *mem = bo->kfd_bo;
if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
- start, end))
- amdgpu_amdkfd_evict_userptr(mem, mm);
+ range->start,
+ end))
+ amdgpu_amdkfd_evict_userptr(mem, range->mm);
}
}
@@ -344,9 +339,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
* Release the lock again to allow new command submissions.
*/
static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+ const struct mmu_notifier_range *range)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
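Both invalidate_range_start callbacks above now take a single const struct mmu_notifier_range * instead of separate mm/start/end/blockable arguments, converting the exclusive end to an inclusive one locally. A small self-contained model of that conversion with a stand-in struct, not the kernel's definition:

#include <stdbool.h>
#include <stdio.h>

struct range_desc {
	unsigned long start;
	unsigned long end;	/* exclusive, like mmu_notifier_range::end */
	bool blockable;
};

static int invalidate_range_start(const struct range_desc *range)
{
	/* notification is exclusive, but interval lookups are inclusive */
	unsigned long last = range->end - 1;

	if (!range->blockable)
		return -1;	/* tell the caller to retry, like -EAGAIN */

	printf("invalidate [%lu, %lu]\n", range->start, last);
	return 0;
}

int main(void)
{
	struct range_desc r = { .start = 4096, .end = 8192, .blockable = true };

	return invalidate_range_start(&r);
}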
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 0dc2c5c57015..aadd0fa42e43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,7 +38,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
-#include <drm/drm_fb_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/hrtimer.h>
@@ -294,13 +293,6 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
- /* it is used to enter or exit into free sync mode */
- int (*notify_freesync)(struct drm_device *dev, void *data,
- struct drm_file *filp);
- /* it is used to allow enablement of freesync mode */
- int (*set_freesync_property)(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val);
};
@@ -340,6 +332,8 @@ struct amdgpu_mode_info {
struct drm_property *dither_property;
/* maximum number of bits per channel for monitor color */
struct drm_property *max_bpc_property;
+ /* Adaptive Backlight Modulation (power feature) */
+ struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index cf768acb51dc..728e15e5d68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -81,7 +81,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_subtract_pin_size(bo);
if (bo->kfd_bo)
- amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+ amdgpu_amdkfd_unreserve_memory_limit(bo);
amdgpu_bo_kunmap(bo);
@@ -608,53 +608,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
}
/**
- * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
- * @adev: amdgpu device object
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: &amdgpu_bo buffer to be backed up
- * @resv: reservation object with embedded fence
- * @fence: dma_fence associated with the operation
- * @direct: whether to submit the job directly
- *
- * Copies an &amdgpu_bo buffer object to its shadow object.
- * Not used for now.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence,
- bool direct)
-
-{
- struct amdgpu_bo *shadow = bo->shadow;
- uint64_t bo_addr, shadow_addr;
- int r;
-
- if (!shadow)
- return -EINVAL;
-
- bo_addr = amdgpu_bo_gpu_offset(bo);
- shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
-
- r = reservation_object_reserve_shared(bo->tbo.resv, 1);
- if (r)
- goto err;
-
- r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
- amdgpu_bo_size(bo), resv, fence,
- direct, false);
- if (!r)
- amdgpu_bo_fence(bo, *fence, true);
-
-err:
- return r;
-}
-
-/**
* amdgpu_bo_validate - validate an &amdgpu_bo buffer object
* @bo: pointer to the buffer object
*
@@ -959,7 +912,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
struct ttm_operation_ctx ctx = { false, false };
int r, i;
- if (!bo->pin_count) {
+ if (WARN_ON_ONCE(!bo->pin_count)) {
dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 7d3312d0da11..9291c2f837e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -267,11 +267,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7235cd0b0fa9..1f61ed95727c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -33,6 +33,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
+#include "hwmgr.h"
+#define WIDTH_4K 3840
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
@@ -1642,6 +1644,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
+ /* Skip fan attributes on APU */
+ if ((adev->flags & AMD_IS_APU) &&
+ (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
+ return 0;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
@@ -1956,6 +1971,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
mutex_unlock(&adev->pm.mutex);
}
+ /* enable/disable Low Memory PState for UVD (4k videos) */
+ if (adev->asic_type == CHIP_STONEY &&
+ adev->uvd.decode_image_width >= WIDTH_4K) {
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+ if (hwmgr && hwmgr->hwmgr_func &&
+ hwmgr->hwmgr_func->update_nbdpm_pstate)
+ hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+ !enable,
+ true);
+ }
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 3e44d889f7af..71913a18d142 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -39,8 +39,6 @@
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
-static const struct dma_buf_ops amdgpu_dmabuf_ops;
-
/**
* amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
* implementation
@@ -332,7 +330,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
-static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_gem_map_attach,
.detach = amdgpu_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index e05dc66b1090..8fab0d637ee5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -155,10 +155,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
-static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
+ struct psp_gfx_cmd_resp *cmd,
uint64_t tmr_mc, uint32_t size)
{
- cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
+ if (psp_support_vmr_ring(psp))
+ cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
+ else
+ cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_size = size;
@@ -192,7 +196,7 @@ static int psp_tmr_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
+ psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n",
PSP_TMR_SIZE, psp->tmr_mc_addr);
@@ -536,8 +540,10 @@ static int psp_load_fw(struct amdgpu_device *adev)
int ret;
struct psp_context *psp = &adev->psp;
- if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
+ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
+ psp_ring_destroy(psp, PSP_RING_TYPE__KM);
goto skip_memalloc;
+ }
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 9ec5d1a666a6..3ee573b4016e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -83,12 +83,13 @@ struct psp_funcs
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
- uint64_t (*xgmi_get_node_id)(struct psp_context *psp);
- uint64_t (*xgmi_get_hive_id)(struct psp_context *psp);
+ int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id);
+ int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id);
int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
+ bool (*support_vmr_ring)(struct psp_context *psp);
};
struct psp_xgmi_context {
@@ -192,12 +193,14 @@ struct psp_xgmi_topology_info {
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
+#define psp_support_vmr_ring(psp) \
+ ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_node_id(psp) \
- ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0)
-#define psp_xgmi_get_hive_id(psp) \
- ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0)
+#define psp_xgmi_get_node_id(psp, node_id) \
+ ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL)
+#define psp_xgmi_get_hive_id(psp, hive_id) \
+ ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL)
#define psp_xgmi_get_topology_info(psp, num_device, topology) \
((psp)->funcs->xgmi_get_topology_info ? \
(psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
@@ -217,7 +220,6 @@ extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
int psp_gpu_reset(struct amdgpu_device *adev);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
-
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
#endif
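The xgmi_get_node_id/xgmi_get_hive_id hooks now return an int status and hand the id back through a pointer, and every wrapper macro still guards against a missing callback. A compilable toy version of that optional-callback pattern; the ops struct and names are invented for illustration:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct node_ops {
	/* Optional: may be NULL when the backend does not implement it. */
	int (*get_node_id)(void *ctx, uint64_t *node_id);
};

#define ops_get_node_id(ops, ctx, out) \
	((ops)->get_node_id ? (ops)->get_node_id((ctx), (out)) : -EINVAL)

static int real_get_node_id(void *ctx, uint64_t *node_id)
{
	(void)ctx;
	*node_id = 42;
	return 0;
}

int main(void)
{
	struct node_ops with = { .get_node_id = real_get_node_id };
	struct node_ops without = { 0 };
	uint64_t id;

	if (!ops_get_node_id(&with, NULL, &id))
		printf("node id %llu\n", (unsigned long long)id);
	printf("missing callback -> %d\n", ops_get_node_id(&without, NULL, &id));
	return 0;
}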
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 5b75bdc8dc28..335a0edf114b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -397,7 +397,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
- if (!ring->funcs->soft_recovery)
+ if (!ring->funcs->soft_recovery || !fence)
return false;
atomic_inc(&ring->adev->gpu_reset_counter);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 0beb01fef83f..d87e828a084b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -29,7 +29,7 @@
#include <drm/drm_print.h>
/* max number of rings */
-#define AMDGPU_MAX_RINGS 21
+#define AMDGPU_MAX_RINGS 23
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 69896f451e8a..4e5d13e41f6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -692,6 +692,8 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
buf_sizes[0x1] = dpb_size;
buf_sizes[0x2] = image_size;
buf_sizes[0x4] = min_ctx_size;
+ /* store image width to adjust nb memory pstate */
+ adev->uvd.decode_image_width = width;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index a3ab1a41060f..5eb63288d157 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -65,6 +65,8 @@ struct amdgpu_uvd {
struct drm_sched_entity entity;
struct delayed_work idle_work;
unsigned harvest_config;
+ /* store image width to adjust nb memory state */
+ unsigned decode_image_width;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index e2e42e3fbcf3..ecf6f96df2ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -262,7 +262,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
ring = &adev->vcn.ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
@@ -322,7 +322,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
ring = &adev->vcn.ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
@@ -396,16 +396,26 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
+ unsigned int fences = 0;
+ unsigned int i;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+ }
+ if (fences)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
- new_state.fw_based = adev->vcn.pause_state.fw_based;
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
new_state.jpeg = VCN_DPG_STATE__PAUSE;
else
- new_state.jpeg = adev->vcn.pause_state.jpeg;
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
amdgpu_vcn_pause_dpg_mode(adev, &new_state);
}
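begin_use now derives the DPG pause state from the amount of outstanding work (emitted fences per encode/JPEG ring) plus the ring about to submit, rather than carrying the previous state forward. A tiny plain-C model of that decision:

#include <stdbool.h>
#include <stdio.h>

enum dpg_state { DPG_UNPAUSE, DPG_PAUSE };

/* Pause whenever there is outstanding work or an imminent submission. */
static enum dpg_state decide_state(unsigned int pending_fences, bool ring_about_to_be_used)
{
	if (pending_fences || ring_about_to_be_used)
		return DPG_PAUSE;
	return DPG_UNPAUSE;
}

int main(void)
{
	printf("%d %d %d\n",
	       decide_state(0, false),	/* 0: unpause */
	       decide_state(3, false),	/* 1: pause, fences outstanding */
	       decide_state(0, true));	/* 1: pause, ring in use */
	return 0;
}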
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index cfee74732edb..462a04e0f5e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -334,7 +334,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
if (adev->fw_vram_usage.va != NULL) {
adev->virt.fw_reserve.p_pf2vf =
- (struct amdgim_pf2vf_info_header *)(
+ (struct amd_sriov_msg_pf2vf_info_header *)(
adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 0728fbc9a692..722deefc0a7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -63,8 +63,8 @@ struct amdgpu_virt_ops {
* Firmware Reserve Frame buffer
*/
struct amdgpu_virt_fw_reserve {
- struct amdgim_pf2vf_info_header *p_pf2vf;
- struct amdgim_vf2pf_info_header *p_vf2pf;
+ struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
+ struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
unsigned int checksum_key;
};
/*
@@ -85,15 +85,17 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
};
-struct amdgim_pf2vf_info_header {
+struct amd_sriov_msg_pf2vf_info_header {
/* the total structure size in bytes. */
uint32_t size;
/* version of this structure, written by the GIM */
uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
} __aligned(4);
struct amdgim_pf2vf_info_v1 {
/* header contains size and version */
- struct amdgim_pf2vf_info_header header;
+ struct amd_sriov_msg_pf2vf_info_header header;
/* max_width * max_height */
unsigned int uvd_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
@@ -112,7 +114,7 @@ struct amdgim_pf2vf_info_v1 {
struct amdgim_pf2vf_info_v2 {
/* header contains size and version */
- struct amdgim_pf2vf_info_header header;
+ struct amd_sriov_msg_pf2vf_info_header header;
/* use private key from mailbox 2 to create checksum */
uint32_t checksum;
/* The features flags of the GIM driver supports. */
@@ -137,20 +139,22 @@ struct amdgim_pf2vf_info_v2 {
uint64_t vcefw_kboffset;
/* VCE FW size in KB */
uint32_t vcefw_ksize;
- uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)];
} __aligned(4);
-struct amdgim_vf2pf_info_header {
+struct amd_sriov_msg_vf2pf_info_header {
/* the total structure size in bytes. */
uint32_t size;
/* version of this structure, written by the guest */
uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
} __aligned(4);
struct amdgim_vf2pf_info_v1 {
/* header contains size and version */
- struct amdgim_vf2pf_info_header header;
+ struct amd_sriov_msg_vf2pf_info_header header;
/* driver version */
char driver_version[64];
/* driver certification, 1=WHQL, 0=None */
@@ -180,7 +184,7 @@ struct amdgim_vf2pf_info_v1 {
struct amdgim_vf2pf_info_v2 {
/* header contains size and version */
- struct amdgim_vf2pf_info_header header;
+ struct amd_sriov_msg_vf2pf_info_header header;
uint32_t checksum;
/* driver version */
uint8_t driver_version[64];
@@ -206,7 +210,7 @@ struct amdgim_vf2pf_info_v2 {
uint32_t uvd_enc_usage;
/* guest uvd engine usage percentage. 0xffff means N/A. */
uint32_t uvd_enc_health;
- uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);
#define AMDGPU_FW_VRAM_VF2PF_VER 2
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 58a2363040dd..e73d152659a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -181,7 +181,7 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
if (level == adev->vm_manager.root_level)
/* For the root directory */
- return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
+ return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
else if (level != AMDGPU_VM_PTB)
/* Everything in between */
return 512;
@@ -617,7 +617,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{
entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo;
- entry->tv.shared = true;
+ /* One for the VM updates, one for TTM and one for the CS job */
+ entry->tv.num_shared = 3;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
@@ -773,10 +774,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
- r = reservation_object_reserve_shared(bo->tbo.resv, 1);
- if (r)
- return r;
-
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto error;
@@ -1656,9 +1653,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if (!amdgpu_vm_pt_descendant(adev, &cursor))
return -ENOENT;
continue;
- } else if (frag >= parent_shift) {
+ } else if (frag >= parent_shift &&
+ cursor.level - 1 != adev->vm_manager.root_level) {
/* If the fragment size is even larger than the parent
- * shift we should go up one level and check it again.
+ * shift we should go up one level and check it again
+ * unless one level up is the root level.
*/
if (!amdgpu_vm_pt_ancestor(&cursor))
return -ENOENT;
@@ -1666,10 +1665,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
}
/* Looks good so far, calculate parameters for the update */
- incr = AMDGPU_GPU_PAGE_SIZE << shift;
+ incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
mask = amdgpu_vm_entries_mask(adev, cursor.level);
pe_start = ((cursor.pfn >> shift) & mask) * 8;
- entry_end = (mask + 1) << shift;
+ entry_end = (uint64_t)(mask + 1) << shift;
entry_end += cursor.pfn & ~(entry_end - 1);
entry_end = min(entry_end, end);
@@ -1682,7 +1681,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
flags | AMDGPU_PTE_FRAG(frag));
pe_start += nptes * 8;
- dst += nptes * AMDGPU_GPU_PAGE_SIZE << shift;
+ dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
frag_start = upd_end;
if (frag_start >= frag_end) {
@@ -1842,10 +1841,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
- if (r)
- goto error_free;
-
r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
if (r)
goto error_free;
@@ -3026,6 +3021,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
goto error_free_root;
+ r = reservation_object_reserve_shared(root->tbo.resv, 1);
+ if (r)
+ goto error_unreserve;
+
r = amdgpu_vm_clear_bo(adev, vm, root,
adev->vm_manager.root_level,
vm->pte_support_ats);
@@ -3055,7 +3054,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
INIT_KFIFO(vm->faults);
- vm->fault_credit = 16;
return 0;
@@ -3268,42 +3266,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
/**
- * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
- *
- * @adev: amdgpu_device pointer
- * @pasid: PASID do identify the VM
- *
- * This function is expected to be called in interrupt context.
- *
- * Returns:
- * True if there was fault credit, false otherwise
- */
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
- unsigned int pasid)
-{
- struct amdgpu_vm *vm;
-
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- if (!vm) {
- /* VM not found, can't track fault credit */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- }
-
- /* No lock needed. only accessed by IRQ handler */
- if (!vm->fault_credit) {
- /* Too many faults in this VM */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return false;
- }
-
- vm->fault_credit--;
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
-}
-
-/**
* amdgpu_vm_manager_init - init the VM manager
*
* @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2a8898d19c8b..e8dcfd59fc93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -229,9 +229,6 @@ struct amdgpu_vm {
/* Up to 128 pending retry page faults */
DECLARE_KFIFO(faults, u64, 128);
- /* Limit non-retry fault storms */
- unsigned int fault_credit;
-
/* Points to the KFD process VM info */
struct amdkfd_process_info *process_info;
@@ -299,8 +296,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
- unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 909216a9b447..8a8bc60cb6b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -23,7 +23,7 @@
*/
#include <linux/list.h>
#include "amdgpu.h"
-#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -31,15 +31,16 @@ static DEFINE_MUTEX(xgmi_mutex);
#define AMDGPU_MAX_XGMI_HIVE 8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
-struct amdgpu_hive_info {
- uint64_t hive_id;
- struct list_head device_list;
-};
-
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
-static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
+
+void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
+{
+ return &hive->device_list;
+}
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
int i;
struct amdgpu_hive_info *tmp;
@@ -58,39 +59,73 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
tmp = &xgmi_hives[hive_count++];
tmp->hive_id = adev->gmc.xgmi.hive_id;
INIT_LIST_HEAD(&tmp->device_list);
+ mutex_init(&tmp->hive_lock);
+
return tmp;
}
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
+{
+ int ret = -EINVAL;
+
+ /* Each psp needs to set the latest topology */
+ ret = psp_xgmi_set_topology_info(&adev->psp,
+ hive->number_devices,
+ &hive->topology_info);
+ if (ret)
+ dev_err(adev->dev,
+ "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
+ adev->gmc.xgmi.node_id,
+ adev->gmc.xgmi.hive_id, ret);
+ else
+ dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n",
+ adev->gmc.xgmi.physical_node_id,
+ adev->gmc.xgmi.hive_id);
+
+ return ret;
+}
+
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
- struct psp_xgmi_topology_info *tmp_topology;
+ struct psp_xgmi_topology_info *hive_topology;
struct amdgpu_hive_info *hive;
struct amdgpu_xgmi *entry;
- struct amdgpu_device *tmp_adev;
+ struct amdgpu_device *tmp_adev = NULL;
int count = 0, ret = -EINVAL;
- if ((adev->asic_type < CHIP_VEGA20) ||
- (adev->flags & AMD_IS_APU) )
+ if (!adev->gmc.xgmi.supported)
return 0;
- adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp);
- adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
- tmp_topology = kzalloc(sizeof(struct psp_xgmi_topology_info), GFP_KERNEL);
- if (!tmp_topology)
- return -ENOMEM;
+ ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get node id\n");
+ return ret;
+ }
+
+ ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get hive id\n");
+ return ret;
+ }
+
mutex_lock(&xgmi_mutex);
hive = amdgpu_get_xgmi_hive(adev);
if (!hive)
goto exit;
+ hive_topology = &hive->topology_info;
+
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
list_for_each_entry(entry, &hive->device_list, head)
- tmp_topology->nodes[count++].node_id = entry->node_id;
+ hive_topology->nodes[count++].node_id = entry->node_id;
+ hive->number_devices = count;
/* Each psp needs to get the latest topology */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, tmp_topology);
+ ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology);
if (ret) {
dev_err(tmp_adev->dev,
"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
@@ -101,25 +136,33 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
}
}
- /* Each psp need to set the latest topology */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology);
- if (ret) {
- dev_err(tmp_adev->dev,
- "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
- tmp_adev->gmc.xgmi.node_id,
- tmp_adev->gmc.xgmi.hive_id, ret);
- /* To do : continue with some node failed or disable the whole hive */
+ ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ if (ret)
break;
- }
}
- if (!ret)
- dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n",
- adev->gmc.xgmi.physical_node_id,
- adev->gmc.xgmi.hive_id);
exit:
mutex_unlock(&xgmi_mutex);
- kfree(tmp_topology);
return ret;
}
+
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive;
+
+ if (!adev->gmc.xgmi.supported)
+ return;
+
+ mutex_lock(&xgmi_mutex);
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (!hive)
+ goto exit;
+
+ if (!(hive->number_devices--))
+ mutex_destroy(&hive->hive_lock);
+
+exit:
+ mutex_unlock(&xgmi_mutex);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
new file mode 100644
index 000000000000..6151eb9c8ad3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_XGMI_H__
+#define __AMDGPU_XGMI_H__
+
+#include "amdgpu_psp.h"
+
+struct amdgpu_hive_info {
+ uint64_t hive_id;
+ struct list_head device_list;
+ struct psp_xgmi_topology_info topology_info;
+ int number_devices;
+ struct mutex hive_lock;
+};
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
+int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+
+#endif
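The new header exposes the hive bookkeeping: hives live in a small static table keyed by hive_id, are created on first lookup, and number_devices is maintained as devices join or leave under xgmi_mutex. A toy, single-threaded version of the lookup-or-create pattern (illustrative only, no locking):

#include <stdint.h>
#include <stdio.h>

#define MAX_HIVES 8

struct hive {
	uint64_t id;
	int devices;
};

static struct hive hives[MAX_HIVES];
static int hive_count;

/* Return the hive for this id, creating it if there is room. */
static struct hive *get_hive(uint64_t id)
{
	int i;

	for (i = 0; i < hive_count; i++)
		if (hives[i].id == id)
			return &hives[i];
	if (hive_count == MAX_HIVES)
		return NULL;
	hives[hive_count].id = id;
	return &hives[hive_count++];
}

int main(void)
{
	struct hive *h = get_hive(0xabc);

	h->devices++;
	get_hive(0xabc)->devices++;
	printf("hive %llx has %d devices\n",
	       (unsigned long long)h->id, h->devices);	/* prints 2 */
	return 0;
}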
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index f41f5f57e9f3..71c50d8900e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1755,6 +1755,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.flush_hdp = &cik_flush_hdp,
.invalidate_hdp = &cik_invalidate_hdp,
.need_full_reset = &cik_need_full_reset,
+ .init_doorbell_index = &legacy_doorbell_index_init,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index e49c6f15a0a0..54c625a2e570 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -30,4 +30,5 @@ void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index b5775c6a857b..8a8b4967a101 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -228,34 +228,6 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* [127:96] - reserved
*/
-/**
- * cik_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
/**
* cik_ih_decode_iv - decode an interrupt vector
*
@@ -461,7 +433,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cik_ih_funcs = {
.get_wptr = cik_ih_get_wptr,
- .prescreen_iv = cik_ih_prescreen_iv,
.decode_iv = cik_ih_decode_iv,
.set_rptr = cik_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index df5ac4d85a00..9d3ea298e116 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -208,34 +208,6 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * cz_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* cz_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -442,7 +414,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cz_ih_funcs = {
.get_wptr = cz_ih_get_wptr,
- .prescreen_iv = cz_ih_prescreen_iv,
.decode_iv = cz_ih_decode_iv,
.set_rptr = cz_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index f467b9bd090d..3a9fb6018c16 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4363,7 +4363,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+ ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index cb066a8dccd7..381f593b0cda 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -44,7 +44,6 @@
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
-#include "gca/gfx_8_0_enum.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
@@ -1891,7 +1890,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+ ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX8_MEC_HPD_SIZE);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -2002,7 +2001,7 @@ static int gfx_v8_0_sw_init(void *handle)
/* no gfx doorbells on iceland */
if (adev->asic_type != CHIP_TOPAZ) {
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
+ ring->doorbell_index = adev->doorbell_index.gfx_ring0;
}
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
@@ -4069,6 +4068,11 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v8_0_init_csb(adev);
+ return 0;
+ }
+
adev->gfx.rlc.funcs->stop(adev);
adev->gfx.rlc.funcs->reset(adev);
gfx_v8_0_init_pg(adev);
@@ -4216,7 +4220,7 @@ static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER,
- AMDGPU_DOORBELL_GFX_RING0);
+ adev->doorbell_index.gfx_ring0);
WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
@@ -4645,8 +4649,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
{
if (adev->asic_type > CHIP_TONGA) {
- WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2);
- WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, AMDGPU_DOORBELL_MEC_RING7 << 2);
+ WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
+ WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
}
/* enable doorbells */
WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
@@ -4948,14 +4952,13 @@ static bool gfx_v8_0_check_soft_reset(void *handle)
static int gfx_v8_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
- srbm_soft_reset = adev->gfx.srbm_soft_reset;
/* stop the rlc */
adev->gfx.rlc.funcs->stop(adev);
@@ -5052,14 +5055,13 @@ static int gfx_v8_0_soft_reset(void *handle)
static int gfx_v8_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
- srbm_soft_reset = adev->gfx.srbm_soft_reset;
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c27caa144c57..7556716038d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -86,6 +86,7 @@ MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
+MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
@@ -645,7 +646,20 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+ /*
+ * For Picasso on AM4 socket boards, we use picasso_rlc_am4.bin
+ * instead of picasso_rlc.bin.
+ * Judgment method:
+ * PCO AM4: revision >= 0xC8 && revision <= 0xCF
+ * or revision >= 0xD8 && revision <= 0xDF
+ * otherwise is PCO FP5
+ */
+ if (!strcmp(chip_name, "picasso") &&
+ (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
+ ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -1566,7 +1580,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
+ ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX9_MEC_HPD_SIZE);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -1655,7 +1669,7 @@ static int gfx_v9_0_sw_init(void *handle)
else
sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
+ ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
@@ -2326,12 +2340,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
#endif
WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
+ udelay(50);
/* carrizo do enable cp interrupt after cp inited */
- if (!(adev->flags & AMD_IS_APU))
+ if (!(adev->flags & AMD_IS_APU)) {
gfx_v9_0_enable_gui_idle_interrupt(adev, true);
-
- udelay(50);
+ udelay(50);
+ }
#ifdef AMDGPU_RLC_DEBUG_RETRY
/* RLC_GPM_GENERAL_6 : RLC Ucode version */
@@ -2981,9 +2996,9 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
/* enable the doorbell if requested */
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
- (AMDGPU_DOORBELL64_KIQ *2) << 2);
+ (adev->doorbell_index.kiq * 2) << 2);
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
- (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
+ (adev->doorbell_index.userqueue_end * 2) << 2);
}
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
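The comment block in gfx_v9_0_init_microcode() above spells out the PCI revision windows that select the AM4 RLC firmware for Picasso. A hedged sketch of the same selection as a helper; only the revision ranges come from that comment, the rest is illustrative:

#include <stdio.h>

static const char *picasso_rlc_fw_name(unsigned int rev)
{
	/* AM4 socket parts: 0xC8-0xCF or 0xD8-0xDF; everything else is FP5 */
	if ((rev >= 0xC8 && rev <= 0xCF) || (rev >= 0xD8 && rev <= 0xDF))
		return "amdgpu/picasso_rlc_am4.bin";
	return "amdgpu/picasso_rlc.bin";
}

int main(void)
{
	printf("%s\n%s\n", picasso_rlc_fw_name(0xC8), picasso_rlc_fw_name(0xD0));
	return 0;
}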
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 531aaf377592..1ad7e6b8ed1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -56,6 +56,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -224,13 +227,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
chip_name = "tonga";
break;
case CHIP_POLARIS11:
- chip_name = "polaris11";
+ if (((adev->pdev->device == 0x67ef) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe5))) ||
+ ((adev->pdev->device == 0x67ff) &&
+ ((adev->pdev->revision == 0xcf) ||
+ (adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff))))
+ chip_name = "polaris11_k";
+ else if ((adev->pdev->device == 0x67ef) &&
+ (adev->pdev->revision == 0xe2))
+ chip_name = "polaris11_k";
+ else
+ chip_name = "polaris11";
break;
case CHIP_POLARIS10:
- chip_name = "polaris10";
+ if ((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe1) ||
+ (adev->pdev->revision == 0xf7)))
+ chip_name = "polaris10_k";
+ else
+ chip_name = "polaris10";
break;
case CHIP_POLARIS12:
- chip_name = "polaris12";
+ if (((adev->pdev->device == 0x6987) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc3))) ||
+ ((adev->pdev->device == 0x6981) &&
+ ((adev->pdev->revision == 0x00) ||
+ (adev->pdev->revision == 0x01) ||
+ (adev->pdev->revision == 0x10))))
+ chip_name = "polaris12_k";
+ else
+ chip_name = "polaris12";
break;
case CHIP_FIJI:
case CHIP_CARRIZO:
@@ -337,7 +366,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
- u32 data, vbios_version;
+ u32 data;
int i, ucode_size, regs_size;
/* Skip MC ucode loading on SR-IOV capable boards.
@@ -348,13 +377,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
if (amdgpu_sriov_bios(adev))
return 0;
- WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
- data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
- vbios_version = data & 0xf;
-
- if (vbios_version == 0)
- return 0;
-
if (!adev->gmc.fw)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 811231e4ec53..bacdaef77b6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -244,6 +244,62 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+/**
+ * gmc_v9_0_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry,
+ uint64_t addr)
+{
+ struct amdgpu_vm *vm;
+ u64 key;
+ int r;
+
+ /* No PASID, can't identify faulting process */
+ if (!entry->pasid)
+ return true;
+
+ /* Not a retry fault */
+ if (!(entry->src_data[1] & 0x80))
+ return true;
+
+ /* Track retry faults in per-VM fault FIFO. */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
+ if (!vm) {
+ /* VM not found, process it normally */
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return true;
+ }
+
+ key = AMDGPU_VM_FAULT(entry->pasid, addr);
+ r = amdgpu_vm_add_fault(vm->fault_hash, key);
+
+ /* Hash table is full or the fault is already being processed,
+ * ignore further page faults
+ */
+ if (r != 0) {
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return false;
+ }
+ /* No locking required with single writer and single reader */
+ r = kfifo_put(&vm->faults, key);
+ if (!r) {
+ /* FIFO is full. Ignore it until there is space */
+ amdgpu_vm_clear_fault(vm->fault_hash, key);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return false;
+ }
+
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ /* It's the first fault for this address, process it normally */
+ return true;
+}
+
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -255,6 +311,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+ if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
+ return 1; /* This also prevents sending it to KFD */
+
if (!amdgpu_sriov_vf(adev)) {
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
@@ -338,9 +397,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
struct amdgpu_vmhub *hub = &adev->vmhub[i];
u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
- if (i == AMDGPU_GFXHUB && !adev->in_gpu_reset &&
- adev->gfx.kiq.ring.sched.ready &&
- (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
+ /* This is necessary for a HW workaround under SRIOV as well
+ * as GFXOFF under bare metal
+ */
+ if (adev->gfx.kiq.ring.sched.ready &&
+ (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
+ !adev->in_gpu_reset) {
uint32_t req = hub->vm_inv_eng0_req + eng;
uint32_t ack = hub->vm_inv_eng0_ack + eng;
@@ -656,37 +718,46 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
}
}
-static int gmc_v9_0_late_init(void *handle)
+static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /*
- * The latest engine allocation on gfx9 is:
- * Engine 0, 1: idle
- * Engine 2, 3: firmware
- * Engine 4~13: amdgpu ring, subject to change when ring number changes
- * Engine 14~15: idle
- * Engine 16: kfd tlb invalidation
- * Engine 17: Gart flushes
- */
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
+ struct amdgpu_ring *ring;
+ unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
+ {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP};
unsigned i;
- int r;
+ unsigned vmhub, inv_eng;
- if (!gmc_v9_0_keep_stolen_memory(adev))
- amdgpu_bo_late_init(adev);
+ for (i = 0; i < adev->num_rings; ++i) {
+ ring = adev->rings[i];
+ vmhub = ring->funcs->vmhub;
+
+ inv_eng = ffs(vm_inv_engs[vmhub]);
+ if (!inv_eng) {
+ dev_err(adev->dev, "no VM inv eng for ring %s\n",
+ ring->name);
+ return -EINVAL;
+ }
- for(i = 0; i < adev->num_rings; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
- unsigned vmhub = ring->funcs->vmhub;
+ ring->vm_inv_eng = inv_eng - 1;
+ change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
- ring->vm_inv_eng = vm_inv_eng[vmhub]++;
dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
}
- /* Engine 16 is used for KFD and 17 for GART flushes */
- for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 16);
+ return 0;
+}
+
+static int gmc_v9_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ if (!gmc_v9_0_keep_stolen_memory(adev))
+ amdgpu_bo_late_init(adev);
+
+ r = gmc_v9_0_allocate_vm_inv_eng(adev);
+ if (r)
+ return r;
if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
r = gmc_v9_0_ecc_available(adev);
@@ -899,6 +970,9 @@ static int gmc_v9_0_sw_init(void *handle)
/* This interrupt is VMC page fault.*/
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
+ if (r)
+ return r;
+
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
@@ -931,7 +1005,7 @@ static int gmc_v9_0_sw_init(void *handle)
}
adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
- if (adev->asic_type == CHIP_VEGA20) {
+ if (adev->gmc.xgmi.supported) {
r = gfxhub_v1_1_get_xgmi_info(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
index b030ca5ea107..5c8deac65580 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
@@ -24,6 +24,16 @@
#ifndef __GMC_V9_0_H__
#define __GMC_V9_0_H__
+ /*
+ * The latest engine allocation on gfx9 is:
+ * Engine 2, 3: firmware
+ * Engine 0, 1, 4~16: amdgpu ring,
+ * subject to change when ring number changes
+ * Engine 17: Gart flushes
+ */
+#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+
extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
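
The gmc_v9_0.c hunk above replaces the old "next free engine" counter with these per-hub free bitmaps: gmc_v9_0_allocate_vm_inv_eng() picks the lowest set bit with ffs() and clears it once an engine is handed to a ring, so the firmware engines (2-3) and the GART-flush engine (17) can never be handed out. A minimal, userspace-compilable sketch of that allocation pattern (names here are illustrative, not the driver's):

```c
#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Free bitmaps mirroring GFXHUB/MMHUB_FREE_VM_INV_ENGS_BITMAP:
 * bits 0-1 and 4-16 set (free), bits 2-3 and 17 clear (reserved).
 */
static unsigned int free_engines[2] = { 0x1FFF3, 0x1FFF3 };

/* Hand out the lowest free engine on @hub, or -1 when the bitmap is empty. */
static int alloc_inv_eng(unsigned int hub)
{
	int eng = ffs(free_engines[hub]);	/* 1-based index of lowest set bit */

	if (!eng)
		return -1;			/* no engines left */

	free_engines[hub] &= ~(1u << (eng - 1));	/* mark it as used */
	return eng - 1;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("ring %d -> inv eng %d\n", i, alloc_inv_eng(0));
	return 0;	/* prints engines 0, 1, 4, 5 */
}
```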
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index cf0fc61aebe6..a3984d10b604 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -208,34 +208,6 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * iceland_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* iceland_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -440,7 +412,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
.get_wptr = iceland_ih_get_wptr,
- .prescreen_iv = iceland_ih_prescreen_iv,
.decode_iv = iceland_ih_decode_iv,
.set_rptr = iceland_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 64e875d528dd..6a0fcd67662a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -37,7 +37,6 @@
#include "gmc/gmc_8_2_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
-#include "gca/gfx_8_0_sh_mask.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 6f9c54978cc1..accdedd63c98 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -32,6 +32,7 @@
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
#define smnPCIE_CONFIG_CNTL 0x11180044
+#define smnPCIE_CI_CNTL 0x11180080
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
@@ -270,6 +271,12 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
+ data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index f8cee95d61cc..4cd31a276dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -31,6 +31,7 @@
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
+#define smnPCIE_CI_CNTL 0x11180080
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
@@ -222,7 +223,13 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
+ data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
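
Both NBIO hunks above program the new smnPCIE_CI_CNTL register with the same read-modify-write idiom: read, set the CI_SLV_ORDERING_DIS field, and write back only when the value actually changed. A small standalone sketch of that idiom (the register and mask below are stand-ins, not the real SMN offsets):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in register backing store; in the driver this is an SMN register
 * accessed through RREG32_PCIE()/WREG32_PCIE().
 */
static uint32_t fake_reg = 0x00000010;

static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v; printf("write 0x%08x\n", v); }

/* Set a single-bit field, skipping the write when it is already programmed. */
static void set_field_if_needed(uint32_t mask)
{
	uint32_t def, data;

	def = data = reg_read();
	data |= mask;			/* REG_SET_FIELD(..., 1) for a 1-bit field */

	if (def != data)
		reg_write(data);	/* avoid a redundant register write */
}

int main(void)
{
	set_field_if_needed(1u << 3);	/* first call writes the register */
	set_field_if_needed(1u << 3);	/* second call is a no-op */
	return 0;
}
```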
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 882bd83a28c4..0de00fbe9233 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -43,6 +43,8 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
+ GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
};
@@ -89,7 +91,8 @@ enum psp_gfx_cmd_id
GFX_CMD_ID_LOAD_IP_FW = 0x00000006, /* load HW IP FW */
GFX_CMD_ID_DESTROY_TMR = 0x00000007, /* destroy TMR region */
GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */
-
+ GFX_CMD_ID_SETUP_VMR = 0x00000009, /* setup VMR region */
+ GFX_CMD_ID_DESTROY_VMR = 0x0000000A, /* destroy VMR region */
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 295c2205485a..d78b4306a36f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -240,12 +240,9 @@ static int psp_v10_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- struct psp_ring *ring;
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- ring = &psp->km_ring;
-
/* Write the ring destroy command to C2PMSG_64 */
psp_ring_reg = 3 << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index e5dd052d9e06..0c6e7f9b143f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -34,6 +34,7 @@
#include "nbio/nbio_7_4_offset.h"
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
/* address block */
@@ -100,6 +101,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
char fw_name[30];
int err = 0;
const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct psp_firmware_header_v1_0 *asd_hdr;
const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
@@ -132,14 +134,30 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
le32_to_cpu(sos_hdr->sos_offset_bytes);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+ err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ if (err)
+ goto out1;
+
+ err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ if (err)
+ goto out1;
+
+ asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+ adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+ adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+ le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err)
- goto out;
+ goto out2;
err = amdgpu_ucode_validate(adev->psp.ta_fw);
if (err)
- goto out;
+ goto out2;
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
@@ -148,14 +166,18 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
return 0;
+
+out2:
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+out1:
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
out:
- if (err) {
- dev_err(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
- }
+ dev_err(adev->dev,
+ "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+ release_firmware(adev->psp.sos_fw);
+ adev->psp.sos_fw = NULL;
return err;
}
@@ -171,8 +193,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
* are already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
+ }
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -288,6 +313,13 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
return 0;
}
+static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
+{
+ if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
+ return true;
+ return false;
+}
+
static int psp_v11_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -296,26 +328,47 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- /* Write low address of the ring to C2PMSG_69 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_70 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
- /* Write size of ring to C2PMSG_71 */
- psp_ring_reg = ring->ring_size;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
- /* Write the ring initialization command to C2PMSG_64 */
- psp_ring_reg = ring_type;
- psp_ring_reg = psp_ring_reg << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ if (psp_v11_0_support_vmr_ring(psp)) {
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
+
+ /* Write the ring initialization command to C2PMSG_101 */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, false);
+
+ } else {
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+ }
return ret;
}
@@ -326,15 +379,24 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
int ret = 0;
struct amdgpu_device *adev = psp->adev;
- /* Write the ring destroy command to C2PMSG_64 */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS);
+	/* Write the ring destroy command */
+ if (psp_v11_0_support_vmr_ring(psp))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ /* Wait for response flag (bit 31) */
+ if (psp_v11_0_support_vmr_ring(psp))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
return ret;
}
@@ -373,7 +435,10 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
/* KM (GPCOM) prepare write pointer */
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+ if (psp_v11_0_support_vmr_ring(psp))
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
/* Update KM RB frame pointer to new frame */
/* write_frame ptr increments by size of rb_frame in bytes */
@@ -402,7 +467,11 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
+ if (psp_v11_0_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
return 0;
}
@@ -547,7 +616,7 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
/*send the mode 1 reset command*/
WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
- mdelay(1000);
+ msleep(500);
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
@@ -640,7 +709,7 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}
-static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
+static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
@@ -653,12 +722,14 @@ static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
/* Invoke xgmi ta to get hive id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
- return 0;
- else
- return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+ return ret;
+
+ *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+
+ return 0;
}
-static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
+static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
@@ -671,9 +742,11 @@ static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
/* Invoke xgmi ta to get the node id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
- return 0;
- else
- return xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+ return ret;
+
+ *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+
+ return 0;
}
static const struct psp_funcs psp_v11_0_funcs = {
@@ -692,6 +765,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
+ .support_vmr_ring = psp_v11_0_support_vmr_ring,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
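
The xgmi_get_hive_id/xgmi_get_node_id changes above switch from returning the 64-bit ID directly to returning a status code and passing the ID through an out parameter, so a TA failure is no longer indistinguishable from a legitimate ID of zero. A hedged sketch of that calling convention (ta_query() stands in for psp_xgmi_invoke(), which is not shown here):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the TA invocation; returns 0 on success, negative on error. */
static int ta_query(uint64_t *out)
{
	*out = 0x1234abcdULL;
	return 0;
}

/* New-style getter: status in the return value, ID in the out parameter. */
static int get_hive_id(uint64_t *hive_id)
{
	uint64_t id;
	int ret = ta_query(&id);

	if (ret)
		return ret;	/* propagate the failure instead of returning 0 */

	*hive_id = id;
	return 0;
}

int main(void)
{
	uint64_t hive_id;

	if (!get_hive_id(&hive_id))
		printf("hive id = 0x%llx\n", (unsigned long long)hive_id);
	return 0;
}
```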
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 9cea0bbe4525..79694ff16969 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -240,8 +240,11 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
* are already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
+ }
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -356,12 +359,9 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- struct psp_ring *ring;
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- ring = &psp->km_ring;
-
/* Write the ring destroy command to C2PMSG_64 */
psp_ring_reg = 3 << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
@@ -593,9 +593,9 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
}
/*send the mode 1 reset command*/
- WREG32(offset, 0x70000);
+ WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
- mdelay(1000);
+ msleep(500);
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index b6a25f92d566..1bccc5fe2d9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1146,7 +1146,7 @@ static int sdma_v3_0_sw_init(void *handle)
if (!amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
ring->doorbell_index = (i == 0) ?
- AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.sdma_engine0 : adev->doorbell_index.sdma_engine1;
} else {
ring->use_pollmem = true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index f4490cdd9804..fd0bfe140ee0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -925,11 +925,9 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
OFFSET, ring->doorbell_index);
WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
- /* TODO: enable doorbell support */
- /*adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index);*/
- sdma_v4_0_ring_set_wptr(ring);
+	/* paging queue doorbell range is set up in sdma_v4_0_gfx_resume */
+ sdma_v4_0_page_ring_set_wptr(ring);
/* set minor_ptr_update to 0 after wptr programed */
WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
@@ -1449,23 +1447,45 @@ static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}
+static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
+{
+ uint fw_version = adev->sdma.instance[0].fw_version;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return fw_version >= 430;
+ case CHIP_VEGA12:
+ /*return fw_version >= 31;*/
+ return false;
+ case CHIP_VEGA20:
+ return fw_version >= 123;
+ default:
+ return false;
+ }
+}
+
static int sdma_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
- if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->asic_type == CHIP_RAVEN)
adev->sdma.num_instances = 1;
- adev->sdma.has_page_queue = false;
- } else {
+ else
adev->sdma.num_instances = 2;
- /* TODO: Page queue breaks driver reload under SRIOV */
- if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev)))
- adev->sdma.has_page_queue = false;
- else if (adev->asic_type != CHIP_VEGA20 &&
- adev->asic_type != CHIP_VEGA12)
- adev->sdma.has_page_queue = true;
+
+ r = sdma_v4_0_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load sdma firmware!\n");
+ return r;
}
+ /* TODO: Page queue breaks driver reload under SRIOV */
+ if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev)))
+ adev->sdma.has_page_queue = false;
+ else if (sdma_v4_0_fw_support_paging_queue(adev))
+ adev->sdma.has_page_queue = true;
+
sdma_v4_0_set_ring_funcs(adev);
sdma_v4_0_set_buffer_funcs(adev);
sdma_v4_0_set_vm_pte_funcs(adev);
@@ -1474,7 +1494,6 @@ static int sdma_v4_0_early_init(void *handle)
return 0;
}
-
static int sdma_v4_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
@@ -1493,12 +1512,6 @@ static int sdma_v4_0_sw_init(void *handle)
if (r)
return r;
- r = sdma_v4_0_init_microcode(adev);
- if (r) {
- DRM_ERROR("Failed to load sdma firmware!\n");
- return r;
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1507,15 +1520,10 @@ static int sdma_v4_0_sw_init(void *handle)
DRM_INFO("use_doorbell being set to: [%s]\n",
ring->use_doorbell?"true":"false");
- if (adev->asic_type == CHIP_VEGA10)
- ring->doorbell_index = (i == 0) ?
- (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
- : (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
- else
- ring->doorbell_index = (i == 0) ?
- (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
- : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
-
+ /* doorbell size is 2 dwords, get DWORD offset */
+ ring->doorbell_index = (i == 0) ?
+ (adev->doorbell_index.sdma_engine0 << 1)
+ : (adev->doorbell_index.sdma_engine1 << 1);
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1529,7 +1537,15 @@ static int sdma_v4_0_sw_init(void *handle)
if (adev->sdma.has_page_queue) {
ring = &adev->sdma.instance[i].page;
ring->ring_obj = NULL;
- ring->use_doorbell = false;
+ ring->use_doorbell = true;
+
+			/* the paging queue uses the same doorbell index/routing as the gfx queue,
+			 * offset by 0x400 dwords (4096 bytes) onto the second doorbell page
+ */
+ ring->doorbell_index = (i == 0) ?
+ (adev->doorbell_index.sdma_engine0 << 1)
+ : (adev->doorbell_index.sdma_engine1 << 1);
+ ring->doorbell_index += 0x400;
sprintf(ring->name, "page%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1689,13 +1705,15 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
break;
case 1:
- /* XXX compute */
+ if (adev->asic_type == CHIP_VEGA20)
+ amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
case 2:
/* XXX compute */
break;
case 3:
- amdgpu_fence_process(&adev->sdma.instance[instance].page);
+ if (adev->asic_type != CHIP_VEGA20)
+ amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
}
return 0;
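
With the firmware load moved into early_init, whether the SDMA paging queue is enabled is now decided from the loaded firmware version per ASIC instead of a hard-coded chip list. A minimal sketch of that gate (the thresholds are the ones in the hunk above; the enum is illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

enum asic { VEGA10, VEGA12, VEGA20, RAVEN };

/* Paging-queue support is gated on the SDMA firmware version per ASIC. */
static bool fw_supports_paging_queue(enum asic chip, unsigned int fw_version)
{
	switch (chip) {
	case VEGA10:
		return fw_version >= 430;
	case VEGA20:
		return fw_version >= 123;
	default:
		return false;	/* Vega12 support is still commented out above */
	}
}

int main(void)
{
	printf("vega10 fw 431: %d\n", fw_supports_paging_queue(VEGA10, 431));	/* 1 */
	printf("vega20 fw 100: %d\n", fw_supports_paging_queue(VEGA20, 100));	/* 0 */
	return 0;
}
```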
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index b3d7d9f83202..2938fb9f17cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -118,19 +118,6 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev)
return (wptr & adev->irq.ih.ptr_mask);
}
-/**
- * si_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- /* Process all interrupts */
- return true;
-}
-
static void si_ih_decode_iv(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
@@ -301,7 +288,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
static const struct amdgpu_ih_funcs si_ih_funcs = {
.get_wptr = si_ih_get_wptr,
- .prescreen_iv = si_ih_prescreen_iv,
.decode_iv = si_ih_decode_iv,
.set_rptr = si_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4cc0dcb1a187..8849b74078d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -507,6 +507,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
return -EINVAL;
}
+ if (adev->asic_type == CHIP_VEGA20)
+ adev->gmc.xgmi.supported = true;
+
if (adev->flags & AMD_IS_APU)
adev->nbio_funcs = &nbio_v7_0_funcs;
else if (adev->asic_type == CHIP_VEGA20)
@@ -613,6 +616,24 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.flush_hdp = &soc15_flush_hdp,
.invalidate_hdp = &soc15_invalidate_hdp,
.need_full_reset = &soc15_need_full_reset,
+ .init_doorbell_index = &vega10_doorbell_index_init,
+};
+
+static const struct amdgpu_asic_funcs vega20_asic_funcs =
+{
+ .read_disabled_bios = &soc15_read_disabled_bios,
+ .read_bios_from_rom = &soc15_read_bios_from_rom,
+ .read_register = &soc15_read_register,
+ .reset = &soc15_asic_reset,
+ .set_vga_state = &soc15_vga_set_state,
+ .get_xclk = &soc15_get_xclk,
+ .set_uvd_clocks = &soc15_set_uvd_clocks,
+ .set_vce_clocks = &soc15_set_vce_clocks,
+ .get_config_memsize = &soc15_get_config_memsize,
+ .flush_hdp = &soc15_flush_hdp,
+ .invalidate_hdp = &soc15_invalidate_hdp,
+ .need_full_reset = &soc15_need_full_reset,
+ .init_doorbell_index = &vega20_doorbell_index_init,
};
static int soc15_common_early_init(void *handle)
@@ -632,11 +653,11 @@ static int soc15_common_early_init(void *handle)
adev->se_cac_rreg = &soc15_se_cac_rreg;
adev->se_cac_wreg = &soc15_se_cac_wreg;
- adev->asic_funcs = &soc15_asic_funcs;
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
case CHIP_VEGA10:
+ adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
@@ -660,6 +681,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = 0x1;
break;
case CHIP_VEGA12:
+ adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -682,6 +704,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_VEGA20:
+ adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -704,6 +727,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x28;
break;
case CHIP_RAVEN:
+ adev->asic_funcs = &soc15_asic_funcs;
if (adev->rev_id >= 0x8)
adev->external_rev_id = adev->rev_id + 0x81;
else if (adev->pdev->device == 0x15d8)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index f8ad7804dc40..a66c8bfbbaa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -58,4 +58,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
+void vega10_doorbell_index_init(struct amdgpu_device *adev);
+void vega20_doorbell_index_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 958b10a57073..49c262540940 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -49,14 +49,19 @@
#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \
do { \
+ uint32_t old_ = 0; \
uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
uint32_t loop = adev->usec_timeout; \
while ((tmp_ & (mask)) != (expected_value)) { \
- udelay(2); \
+ if (old_ != tmp_) { \
+ loop = adev->usec_timeout; \
+ old_ = tmp_; \
+ } else \
+ udelay(1); \
tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
loop--; \
if (!loop) { \
- DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
+ DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \
ret = -ETIMEDOUT; \
break; \
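
The reworked SOC15_WAIT_ON_RREG above only burns its timeout budget while the polled value stays the same; any observed change restarts the budget, so an engine that is making slow progress is no longer declared timed out. A standalone sketch of that "reset the timeout on progress" loop (poll_reg() stands in for RREG32):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for RREG32: the value only advances once every 5 polls. */
static uint32_t poll_reg(void)
{
	static uint32_t val;
	static int ticks;

	if (++ticks % 5 == 0)
		val++;
	return val;
}

/* Wait until (reg & mask) == expected, restarting the budget on any change. */
static int wait_on_reg(uint32_t expected, uint32_t mask, int budget)
{
	uint32_t old = 0, tmp = poll_reg();
	int loop = budget;

	while ((tmp & mask) != expected) {
		if (old != tmp) {
			loop = budget;	/* value moved: restart the timeout */
			old = tmp;
		} else {
			loop--;		/* no progress: consume the budget */
		}
		if (!loop)
			return -1;	/* genuinely stuck */
		tmp = poll_reg();
	}
	return 0;
}

int main(void)
{
	/* Succeeds even though ~25 polls are needed, because the budget of 8
	 * is refreshed each time the value advances; a fixed budget of 8
	 * would have reported a timeout.
	 */
	printf("result: %d\n", wait_on_reg(5, 0xff, 8));
	return 0;
}
```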
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 3abffd06b5c7..15da06ddeb75 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -219,34 +219,6 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * tonga_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* tonga_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -322,7 +294,7 @@ static int tonga_ih_sw_init(void *handle)
return r;
adev->irq.ih.use_doorbell = true;
- adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
r = amdgpu_irq_init(adev);
@@ -506,7 +478,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
.get_wptr = tonga_ih_get_wptr,
- .prescreen_iv = tonga_ih_prescreen_iv,
.decode_iv = tonga_ih_decode_iv,
.set_rptr = tonga_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 90bbcee00f28..d69c8f6daaf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 1c5e12703103..ee8cd06ddc38 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index f184842ef2a2..d4f4a66f8324 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -400,16 +400,16 @@ static int uvd_v6_0_sw_init(void *handle)
DRM_INFO("UVD ENC is disabled\n");
}
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 8a4595968d98..aef924026a28 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -430,16 +430,12 @@ static int uvd_v7_0_sw_init(void *handle)
DRM_INFO("PSP loading UVD firmware\n");
}
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
if (adev->uvd.harvest_config & (1 << j))
continue;
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
- sprintf(ring->name, "uvd<%d>", j);
+ sprintf(ring->name, "uvd_%d", ring->me);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
if (r)
return r;
@@ -447,7 +443,7 @@ static int uvd_v7_0_sw_init(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst[j].ring_enc[i];
- sprintf(ring->name, "uvd_enc%d<%d>", i, j);
+ sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
@@ -455,9 +451,9 @@ static int uvd_v7_0_sw_init(void *handle)
* sriov, so set unused location for other unused rings.
*/
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
if (r)
@@ -465,6 +461,10 @@ static int uvd_v7_0_sw_init(void *handle)
}
}
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 3e84840859a7..2668effadd27 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,7 +37,6 @@
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
-#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 0054ba1b9a68..9fb34b7d8e03 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -466,9 +466,9 @@ static int vce_v4_0_sw_init(void *handle)
* so set unused location for other unused rings.
*/
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c1a03505f956..89bb2fef90eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -48,6 +48,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
+static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
/**
* vcn_v1_0_early_init - set function pointers
@@ -213,8 +214,9 @@ static int vcn_v1_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
- if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
- vcn_v1_0_stop(adev);
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+ vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
ring->sched.ready = false;
@@ -1086,7 +1088,8 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
- /* initialize wptr */
+ /* initialize JPEG wptr */
+ ring = &adev->vcn.ring_jpeg;
ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
/* copy patch commands to the jpeg ring */
@@ -1158,21 +1161,29 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
int ret_code = 0;
+ uint32_t tmp;
/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
- if (!ret_code) {
- int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
- /* wait for read ptr to be equal to write ptr */
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
- UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
- }
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a0fda6f9252a..2c250b01a903 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -220,90 +220,6 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * vega10_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u32 dw0, dw3, dw4, dw5;
- u16 pasid;
- u64 addr, key;
- struct amdgpu_vm *vm;
- int r;
-
- dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
- dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
-
- /* Filter retry page faults, let only the first one pass. If
- * there are too many outstanding faults, ignore them until
- * some faults get cleared.
- */
- switch (dw0 & 0xff) {
- case SOC15_IH_CLIENTID_VMC:
- case SOC15_IH_CLIENTID_UTCL2:
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- pasid = dw3 & 0xffff;
- /* No PASID, can't identify faulting process */
- if (!pasid)
- return true;
-
- /* Not a retry fault, check fault credit */
- if (!(dw5 & 0x80)) {
- if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
- goto ignore_iv;
- return true;
- }
-
- /* Track retry faults in per-VM fault FIFO. */
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
- key = AMDGPU_VM_FAULT(pasid, addr);
- if (!vm) {
- /* VM not found, process it normally */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- } else {
- r = amdgpu_vm_add_fault(vm->fault_hash, key);
-
- /* Hash table is full or the fault is already being processed,
- * ignore further page faults
- */
- if (r != 0) {
- spin_unlock(&adev->vm_manager.pasid_lock);
- goto ignore_iv;
- }
- }
- /* No locking required with single writer and single reader */
- r = kfifo_put(&vm->faults, key);
- if (!r) {
- /* FIFO is full. Ignore it until there is space */
- amdgpu_vm_clear_fault(vm->fault_hash, key);
- spin_unlock(&adev->vm_manager.pasid_lock);
- goto ignore_iv;
- }
-
- spin_unlock(&adev->vm_manager.pasid_lock);
- /* It's the first fault for this address, process it normally */
- return true;
-
-ignore_iv:
- adev->irq.ih.rptr += 32;
- return false;
-}
-
-/**
* vega10_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -385,7 +301,7 @@ static int vega10_ih_sw_init(void *handle)
return r;
adev->irq.ih.use_doorbell = true;
- adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
r = amdgpu_irq_init(adev);
@@ -487,7 +403,6 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
- .prescreen_iv = vega10_ih_prescreen_iv,
.decode_iv = vega10_ih_decode_iv,
.set_rptr = vega10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index c5c9b2bc190d..422674bb3cdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -56,4 +56,32 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
return 0;
}
+void vega10_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_DOORBELL64_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL64_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL64_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL64_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL64_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL64_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL64_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL64_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL64_MEC_RING7;
+ adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL64_USERQUEUE_START;
+ adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL64_USERQUEUE_END;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL64_GFX_RING0;
+ adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL64_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL64_sDMA_ENGINE1;
+ adev->doorbell_index.ih = AMDGPU_DOORBELL64_IH;
+ adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_DOORBELL64_UVD_RING0_1;
+ adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_DOORBELL64_UVD_RING2_3;
+ adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_DOORBELL64_UVD_RING4_5;
+ adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_DOORBELL64_UVD_RING6_7;
+ adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_DOORBELL64_VCE_RING0_1;
+ adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3;
+ adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5;
+ adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
+	/* In units of dword doorbells */
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index d13fc4fcb517..edce413fda9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -54,4 +54,37 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
return 0;
}
+void vega20_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_VEGA20_DOORBELL_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_VEGA20_DOORBELL_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_VEGA20_DOORBELL_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_VEGA20_DOORBELL_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_VEGA20_DOORBELL_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_VEGA20_DOORBELL_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_VEGA20_DOORBELL_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_VEGA20_DOORBELL_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_VEGA20_DOORBELL_MEC_RING7;
+ adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START;
+ adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0;
+ adev->doorbell_index.sdma_engine0 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine1 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.sdma_engine2 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2;
+ adev->doorbell_index.sdma_engine3 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3;
+ adev->doorbell_index.sdma_engine4 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4;
+ adev->doorbell_index.sdma_engine5 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5;
+ adev->doorbell_index.sdma_engine6 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6;
+ adev->doorbell_index.sdma_engine7 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7;
+ adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH;
+ adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1;
+ adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3;
+ adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5;
+ adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7;
+ adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1;
+ adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3;
+ adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
+ adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
+ adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 07880d35e9de..77e367459101 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -87,9 +87,9 @@ static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(mmPCIE_INDEX, reg);
- (void)RREG32(mmPCIE_INDEX);
- r = RREG32(mmPCIE_DATA);
+ WREG32_NO_KIQ(mmPCIE_INDEX, reg);
+ (void)RREG32_NO_KIQ(mmPCIE_INDEX);
+ r = RREG32_NO_KIQ(mmPCIE_DATA);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
return r;
}
@@ -99,10 +99,10 @@ static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(mmPCIE_INDEX, reg);
- (void)RREG32(mmPCIE_INDEX);
- WREG32(mmPCIE_DATA, v);
- (void)RREG32(mmPCIE_DATA);
+ WREG32_NO_KIQ(mmPCIE_INDEX, reg);
+ (void)RREG32_NO_KIQ(mmPCIE_INDEX);
+ WREG32_NO_KIQ(mmPCIE_DATA, v);
+ (void)RREG32_NO_KIQ(mmPCIE_DATA);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
@@ -123,8 +123,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_11, (reg));
- WREG32(mmSMC_IND_DATA_11, (v));
+ WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
+ WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
@@ -955,6 +955,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.flush_hdp = &vi_flush_hdp,
.invalidate_hdp = &vi_invalidate_hdp,
.need_full_reset = &vi_need_full_reset,
+ .init_doorbell_index = &legacy_doorbell_index_init,
};
#define CZ_REV_BRISTOL(rev) \
@@ -1712,3 +1713,21 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
return 0;
}
+
+void legacy_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
+ adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 0429fe332269..8de0772f986c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -30,4 +30,5 @@ void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 5f4062b41add..083bd8114db1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -33,6 +33,7 @@
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
+#include <linux/dma-buf.h>
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
@@ -157,8 +158,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
}
if ((args->ring_base_address) &&
- (!access_ok(VERIFY_WRITE,
- (const void __user *) args->ring_base_address,
+ (!access_ok((const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
pr_err("Can't access ring base address\n");
return -EFAULT;
@@ -169,31 +169,27 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return -EINVAL;
}
- if (!access_ok(VERIFY_WRITE,
- (const void __user *) args->read_pointer_address,
+ if (!access_ok((const void __user *) args->read_pointer_address,
sizeof(uint32_t))) {
pr_err("Can't access read pointer\n");
return -EFAULT;
}
- if (!access_ok(VERIFY_WRITE,
- (const void __user *) args->write_pointer_address,
+ if (!access_ok((const void __user *) args->write_pointer_address,
sizeof(uint32_t))) {
pr_err("Can't access write pointer\n");
return -EFAULT;
}
if (args->eop_buffer_address &&
- !access_ok(VERIFY_WRITE,
- (const void __user *) args->eop_buffer_address,
+ !access_ok((const void __user *) args->eop_buffer_address,
sizeof(uint32_t))) {
pr_debug("Can't access eop buffer");
return -EFAULT;
}
if (args->ctx_save_restore_address &&
- !access_ok(VERIFY_WRITE,
- (const void __user *) args->ctx_save_restore_address,
+ !access_ok((const void __user *) args->ctx_save_restore_address,
sizeof(uint32_t))) {
pr_debug("Can't access ctx save restore buffer");
return -EFAULT;
@@ -364,8 +360,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
}
if ((args->ring_base_address) &&
- (!access_ok(VERIFY_WRITE,
- (const void __user *) args->ring_base_address,
+ (!access_ok((const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
pr_err("Can't access ring base address\n");
return -EFAULT;
@@ -1273,6 +1268,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return -EINVAL;
}
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+ if (args->size != kfd_doorbell_process_slice(dev))
+ return -EINVAL;
+ offset = kfd_get_process_doorbells(dev, p);
+ }
+
mutex_lock(&p->mutex);
pdd = kfd_bind_process_to_device(dev, p);
@@ -1550,6 +1551,115 @@ copy_from_user_failed:
return err;
}
+static int kfd_ioctl_get_dmabuf_info(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_get_dmabuf_info_args *args = data;
+ struct kfd_dev *dev = NULL;
+ struct kgd_dev *dma_buf_kgd;
+ void *metadata_buffer = NULL;
+ uint32_t flags;
+ unsigned int i;
+ int r;
+
+ /* Find a KFD GPU device that supports the get_dmabuf_info query */
+ for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
+ if (dev)
+ break;
+ if (!dev)
+ return -EINVAL;
+
+ if (args->metadata_ptr) {
+ metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
+ if (!metadata_buffer)
+ return -ENOMEM;
+ }
+
+ /* Get dmabuf info from KGD */
+ r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
+ &dma_buf_kgd, &args->size,
+ metadata_buffer, args->metadata_size,
+ &args->metadata_size, &flags);
+ if (r)
+ goto exit;
+
+ /* Reverse-lookup gpu_id from kgd pointer */
+ dev = kfd_device_by_kgd(dma_buf_kgd);
+ if (!dev) {
+ r = -EINVAL;
+ goto exit;
+ }
+ args->gpu_id = dev->id;
+ args->flags = flags;
+
+ /* Copy metadata buffer to user mode */
+ if (metadata_buffer) {
+ r = copy_to_user((void __user *)args->metadata_ptr,
+ metadata_buffer, args->metadata_size);
+ if (r != 0)
+ r = -EFAULT;
+ }
+
+exit:
+ kfree(metadata_buffer);
+
+ return r;
+}
+
+static int kfd_ioctl_import_dmabuf(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_import_dmabuf_args *args = data;
+ struct kfd_process_device *pdd;
+ struct dma_buf *dmabuf;
+ struct kfd_dev *dev;
+ int idr_handle;
+ uint64_t size;
+ void *mem;
+ int r;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+
+ dmabuf = dma_buf_get(args->dmabuf_fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ r = PTR_ERR(pdd);
+ goto err_unlock;
+ }
+
+ r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
+ args->va_addr, pdd->vm,
+ (struct kgd_mem **)&mem, &size,
+ NULL);
+ if (r)
+ goto err_unlock;
+
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
+ if (idr_handle < 0) {
+ r = -EFAULT;
+ goto err_free;
+ }
+
+ mutex_unlock(&p->mutex);
+
+ args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
+
+ return 0;
+
+err_free:
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+err_unlock:
+ mutex_unlock(&p->mutex);
+ return r;
+}
+
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
.cmd_drv = 0, .name = #ioctl}
@@ -1635,7 +1745,13 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
kfd_ioctl_set_cu_mask, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
- kfd_ioctl_get_queue_wave_state, 0)
+ kfd_ioctl_get_queue_wave_state, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
+ kfd_ioctl_get_dmabuf_info, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
+ kfd_ioctl_import_dmabuf, 0),
};
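
The new kfd_ioctl_import_dmabuf() above follows the usual staged-cleanup shape: each acquisition (dma-buf reference, process/device binding, GPUVM import, handle allocation) has a matching error label, and a failure jumps to the label that undoes only what has already succeeded. A small self-contained sketch of that unwinding structure (the resource names are placeholders, not KFD's):

```c
#include <stdio.h>

static int  acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

/* Staged acquisition with goto-based unwinding: a failure at any point
 * releases only the resources taken so far, in reverse order.
 */
static int import_object(int fail_at)
{
	int r;

	r = (fail_at == 1) ? -1 : acquire("lock");
	if (r)
		return r;		/* nothing to undo yet */

	r = (fail_at == 2) ? -1 : acquire("gpuvm mapping");
	if (r)
		goto err_unlock;

	r = (fail_at == 3) ? -1 : acquire("handle");
	if (r)
		goto err_free_mapping;

	release("lock");		/* success path only drops the lock */
	return 0;

err_free_mapping:
	release("gpuvm mapping");
err_unlock:
	release("lock");
	return r;
}

int main(void)
{
	import_object(3);	/* fail late: both cleanups run in reverse order */
	return 0;
}
```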
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 3783d122f283..b7bc7d7d048f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -133,6 +133,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
#define fiji_cache_info carrizo_cache_info
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
+#define polaris12_cache_info carrizo_cache_info
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
@@ -647,7 +648,12 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = polaris11_cache_info;
num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
break;
+ case CHIP_POLARIS12:
+ pcache_info = polaris12_cache_info;
+ num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
+ break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
pcache_info = vega10_cache_info;
num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
@@ -847,7 +853,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
*/
pgdat = NODE_DATA(numa_node_id);
for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
- mem_in_bytes += pgdat->node_zones[zone_type].managed_pages;
+ mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
mem_in_bytes <<= PAGE_SHIFT;
sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index c004647c8cb4..8be9677c0c07 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -205,6 +205,22 @@ static const struct kfd_device_info polaris11_device_info = {
.num_sdma_queues_per_engine = 2,
};
+static const struct kfd_device_info polaris12_device_info = {
+ .asic_family = CHIP_POLARIS12,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 4,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
+};
+
static const struct kfd_device_info vega10_device_info = {
.asic_family = CHIP_VEGA10,
.max_pasid_bits = 16,
@@ -237,6 +253,22 @@ static const struct kfd_device_info vega10_vf_device_info = {
.num_sdma_queues_per_engine = 2,
};
+static const struct kfd_device_info vega12_device_info = {
+ .asic_family = CHIP_VEGA12,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
+};
+
static const struct kfd_device_info vega20_device_info = {
.asic_family = CHIP_VEGA20,
.max_pasid_bits = 16,
@@ -331,6 +363,14 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x67EB, &polaris11_device_info }, /* Polaris11 */
{ 0x67EF, &polaris11_device_info }, /* Polaris11 */
{ 0x67FF, &polaris11_device_info }, /* Polaris11 */
+ { 0x6980, &polaris12_device_info }, /* Polaris12 */
+ { 0x6981, &polaris12_device_info }, /* Polaris12 */
+ { 0x6985, &polaris12_device_info }, /* Polaris12 */
+ { 0x6986, &polaris12_device_info }, /* Polaris12 */
+ { 0x6987, &polaris12_device_info }, /* Polaris12 */
+ { 0x6995, &polaris12_device_info }, /* Polaris12 */
+ { 0x6997, &polaris12_device_info }, /* Polaris12 */
+ { 0x699F, &polaris12_device_info }, /* Polaris12 */
{ 0x6860, &vega10_device_info }, /* Vega10 */
{ 0x6861, &vega10_device_info }, /* Vega10 */
{ 0x6862, &vega10_device_info }, /* Vega10 */
@@ -338,12 +378,24 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x6864, &vega10_device_info }, /* Vega10 */
{ 0x6867, &vega10_device_info }, /* Vega10 */
{ 0x6868, &vega10_device_info }, /* Vega10 */
+ { 0x6869, &vega10_device_info }, /* Vega10 */
+ { 0x686A, &vega10_device_info }, /* Vega10 */
+ { 0x686B, &vega10_device_info }, /* Vega10 */
{ 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
+ { 0x686D, &vega10_device_info }, /* Vega10 */
+ { 0x686E, &vega10_device_info }, /* Vega10 */
+ { 0x686F, &vega10_device_info }, /* Vega10 */
{ 0x687F, &vega10_device_info }, /* Vega10 */
+ { 0x69A0, &vega12_device_info }, /* Vega12 */
+ { 0x69A1, &vega12_device_info }, /* Vega12 */
+ { 0x69A2, &vega12_device_info }, /* Vega12 */
+ { 0x69A3, &vega12_device_info }, /* Vega12 */
+ { 0x69AF, &vega12_device_info }, /* Vega12 */
{ 0x66a0, &vega20_device_info }, /* Vega20 */
{ 0x66a1, &vega20_device_info }, /* Vega20 */
{ 0x66a2, &vega20_device_info }, /* Vega20 */
{ 0x66a3, &vega20_device_info }, /* Vega20 */
+ { 0x66a4, &vega20_device_info }, /* Vega20 */
{ 0x66a7, &vega20_device_info }, /* Vega20 */
{ 0x66af, &vega20_device_info } /* Vega20 */
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index fb9d66ea13b7..8372556b52eb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1547,7 +1547,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
int r;
dqm_lock(dqm);
@@ -1558,19 +1558,19 @@ static int get_wave_state(struct device_queue_manager *dqm,
goto dqm_unlock;
}
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (!mqd) {
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (!mqd_mgr) {
r = -ENOMEM;
goto dqm_unlock;
}
- if (!mqd->get_wave_state) {
+ if (!mqd_mgr->get_wave_state) {
r = -EINVAL;
goto dqm_unlock;
}
- r = mqd->get_wave_state(mqd, q->mqd, ctl_stack, ctl_stack_used_size,
- save_area_used_size);
+ r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
+ ctl_stack_used_size, save_area_used_size);
dqm_unlock:
dqm_unlock(dqm);
@@ -1741,10 +1741,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
device_queue_manager_init_vi_tonga(&dqm->asic_ops);
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
device_queue_manager_init_v9(&dqm->asic_ops);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index fd60a116be37..c3a5dcfe877a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -24,7 +24,6 @@
#include "kfd_device_queue_manager.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
-#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 3d66cec414af..213ea5454d11 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -397,9 +397,11 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
kfd_init_apertures_vi(pdd, id);
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
kfd_init_apertures_v9(pdd, id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index f836897bbf58..a85904ad0d5f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -23,7 +23,7 @@
#include "kfd_priv.h"
#include "kfd_events.h"
#include "soc15_int.h"
-
+#include "kfd_device_queue_manager.h"
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
const uint32_t *ih_ring_entry,
@@ -39,20 +39,39 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
vmid > dev->vm_info.last_vmid_kfd)
return 0;
- /* If there is no valid PASID, it's likely a firmware bug */
- pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
- if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
- return 0;
-
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+ pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+
+ /* This is a known issue for gfx9. Under non HWS, pasid is not set
+ * in the interrupt payload, so we need to find out the pasid on our
+ * own.
+ */
+ if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ const uint32_t pasid_mask = 0xffff;
- pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
- client_id, source_id, pasid);
+ *patched_flag = true;
+ memcpy(patched_ihre, ih_ring_entry,
+ dev->device_info->ih_ring_entry_size);
+
+ pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid(
+ dev->kgd, vmid);
+
+ /* Patch the pasid field */
+ patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
+ & ~pasid_mask) | pasid);
+ }
+
+ pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
data[0], data[1], data[2], data[3],
data[4], data[5], data[6], data[7]);
+ /* If there is no valid PASID, it's likely a bug */
+ if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ return 0;
+
/* Interrupt types we care about: various signals and faults.
* They will be forwarded to a work queue (see below).
*/
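
The gfx9 workaround above copies the IH ring entry and rewrites the PASID field of the copy in place. A stand-alone sketch of that word patch; the endianness helpers (le32_to_cpu()/cpu_to_le32()) used on the real ring data are dropped here for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Patch the low 16 bits (the PASID field) of the 4th dword of a copied
 * interrupt ring entry, mirroring event_interrupt_isr_v9() above. */
static void patch_pasid(const uint32_t *ih_entry, uint32_t *patched,
			size_t entry_dwords, uint32_t pasid)
{
	const uint32_t pasid_mask = 0xffff;

	memcpy(patched, ih_entry, entry_dwords * sizeof(uint32_t));
	patched[3] = (patched[3] & ~pasid_mask) | (pasid & pasid_mask);
}

int main(void)
{
	uint32_t entry[8] = { 0, 0, 0, 0xabcd0000 };
	uint32_t patched[8];

	patch_pasid(entry, patched, 8, 0x1234);
	printf("patched[3] = 0x%08x\n", patched[3]); /* prints 0xabcd1234 */
	return 0;
}
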
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 6c31f7370193..f1596881f20a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -313,6 +313,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
kernel_queue_init_vi(&kq->ops_asic_specific);
break;
@@ -322,6 +323,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
kernel_queue_init_v9(&kq->ops_asic_specific);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 6910028010d6..aed9b9b82213 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -38,8 +38,10 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
return mqd_manager_init_vi_tonga(type, dev);
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
return mqd_manager_init_v9(type, dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index c6080ed3b6a7..045a229436a0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -226,9 +226,11 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
pm->pmf = &kfd_vi_pm_funcs;
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
pm->pmf = &kfd_v9_pm_funcs;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index dec8e64f36bd..0689d4ccbbc0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -793,6 +793,7 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
+struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c73b4ff61f99..5f5b2acedbac 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -101,7 +101,25 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu->pdev == pdev) {
+ if (top_dev->gpu && top_dev->gpu->pdev == pdev) {
+ device = top_dev->gpu;
+ break;
+ }
+
+ up_read(&topology_lock);
+
+ return device;
+}
+
+struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd)
+{
+ struct kfd_topology_device *top_dev;
+ struct kfd_dev *device = NULL;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list)
+ if (top_dev->gpu && top_dev->gpu->kgd == kgd) {
device = top_dev->gpu;
break;
}
@@ -1272,12 +1290,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
pr_debug("Adding doorbell packet type capability\n");
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index c97dc9613325..cfde1568c79a 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -32,11 +32,12 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
-DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet
+DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d8d0b206a79c..8a626d16e8e3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -23,6 +23,9 @@
*
*/
+/* The caprices of the preprocessor require that this be declared right here */
+#define CREATE_TRACE_POINTS
+
#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
@@ -38,7 +41,6 @@
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
-#include "dm_services_types.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
@@ -55,6 +57,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
@@ -72,6 +75,8 @@
#endif
#include "modules/inc/mod_freesync.h"
+#include "modules/power/power_helpers.h"
+#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -129,6 +134,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
+static void handle_cursor_update(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state);
@@ -324,12 +331,29 @@ static void dm_crtc_high_irq(void *interrupt_params)
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
+ struct dm_crtc_state *acrtc_state;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
if (acrtc) {
drm_crtc_handle_vblank(&acrtc->base);
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+
+ acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+ if (acrtc_state->stream &&
+ acrtc_state->vrr_params.supported &&
+ acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+ mod_freesync_handle_v_update(
+ adev->dm.freesync_module,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params);
+
+ dc_stream_adjust_vmin_vmax(
+ adev->dm.dc,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params.adjust);
+ }
}
}
@@ -398,6 +422,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
+ mutex_init(&adev->dm.dc_lock);
+
if(amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
goto error;
@@ -512,6 +538,9 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
+
+ mutex_destroy(&adev->dm.dc_lock);
+
return;
}
@@ -643,6 +672,26 @@ static int dm_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct dmcu_iram_parameters params;
+ unsigned int linear_lut[16];
+ int i;
+ struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+ bool ret;
+
+ for (i = 0; i < 16; i++)
+ linear_lut[i] = 0xFFFF * i / 15;
+
+ params.set = 0;
+ params.backlight_ramping_start = 0xCCCC;
+ params.backlight_ramping_reduction = 0xCCCCCCCC;
+ params.backlight_lut_array_size = 16;
+ params.backlight_lut_array = linear_lut;
+
+ ret = dmcu_load_iram(dmcu, params);
+
+ if (!ret)
+ return -EINVAL;
+
return detect_mst_link_for_all_connectors(adev->ddev);
}
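
dm_late_init() above seeds the DMCU IRAM with a 16-entry linear backlight transfer LUT. A quick check of that table generation, using the same 0xFFFF * i / 15 expression, which spans 0..0xffff in equal steps:

#include <stdio.h>

int main(void)
{
	unsigned int linear_lut[16];
	int i;

	for (i = 0; i < 16; i++) {
		linear_lut[i] = 0xFFFF * i / 15;
		printf("lut[%2d] = 0x%04x\n", i, linear_lut[i]);
	}
	return 0;
}
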
@@ -969,45 +1018,6 @@ const struct amdgpu_ip_block_version dm_ip_block =
};
-static struct drm_atomic_state *
-dm_atomic_state_alloc(struct drm_device *dev)
-{
- struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
-
- if (!state)
- return NULL;
-
- if (drm_atomic_state_init(dev, &state->base) < 0)
- goto fail;
-
- return &state->base;
-
-fail:
- kfree(state);
- return NULL;
-}
-
-static void
-dm_atomic_state_clear(struct drm_atomic_state *state)
-{
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
-
- if (dm_state->context) {
- dc_release_state(dm_state->context);
- dm_state->context = NULL;
- }
-
- drm_atomic_state_default_clear(state);
-}
-
-static void
-dm_atomic_state_alloc_free(struct drm_atomic_state *state)
-{
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
- drm_atomic_state_default_release(state);
- kfree(dm_state);
-}
-
/**
* DOC: atomic
*
@@ -1019,9 +1029,6 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = amdgpu_dm_atomic_commit,
- .atomic_state_alloc = dm_atomic_state_alloc,
- .atomic_state_clear = dm_atomic_state_clear,
- .atomic_state_free = dm_atomic_state_alloc_free
};
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
@@ -1543,8 +1550,117 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
}
#endif
+/*
+ * Acquires the lock for the atomic state object and returns
+ * the new atomic state.
+ *
+ * This should only be called during atomic check.
+ */
+static int dm_atomic_get_state(struct drm_atomic_state *state,
+ struct dm_atomic_state **dm_state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_state *priv_state;
+ int ret;
+
+ if (*dm_state)
+ return 0;
+
+ ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
+ *dm_state = to_dm_atomic_state(priv_state);
+
+ return 0;
+}
+
+struct dm_atomic_state *
+dm_atomic_get_new_state(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_obj *obj;
+ struct drm_private_state *new_obj_state;
+ int i;
+
+ for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
+ if (obj->funcs == dm->atomic_obj.funcs)
+ return to_dm_atomic_state(new_obj_state);
+ }
+
+ return NULL;
+}
+
+struct dm_atomic_state *
+dm_atomic_get_old_state(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_obj *obj;
+ struct drm_private_state *old_obj_state;
+ int i;
+
+ for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
+ if (obj->funcs == dm->atomic_obj.funcs)
+ return to_dm_atomic_state(old_obj_state);
+ }
+
+ return NULL;
+}
+
+static struct drm_private_state *
+dm_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct dm_atomic_state *old_state, *new_state;
+
+ new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
+
+ new_state->context = dc_create_state();
+ if (!new_state->context) {
+ kfree(new_state);
+ return NULL;
+ }
+
+ old_state = to_dm_atomic_state(obj->state);
+ if (old_state && old_state->context)
+ dc_resource_state_copy_construct(old_state->context,
+ new_state->context);
+
+ return &new_state->base;
+}
+
+static void dm_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+ if (dm_state && dm_state->context)
+ dc_release_state(dm_state->context);
+
+ kfree(dm_state);
+}
+
+static struct drm_private_state_funcs dm_atomic_state_funcs = {
+ .atomic_duplicate_state = dm_atomic_duplicate_state,
+ .atomic_destroy_state = dm_atomic_destroy_state,
+};
+
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
+ struct dm_atomic_state *state;
int r;
adev->mode_info.mode_config_initialized = true;
@@ -1562,6 +1678,25 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->context = dc_create_state();
+ if (!state->context) {
+ kfree(state);
+ return -ENOMEM;
+ }
+
+ dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
+
+ drm_atomic_private_obj_init(adev->ddev,
+ &adev->dm.atomic_obj,
+ &state->base,
+ &dm_atomic_state_funcs);
+
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
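
This part of the series moves the driver-private dc_state out of a subclassed drm_atomic_state and into a DRM private object whose duplicate/destroy callbacks copy and release the DC context. A simplified stand-alone model of that duplicate-on-check pattern; the function and struct names mirror the diff but are toy stand-ins, and the plain struct copy stands in for dc_resource_state_copy_construct():

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the DC context and the private-object state wrapper. */
struct dc_state { int stream_count; };
struct dm_atomic_state { struct dc_state *context; };

/* The duplicated state starts as a copy of the old context, so atomic
 * check can mutate it without touching the committed state. */
static struct dm_atomic_state *
dm_atomic_duplicate_state(const struct dm_atomic_state *old_state)
{
	struct dm_atomic_state *new_state = calloc(1, sizeof(*new_state));

	if (!new_state)
		return NULL;

	new_state->context = calloc(1, sizeof(*new_state->context));
	if (!new_state->context) {
		free(new_state);
		return NULL;
	}

	if (old_state && old_state->context)
		*new_state->context = *old_state->context;

	return new_state;
}

static void dm_atomic_destroy_state(struct dm_atomic_state *state)
{
	if (!state)
		return;
	free(state->context);
	free(state);
}

int main(void)
{
	struct dc_state base_ctx = { .stream_count = 2 };
	struct dm_atomic_state base = { .context = &base_ctx };
	struct dm_atomic_state *dup = dm_atomic_duplicate_state(&base);

	if (dup) {
		printf("duplicated context, stream_count=%d\n",
		       dup->context->stream_count);
		dm_atomic_destroy_state(dup);
	}
	return 0;
}
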
@@ -1569,27 +1704,60 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
return 0;
}
+#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
+#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
+{
+#if defined(CONFIG_ACPI)
+ struct amdgpu_dm_backlight_caps caps;
+
+ if (dm->backlight_caps.caps_valid)
+ return;
+
+ amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
+ if (caps.caps_valid) {
+ dm->backlight_caps.min_input_signal = caps.min_input_signal;
+ dm->backlight_caps.max_input_signal = caps.max_input_signal;
+ dm->backlight_caps.caps_valid = true;
+ } else {
+ dm->backlight_caps.min_input_signal =
+ AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps.max_input_signal =
+ AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ }
+#else
+ dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+#endif
+}
+
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
+ struct amdgpu_dm_backlight_caps caps;
+ uint32_t brightness = bd->props.brightness;
- /* backlight_pwm_u16_16 parameter is in unsigned 32 bit, 16 bit integer
- * and 16 bit fractional, where 1.0 is max backlight value.
- * bd->props.brightness is 8 bit format and needs to be converted by
- * scaling via copy lower byte to upper byte of 16 bit value.
- */
- uint32_t brightness = bd->props.brightness * 0x101;
-
+ amdgpu_dm_update_backlight_caps(dm);
+ caps = dm->backlight_caps;
/*
- * PWM interperts 0 as 100% rather than 0% because of HW
- * limitation for level 0. So limiting minimum brightness level
- * to 1.
+ * The brightness input is in the range 0-255
+ * It needs to be rescaled to be between the
+ * requested min and max input signal
+ *
+ * It also needs to be scaled up by 0x101 to
+ * match the DC interface which has a range of
+ * 0 to 0xffff
*/
- if (bd->props.brightness < 1)
- brightness = 0x101;
+ brightness =
+ brightness
+ * 0x101
+ * (caps.max_input_signal - caps.min_input_signal)
+ / AMDGPU_MAX_BL_LEVEL
+ + caps.min_input_signal * 0x101;
if (dc_link_set_backlight_level(dm->backlight_link,
brightness, 0, 0))
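
The new backlight path above rescales the 0-255 property value into the [min, max] input-signal range reported by ACPI (or the defaults added in this patch), then multiplies by 0x101 so that 255 maps to the 0xffff full-scale value expected by DC. A small sketch of that arithmetic with the default caps (min 12, max 255):

#include <stdint.h>
#include <stdio.h>

#define AMDGPU_MAX_BL_LEVEL 255	/* 8-bit brightness property range */

static uint32_t scale_brightness(uint32_t brightness,
				 uint32_t min_signal, uint32_t max_signal)
{
	/* Same expression as amdgpu_dm_backlight_update_status() above. */
	return brightness * 0x101 * (max_signal - min_signal)
			/ AMDGPU_MAX_BL_LEVEL
			+ min_signal * 0x101;
}

int main(void)
{
	printf("0   -> 0x%04x\n", scale_brightness(0, 12, 255));   /* 0x0c0c */
	printf("255 -> 0x%04x\n", scale_brightness(255, 12, 255)); /* 0xffff */
	return 0;
}
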
@@ -1619,6 +1787,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
char bl_name[16];
struct backlight_properties props = { 0 };
+ amdgpu_dm_update_backlight_caps(dm);
+
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
@@ -1850,6 +2020,7 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
drm_mode_config_cleanup(dm->ddev);
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
return;
}
@@ -1869,73 +2040,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
/* TODO: implement later */
}
-static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
- struct drm_file *filp)
-{
- struct drm_atomic_state *state;
- struct drm_modeset_acquire_ctx ctx;
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
- int ret = 0;
- uint8_t i;
- bool enable = false;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- state = drm_atomic_state_alloc(dev);
- if (!state) {
- ret = -ENOMEM;
- goto out;
- }
- state->acquire_ctx = &ctx;
-
-retry:
- drm_for_each_crtc(crtc, dev) {
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- goto fail;
-
- /* TODO rework amdgpu_dm_commit_planes so we don't need this */
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- goto fail;
- }
-
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
- struct drm_crtc_state *new_crtc_state;
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct dm_crtc_state *dm_new_crtc_state;
-
- if (!acrtc) {
- ASSERT(0);
- continue;
- }
-
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-
- dm_new_crtc_state->freesync_enabled = enable;
- }
-
- ret = drm_atomic_commit(state);
-
-fail:
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
- goto retry;
- }
-
- drm_atomic_state_put(state);
-
-out:
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
- return ret;
-}
-
static const struct amdgpu_display_funcs dm_display_funcs = {
.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
@@ -1948,8 +2052,6 @@ static const struct amdgpu_display_funcs dm_display_funcs = {
dm_crtc_get_scanoutpos,/* called unconditionally */
.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
.add_connector = NULL, /* VBIOS parsing. DAL does it. */
- .notify_freesync = amdgpu_notify_freesync,
-
};
#if defined(CONFIG_DEBUG_KERNEL_DC)
@@ -2550,7 +2652,8 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
static void
fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
const struct drm_display_mode *mode_in,
- const struct drm_connector *connector)
+ const struct drm_connector *connector,
+ const struct dc_stream_state *old_stream)
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
@@ -2576,7 +2679,18 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
connector);
timing_out->scan_type = SCANNING_TYPE_NODATA;
timing_out->hdmi_vic = 0;
- timing_out->vic = drm_match_cea_mode(mode_in);
+
+ if(old_stream) {
+ timing_out->vic = old_stream->timing.vic;
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
+ } else {
+ timing_out->vic = drm_match_cea_mode(mode_in);
+ if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+ if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+ }
timing_out->h_addressable = mode_in->crtc_hdisplay;
timing_out->h_total = mode_in->crtc_htotal;
@@ -2592,10 +2706,6 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
timing_out->pix_clk_khz = mode_in->crtc_clock;
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
- if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
- timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
- if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
- timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
stream->output_color_space = get_output_color_space(timing_out);
@@ -2618,9 +2728,9 @@ static void fill_audio_info(struct audio_info *audio_info,
cea_revision = drm_connector->display_info.cea_rev;
- strncpy(audio_info->display_name,
+ strscpy(audio_info->display_name,
edid_caps->display_name,
- AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
if (cea_revision >= 3) {
audio_info->mode_count = edid_caps->audio_mode_count;
@@ -2758,13 +2868,18 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct drm_display_mode *drm_mode,
- const struct dm_connector_state *dm_state)
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream)
{
struct drm_display_mode *preferred_mode = NULL;
struct drm_connector *drm_connector;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
bool native_mode_found = false;
+ bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+ int mode_refresh;
+ int preferred_refresh = 0;
+
struct dc_sink *sink = NULL;
if (aconnector == NULL) {
DRM_ERROR("aconnector is NULL!\n");
@@ -2803,6 +2918,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct drm_display_mode,
head);
+ mode_refresh = drm_mode_vrefresh(&mode);
+
if (preferred_mode == NULL) {
/*
* This may not be an error, the use case is when we have no
@@ -2815,13 +2932,23 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
decide_crtc_timing_for_drm_display_mode(
&mode, preferred_mode,
dm_state ? (dm_state->scaling != RMX_OFF) : false);
+ preferred_refresh = drm_mode_vrefresh(preferred_mode);
}
if (!dm_state)
drm_mode_set_crtcinfo(&mode, 0);
- fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base);
+ /*
+ * If scaling is enabled and refresh rate didn't change
+ * we copy the vic and polarities of the old timings
+ */
+ if (!scale || mode_refresh != preferred_refresh)
+ fill_stream_properties_from_drm_display_mode(stream,
+ &mode, &aconnector->base, NULL);
+ else
+ fill_stream_properties_from_drm_display_mode(stream,
+ &mode, &aconnector->base, old_stream);
+
update_stream_scaling_settings(&mode, dm_state, stream);
fill_audio_info(
@@ -2833,6 +2960,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (dm_state && dm_state->freesync_capable)
stream->ignore_msa_timing_param = true;
+
finish:
if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
dc_sink_release(sink);
@@ -2899,9 +3027,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
dc_stream_retain(state->stream);
}
- state->adjust = cur->adjust;
+ state->vrr_params = cur->vrr_params;
state->vrr_infopacket = cur->vrr_infopacket;
- state->freesync_enabled = cur->freesync_enabled;
+ state->abm_level = cur->abm_level;
+ state->vrr_supported = cur->vrr_supported;
+ state->freesync_config = cur->freesync_config;
+ state->crc_enabled = cur->crc_enabled;
/* TODO Duplicate dc_stream after objects are stream object is flattened */
@@ -3017,6 +3148,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.max_bpc_property) {
dm_new_state->max_bpc = val;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ dm_new_state->abm_level = val;
+ ret = 0;
}
return ret;
@@ -3062,7 +3196,11 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.max_bpc_property) {
*val = dm_state->max_bpc;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ *val = dm_state->abm_level;
+ ret = 0;
}
+
return ret;
}
@@ -3106,6 +3244,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->underscan_enable = false;
state->underscan_hborder = 0;
state->underscan_vborder = 0;
+ state->max_bpc = 8;
__drm_atomic_helper_connector_reset(connector, &state->base);
}
@@ -3126,7 +3265,12 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
new_state->freesync_capable = state->freesync_capable;
- new_state->freesync_enable = state->freesync_enable;
+ new_state->abm_level = state->abm_level;
+ new_state->scaling = state->scaling;
+ new_state->underscan_enable = state->underscan_enable;
+ new_state->underscan_hborder = state->underscan_hborder;
+ new_state->underscan_vborder = state->underscan_vborder;
+ new_state->max_bpc = state->max_bpc;
return &new_state->base;
}
@@ -3228,7 +3372,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
goto fail;
}
- stream = create_stream_for_sink(aconnector, mode, NULL);
+ stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
if (stream == NULL) {
DRM_ERROR("Failed to create stream for sink!\n");
goto fail;
@@ -3499,10 +3643,53 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+static int dm_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(new_plane_state->state, plane);
+
+ /* Only support async updates on cursor planes. */
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ return -EINVAL;
+
+ /*
+ * DRM calls prepare_fb and cleanup_fb on new_plane_state for
+ * async commits so don't allow fb changes.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void dm_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(new_state->state, plane);
+
+ if (plane->state->fb != new_state->fb)
+ drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_w = new_state->src_w;
+ plane->state->src_h = new_state->src_h;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->crtc_h = new_state->crtc_h;
+
+ handle_cursor_update(plane, old_state);
+}
+
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.prepare_fb = dm_plane_helper_prepare_fb,
.cleanup_fb = dm_plane_helper_cleanup_fb,
.atomic_check = dm_plane_atomic_check,
+ .atomic_async_check = dm_plane_atomic_async_check,
+ .atomic_async_update = dm_plane_atomic_async_update
};
/*
@@ -3716,7 +3903,7 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
mode->hdisplay = hdisplay;
mode->vdisplay = vdisplay;
mode->type &= ~DRM_MODE_TYPE_PREFERRED;
- strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+ strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
return mode;
@@ -3876,6 +4063,17 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
adev->mode_info.max_bpc_property,
0);
+ if (connector_type == DRM_MODE_CONNECTOR_eDP &&
+ dc_is_dmcu_initialized(adev->dm.dc)) {
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.abm_level_property, 0);
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ drm_connector_attach_vrr_capable_property(
+ &aconnector->base);
+ }
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -4180,6 +4378,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state)
{
+ struct amdgpu_device *adev = plane->dev->dev_private;
struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
@@ -4204,9 +4403,12 @@ static void handle_cursor_update(struct drm_plane *plane,
if (!position.enable) {
/* turn off cursor */
- if (crtc_state && crtc_state->stream)
+ if (crtc_state && crtc_state->stream) {
+ mutex_lock(&adev->dm.dc_lock);
dc_stream_set_cursor_position(crtc_state->stream,
&position);
+ mutex_unlock(&adev->dm.dc_lock);
+ }
return;
}
@@ -4224,6 +4426,7 @@ static void handle_cursor_update(struct drm_plane *plane,
attributes.pitch = attributes.width;
if (crtc_state->stream) {
+ mutex_lock(&adev->dm.dc_lock);
if (!dc_stream_set_cursor_attributes(crtc_state->stream,
&attributes))
DRM_ERROR("DC failed to set cursor attributes\n");
@@ -4231,6 +4434,7 @@ static void handle_cursor_update(struct drm_plane *plane,
if (!dc_stream_set_cursor_position(crtc_state->stream,
&position))
DRM_ERROR("DC failed to set cursor position\n");
+ mutex_unlock(&adev->dm.dc_lock);
}
}
@@ -4252,6 +4456,102 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
acrtc->crtc_id);
}
+struct dc_stream_status *dc_state_get_stream_status(
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (stream == state->streams[i])
+ return &state->stream_status[i];
+ }
+
+ return NULL;
+}
+
+static void update_freesync_state_on_stream(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state,
+ struct dc_stream_state *new_stream,
+ struct dc_plane_state *surface,
+ u32 flip_timestamp_in_us)
+{
+ struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
+ struct dc_info_packet vrr_infopacket = {0};
+ struct mod_freesync_config config = new_crtc_state->freesync_config;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ if (new_crtc_state->vrr_supported &&
+ config.min_refresh_in_uhz &&
+ config.max_refresh_in_uhz) {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
+ }
+
+ mod_freesync_build_vrr_params(dm->freesync_module,
+ new_stream,
+ &config, &vrr_params);
+
+ if (surface) {
+ mod_freesync_handle_preflip(
+ dm->freesync_module,
+ surface,
+ new_stream,
+ flip_timestamp_in_us,
+ &vrr_params);
+ }
+
+ mod_freesync_build_vrr_infopacket(
+ dm->freesync_module,
+ new_stream,
+ &vrr_params,
+ PACKET_TYPE_VRR,
+ TRANSFER_FUNC_UNKNOWN,
+ &vrr_infopacket);
+
+ new_crtc_state->freesync_timing_changed =
+ (memcmp(&new_crtc_state->vrr_params.adjust,
+ &vrr_params.adjust,
+ sizeof(vrr_params.adjust)) != 0);
+
+ new_crtc_state->freesync_vrr_info_changed =
+ (memcmp(&new_crtc_state->vrr_infopacket,
+ &vrr_infopacket,
+ sizeof(vrr_infopacket)) != 0);
+
+ new_crtc_state->vrr_params = vrr_params;
+ new_crtc_state->vrr_infopacket = vrr_infopacket;
+
+ new_stream->adjust = new_crtc_state->vrr_params.adjust;
+ new_stream->vrr_infopacket = vrr_infopacket;
+
+ if (new_crtc_state->freesync_vrr_info_changed)
+ DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
+ new_crtc_state->base.crtc->base.id,
+ (int)new_crtc_state->base.vrr_enabled,
+ (int)vrr_params.state);
+
+ if (new_crtc_state->freesync_timing_changed)
+ DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
+ new_crtc_state->base.crtc->base.id,
+ vrr_params.adjust.v_total_min,
+ vrr_params.adjust.v_total_max);
+}
+
/*
* Executes flip
*
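
update_freesync_state_on_stream() above decides whether a flip needs a timing adjustment and/or a fresh VRR infopacket by memcmp'ing the newly built parameters against the ones cached in the CRTC state, then caching the new values. A reduced sketch of that change-detection step; the struct layouts are illustrative stand-ins, not the real mod_vrr_params/dc_info_packet:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the cached and freshly built VRR data. */
struct vrr_adjust { unsigned int v_total_min, v_total_max; };
struct vrr_packet { unsigned char payload[8]; };

struct crtc_vrr_cache {
	struct vrr_adjust adjust;
	struct vrr_packet infopacket;
	bool timing_changed;
	bool info_changed;
};

static void detect_vrr_changes(struct crtc_vrr_cache *cache,
			       const struct vrr_adjust *new_adjust,
			       const struct vrr_packet *new_packet)
{
	cache->timing_changed =
		memcmp(&cache->adjust, new_adjust, sizeof(*new_adjust)) != 0;
	cache->info_changed =
		memcmp(&cache->infopacket, new_packet, sizeof(*new_packet)) != 0;

	/* Cache the new values so the next flip compares against them. */
	cache->adjust = *new_adjust;
	cache->infopacket = *new_packet;
}

int main(void)
{
	struct crtc_vrr_cache cache = { .adjust = { 2160, 2400 } };
	struct vrr_adjust next = { 2160, 2500 };
	struct vrr_packet pkt = { { 0 } };

	detect_vrr_changes(&cache, &next, &pkt);
	printf("timing_changed=%d info_changed=%d\n",
	       cache.timing_changed, cache.info_changed);
	return 0;
}
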
@@ -4263,6 +4563,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
struct dc_state *state)
{
unsigned long flags;
+ uint64_t timestamp_ns;
uint32_t target_vblank;
int r, vpos, hpos;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -4273,8 +4574,10 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
struct dc_flip_addrs addr = { {0} };
/* TODO eliminate or rename surface_update */
struct dc_surface_update surface_updates[1] = { {0} };
+ struct dc_stream_update stream_update = {0};
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
struct dc_stream_status *stream_status;
+ struct dc_plane_state *surface;
/* Prepare wait for target vblank early - before the fence-waits */
@@ -4324,6 +4627,9 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
addr.address.grph.addr.high_part = upper_32_bits(afb->address);
addr.flip_immediate = async_flip;
+ timestamp_ns = ktime_get_ns();
+ addr.flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+
if (acrtc->base.state->event)
prepare_flip_isr(acrtc);
@@ -4337,21 +4643,51 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
return;
}
- surface_updates->surface = stream_status->plane_states[0];
- if (!surface_updates->surface) {
+ surface = stream_status->plane_states[0];
+ surface_updates->surface = surface;
+
+ if (!surface) {
DRM_ERROR("No surface for CRTC: id=%d\n",
acrtc->crtc_id);
return;
}
surface_updates->flip_addr = &addr;
+ if (acrtc_state->stream) {
+ update_freesync_state_on_stream(
+ &adev->dm,
+ acrtc_state,
+ acrtc_state->stream,
+ surface,
+ addr.flip_timestamp_in_us);
+
+ if (acrtc_state->freesync_timing_changed)
+ stream_update.adjust =
+ &acrtc_state->stream->adjust;
+
+ if (acrtc_state->freesync_vrr_info_changed)
+ stream_update.vrr_infopacket =
+ &acrtc_state->stream->vrr_infopacket;
+ }
+
+ /* Update surface timing information. */
+ surface->time.time_elapsed_in_us[surface->time.index] =
+ addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us;
+ surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us;
+ surface->time.index++;
+ if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
+ surface->time.index = 0;
+
+ mutex_lock(&adev->dm.dc_lock);
+
dc_commit_updates_for_stream(adev->dm.dc,
surface_updates,
1,
acrtc_state->stream,
- NULL,
+ &stream_update,
&surface_updates->surface,
state);
+ mutex_unlock(&adev->dm.dc_lock);
DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
__func__,
@@ -4366,6 +4702,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
* with a dc_plane_state and follow the atomic model a bit more closely here.
*/
static bool commit_planes_to_stream(
+ struct amdgpu_display_manager *dm,
struct dc *dc,
struct dc_plane_state **plane_states,
uint8_t new_plane_count,
@@ -4382,6 +4719,7 @@ static bool commit_planes_to_stream(
struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
struct dc_stream_update *stream_update =
kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
+ unsigned int abm_level;
if (!stream_update) {
BREAK_TO_DEBUGGER();
@@ -4409,9 +4747,9 @@ static bool commit_planes_to_stream(
stream_update->dst = dc_stream->dst;
stream_update->out_transfer_func = dc_stream->out_transfer_func;
- if (dm_new_crtc_state->freesync_enabled != dm_old_crtc_state->freesync_enabled) {
- stream_update->vrr_infopacket = &dc_stream->vrr_infopacket;
- stream_update->adjust = &dc_stream->adjust;
+ if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
+ abm_level = dm_new_crtc_state->abm_level;
+ stream_update->abm_level = &abm_level;
}
for (i = 0; i < new_plane_count; i++) {
@@ -4441,11 +4779,13 @@ static bool commit_planes_to_stream(
updates[i].scaling_info = &scaling_info[i];
}
+ mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(
dc,
updates,
new_plane_count,
dc_stream, stream_update, plane_states, state);
+ mutex_unlock(&dm->dc_lock);
kfree(flip_addr);
kfree(plane_info);
@@ -4455,6 +4795,7 @@ static bool commit_planes_to_stream(
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
struct drm_crtc *pcrtc,
@@ -4471,7 +4812,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
int planes_count = 0;
unsigned long flags;
@@ -4532,7 +4872,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
crtc,
fb,
(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
- dm_state->context);
+ dc_state);
}
}
@@ -4549,15 +4889,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
- dc_stream_attach->adjust = acrtc_state->adjust;
- dc_stream_attach->vrr_infopacket = acrtc_state->vrr_infopacket;
+ dc_stream_attach->abm_level = acrtc_state->abm_level;
- if (false == commit_planes_to_stream(dm->dc,
+ if (false == commit_planes_to_stream(dm,
+ dm->dc,
plane_states_constructed,
planes_count,
acrtc_state,
dm_old_crtc_state,
- dm_state->context))
+ dc_state))
dm_error("%s: Failed to attach plane!\n", __func__);
} else {
/*TODO BUG Here should go disable planes on CRTC. */
@@ -4625,6 +4965,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
uint32_t i, j;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -4637,7 +4978,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_update_legacy_modeset_state(dev, state);
- dm_state = to_dm_atomic_state(state);
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ } else {
+ /* No state changes, retain current state. */
+ dc_state_temp = dc_create_state();
+ ASSERT(dc_state_temp);
+ dc_state = dc_state_temp;
+ dc_resource_state_copy_construct_current(dm->dc, dc_state);
+ }
/* update changed items */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -4710,9 +5060,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
} /* for_each_crtc_in_state() */
- if (dm_state->context) {
- dm_enable_per_frame_crtc_master_sync(dm_state->context);
- WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+ if (dc_state) {
+ dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
+ WARN_ON(!dc_commit_state(dm->dc, dc_state));
+ mutex_unlock(&dm->dc_lock);
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -4725,13 +5077,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_get_status(dm_new_crtc_state->stream);
if (!status)
+ status = dc_state_get_stream_status(dc_state,
+ dm_new_crtc_state->stream);
+
+ if (!status)
DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
else
acrtc->otg_inst = status->primary_otg_inst;
}
}
- /* Handle scaling and underscan changes*/
+ /* Handle scaling, underscan, and abm changes*/
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
@@ -4747,11 +5103,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
- /* Skip anything that is not scaling or underscan changes */
- if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
- continue;
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ /* Skip anything that is not scaling or underscan changes */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
+ (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
+ continue;
update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
@@ -4763,17 +5122,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
WARN_ON(!status);
WARN_ON(!status->plane_count);
- dm_new_crtc_state->stream->adjust = dm_new_crtc_state->adjust;
- dm_new_crtc_state->stream->vrr_infopacket = dm_new_crtc_state->vrr_infopacket;
+ dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
/*TODO How it works with MPO ?*/
if (!commit_planes_to_stream(
+ dm,
dm->dc,
status->plane_states,
status->plane_count,
dm_new_crtc_state,
to_dm_crtc_state(old_crtc_state),
- dm_state->context))
+ dc_state))
dm_error("%s: Failed to update stream scaling!\n", __func__);
}
@@ -4806,7 +5165,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream)
- amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
+ amdgpu_dm_commit_planes(state, dc_state, dev,
+ dm, crtc, &wait_for_vblank);
}
@@ -4846,6 +5206,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for (i = 0; i < crtc_disable_count; i++)
pm_runtime_put_autosuspend(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
+
+ if (dc_state_temp)
+ dc_release_state(dc_state_temp);
}
@@ -4989,20 +5352,18 @@ static int do_aquire_global_lock(struct drm_device *dev,
return ret < 0 ? ret : 0;
}
-void set_freesync_on_stream(struct amdgpu_display_manager *dm,
- struct dm_crtc_state *new_crtc_state,
- struct dm_connector_state *new_con_state,
- struct dc_stream_state *new_stream)
+static void get_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state,
+ struct dm_connector_state *new_con_state)
{
struct mod_freesync_config config = {0};
- struct mod_vrr_params vrr = {0};
- struct dc_info_packet vrr_infopacket = {0};
struct amdgpu_dm_connector *aconnector =
to_amdgpu_dm_connector(new_con_state->base.connector);
- if (new_con_state->freesync_capable &&
- new_con_state->freesync_enable) {
- config.state = new_crtc_state->freesync_enabled ?
+ new_crtc_state->vrr_supported = new_con_state->freesync_capable;
+
+ if (new_con_state->freesync_capable) {
+ config.state = new_crtc_state->base.vrr_enabled ?
VRR_STATE_ACTIVE_VARIABLE :
VRR_STATE_INACTIVE;
config.min_refresh_in_uhz =
@@ -5010,21 +5371,21 @@ void set_freesync_on_stream(struct amdgpu_display_manager *dm,
config.max_refresh_in_uhz =
aconnector->max_vfreq * 1000000;
config.vsif_supported = true;
+ config.btr = true;
}
- mod_freesync_build_vrr_params(dm->freesync_module,
- new_stream,
- &config, &vrr);
+ new_crtc_state->freesync_config = config;
+}
- mod_freesync_build_vrr_infopacket(dm->freesync_module,
- new_stream,
- &vrr,
- packet_type_fs1,
- NULL,
- &vrr_infopacket);
+static void reset_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state)
+{
+ new_crtc_state->vrr_supported = false;
- new_crtc_state->adjust = vrr.adjust;
- new_crtc_state->vrr_infopacket = vrr_infopacket;
+ memset(&new_crtc_state->vrr_params, 0,
+ sizeof(new_crtc_state->vrr_params));
+ memset(&new_crtc_state->vrr_infopacket, 0,
+ sizeof(new_crtc_state->vrr_infopacket));
}
static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
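
get_freesync_config_for_crtc() above derives the per-CRTC freesync config from the connector's capability and the new core vrr_enabled flag. A compact model of that decision, assuming the connector's refresh limits are already known in Hz; the enum and field names follow the diff but the struct itself is a simplified stand-in:

#include <stdbool.h>
#include <stdio.h>

enum vrr_state {
	VRR_STATE_UNSUPPORTED,
	VRR_STATE_INACTIVE,
	VRR_STATE_ACTIVE_VARIABLE,
};

struct freesync_config {
	enum vrr_state state;
	unsigned long long min_refresh_in_uhz;
	unsigned long long max_refresh_in_uhz;
	bool vsif_supported;
	bool btr;
};

static struct freesync_config build_config(bool freesync_capable,
					   bool vrr_enabled,
					   unsigned int min_vfreq,
					   unsigned int max_vfreq)
{
	struct freesync_config config = { .state = VRR_STATE_UNSUPPORTED };

	if (freesync_capable) {
		config.state = vrr_enabled ? VRR_STATE_ACTIVE_VARIABLE
					   : VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz = (unsigned long long)min_vfreq * 1000000;
		config.max_refresh_in_uhz = (unsigned long long)max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	return config;
}

int main(void)
{
	struct freesync_config c = build_config(true, true, 40, 144);

	printf("state=%d min=%llu max=%llu\n",
	       c.state, c.min_refresh_in_uhz, c.max_refresh_in_uhz);
	return 0;
}
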
@@ -5032,11 +5393,11 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
bool enable,
bool *lock_and_validation_needed)
{
+ struct dm_atomic_state *dm_state = NULL;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
struct dc_stream_state *new_stream;
int ret = 0;
@@ -5084,7 +5445,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
new_stream = create_stream_for_sink(aconnector,
&new_crtc_state->mode,
- dm_new_conn_state);
+ dm_new_conn_state,
+ dm_old_crtc_state->stream);
/*
* we can have no stream on ACTION_SET if a display
@@ -5099,8 +5461,7 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
break;
}
- set_freesync_on_stream(dm, dm_new_crtc_state,
- dm_new_conn_state, new_stream);
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
@@ -5110,9 +5471,6 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
}
}
- if (dm_old_crtc_state->freesync_enabled != dm_new_crtc_state->freesync_enabled)
- new_crtc_state->mode_changed = true;
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto next_crtc;
@@ -5134,6 +5492,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
if (!dm_old_crtc_state->stream)
goto next_crtc;
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
crtc->base.id);
@@ -5149,6 +5511,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
dc_stream_release(dm_old_crtc_state->stream);
dm_new_crtc_state->stream = NULL;
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+
*lock_and_validation_needed = true;
} else {/* Add stream for any updated/enabled CRTC */
@@ -5168,6 +5532,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
WARN_ON(dm_new_crtc_state->stream);
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
dm_new_crtc_state->stream = new_stream;
dc_stream_retain(new_stream);
@@ -5226,7 +5594,9 @@ next_crtc:
amdgpu_dm_set_ctm(dm_new_crtc_state);
}
-
+ /* Update Freesync settings. */
+ get_freesync_config_for_crtc(dm_new_crtc_state,
+ dm_new_conn_state);
}
return ret;
@@ -5242,12 +5612,13 @@ static int dm_update_planes_state(struct dc *dc,
bool enable,
bool *lock_and_validation_needed)
{
+
+ struct dm_atomic_state *dm_state = NULL;
struct drm_crtc *new_plane_crtc, *old_plane_crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
int i ;
/* TODO return page_flip_needed() function */
@@ -5285,6 +5656,10 @@ static int dm_update_planes_state(struct dc *dc,
DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, old_plane_crtc->base.id);
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ return ret;
+
if (!dc_remove_plane_from_context(
dc,
dm_old_crtc_state->stream,
@@ -5339,6 +5714,12 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
+
/*
* Any atomic check errors that occur after this will
* not need a release. The plane state will be attached
@@ -5370,11 +5751,14 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}
-enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, struct drm_atomic_state *state)
-{
-
- int i, j, num_plane;
+static int
+dm_determine_update_type_for_commit(struct dc *dc,
+ struct drm_atomic_state *state,
+ enum surface_update_type *out_type)
+{
+ struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
+ int i, j, num_plane, ret = 0;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
struct drm_crtc *new_plane_crtc, *old_plane_crtc;
@@ -5394,7 +5778,7 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
DRM_ERROR("Plane or surface update failed to allocate");
/* Set type to FULL to avoid crashing in DC*/
update_type = UPDATE_TYPE_FULL;
- goto ret;
+ goto cleanup;
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -5448,27 +5832,40 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
}
if (num_plane > 0) {
- status = dc_stream_get_status(new_dm_crtc_state->stream);
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto cleanup;
+
+ old_dm_state = dm_atomic_get_old_state(state);
+ if (!old_dm_state) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ status = dc_state_get_stream_status(old_dm_state->context,
+ new_dm_crtc_state->stream);
+
update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
&stream_update, status);
if (update_type > UPDATE_TYPE_MED) {
update_type = UPDATE_TYPE_FULL;
- goto ret;
+ goto cleanup;
}
}
} else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
update_type = UPDATE_TYPE_FULL;
- goto ret;
+ goto cleanup;
}
}
-ret:
+cleanup:
kfree(updates);
kfree(surface);
- return update_type;
+ *out_type = update_type;
+ return ret;
}
/**
@@ -5500,8 +5897,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct amdgpu_device *adev = dev->dev_private;
+ struct dm_atomic_state *dm_state = NULL;
struct dc *dc = adev->dm.dc;
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
struct drm_connector *connector;
struct drm_connector_state *old_con_state, *new_con_state;
struct drm_crtc *crtc;
@@ -5522,12 +5919,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
- (dm_old_crtc_state->freesync_enabled == dm_new_crtc_state->freesync_enabled))
+ !new_crtc_state->vrr_enabled)
continue;
if (!new_crtc_state->enable)
@@ -5542,10 +5936,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
- dm_state->context = dc_create_state();
- ASSERT(dm_state->context);
- dc_resource_state_copy_construct_current(dc, dm_state->context);
-
/* Remove existing planes if they are modified */
ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
if (ret) {
@@ -5598,7 +5988,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
- update_type = dm_determine_update_type_for_commit(dc, state);
+ ret = dm_determine_update_type_for_commit(dc, state, &update_type);
+ if (ret)
+ goto fail;
if (overall_update_type < update_type)
overall_update_type = update_type;
@@ -5616,6 +6008,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (overall_update_type > UPDATE_TYPE_FAST) {
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
ret = do_aquire_global_lock(dev, state);
if (ret)
@@ -5625,6 +6020,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = -EINVAL;
goto fail;
}
+ } else if (state->legacy_cursor_update) {
+ /*
+ * This is a fast cursor update coming from the plane update
+ * helper, check if it can be done asynchronously for better
+ * performance.
+ */
+ state->async_update = !drm_atomic_helper_async_check(dev, state);
}
/* Must be success */
@@ -5670,14 +6072,15 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct detailed_data_monitor_range *range;
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- struct dm_connector_state *dm_con_state;
+ struct dm_connector_state *dm_con_state = NULL;
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
+ bool freesync_capable = false;
if (!connector->state) {
DRM_ERROR("%s - Connector has no state", __func__);
- return;
+ goto update;
}
if (!edid) {
@@ -5687,9 +6090,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector->max_vfreq = 0;
amdgpu_dm_connector->pixel_clock_mhz = 0;
- dm_con_state->freesync_capable = false;
- dm_con_state->freesync_enable = false;
- return;
+ goto update;
}
dm_con_state = to_dm_connector_state(connector->state);
@@ -5697,10 +6098,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
edid_check_required = false;
if (!amdgpu_dm_connector->dc_sink) {
DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
- return;
+ goto update;
}
if (!adev->dm.freesync_module)
- return;
+ goto update;
/*
* if edid non zero restrict freesync only for dp and edp
*/
@@ -5712,7 +6113,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector);
}
}
- dm_con_state->freesync_capable = false;
if (edid_check_required == true && (edid->version > 1 ||
(edid->version == 1 && edid->revision > 1))) {
for (i = 0; i < 4; i++) {
@@ -5744,8 +6144,16 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (amdgpu_dm_connector->max_vfreq -
amdgpu_dm_connector->min_vfreq > 10) {
- dm_con_state->freesync_capable = true;
+ freesync_capable = true;
}
}
+
+update:
+ if (dm_con_state)
+ dm_con_state->freesync_capable = freesync_capable;
+
+ if (connector->vrr_capable_property)
+ drm_connector_set_vrr_capable_property(connector,
+ freesync_capable);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 607c3cdd7d0c..fbd161ddc3f4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -84,6 +84,18 @@ struct dm_comressor_info {
};
/**
+ * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI
+ * @min_input_signal: minimum possible input in range 0-255
+ * @max_input_signal: maximum possible input in range 0-255
+ * @caps_valid: true if these values are from the ACPI interface
+ */
+struct amdgpu_dm_backlight_caps {
+ int min_input_signal;
+ int max_input_signal;
+ bool caps_valid;
+};
+
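The caps above only record a usable range; as a minimal illustrative sketch (not part of this patch), a 0-255 user brightness request could be rescaled into that range roughly as follows. The helper name is hypothetical; only struct amdgpu_dm_backlight_caps comes from the definition above.

static unsigned int sketch_scale_brightness(const struct amdgpu_dm_backlight_caps *caps,
					     unsigned int user_level)
{
	/* Fall back to the full 0-255 range when ACPI provided no caps. */
	unsigned int min = caps->caps_valid ? caps->min_input_signal : 0;
	unsigned int max = caps->caps_valid ? caps->max_input_signal : 255;

	/* Linear rescale of [0, 255] onto [min, max]. */
	return min + user_level * (max - min) / 255;
}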
+/**
* struct amdgpu_display_manager - Central amdgpu display manager device
*
* @dc: Display Core control structure
@@ -112,6 +124,25 @@ struct amdgpu_display_manager {
u16 display_indexes_num;
/**
+	 * @atomic_obj:
+ *
+ * In combination with &dm_atomic_state it helps manage
+ * global atomic state that doesn't map cleanly into existing
+ * drm resources, like &dc_context.
+ */
+ struct drm_private_obj atomic_obj;
+
+ struct drm_modeset_lock atomic_obj_lock;
+
+ /**
+ * @dc_lock:
+ *
+ * Guards access to DC functions that can issue register write
+ * sequences.
+ */
+ struct mutex dc_lock;
+
+ /**
* @irq_handler_list_low_tab:
*
* Low priority IRQ handler table.
@@ -158,6 +189,7 @@ struct amdgpu_display_manager {
struct backlight_device *backlight_dev;
const struct dc_link *backlight_link;
+ struct amdgpu_dm_backlight_caps backlight_caps;
struct mod_freesync *freesync_module;
@@ -231,15 +263,21 @@ struct dm_crtc_state {
int crc_skip_count;
bool crc_enabled;
- bool freesync_enabled;
- struct dc_crtc_timing_adjust adjust;
+ bool freesync_timing_changed;
+ bool freesync_vrr_info_changed;
+
+ bool vrr_supported;
+ struct mod_freesync_config freesync_config;
+ struct mod_vrr_params vrr_params;
struct dc_info_packet vrr_infopacket;
+
+ int abm_level;
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
struct dm_atomic_state {
- struct drm_atomic_state base;
+ struct drm_private_state base;
struct dc_state *context;
};
@@ -254,8 +292,8 @@ struct dm_connector_state {
uint8_t underscan_hborder;
uint8_t max_bpc;
bool underscan_enable;
- bool freesync_enable;
bool freesync_capable;
+ uint8_t abm_level;
};
#define to_dm_connector_state(x)\
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 24665795b1ad..24632727e127 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -343,10 +343,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
master->connector_id);
aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
+ drm_connector_attach_encoder(&aconnector->base,
+ &aconnector->mst_encoder->base);
- /*
- * TODO: understand why this one is needed
- */
drm_object_attach_property(
&connector->base,
dev->mode_config.path_property,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
new file mode 100644
index 000000000000..d898981684d5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM amdgpu_dm
+
+#if !defined(_AMDGPU_DM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _AMDGPU_DM_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(amdgpu_dc_rreg,
+ TP_PROTO(unsigned long *read_count, uint32_t reg, uint32_t value),
+ TP_ARGS(read_count, reg, value),
+ TP_STRUCT__entry(
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->value = value;
+ *read_count = *read_count + 1;
+ ),
+ TP_printk("reg=0x%08lx, value=0x%08lx",
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
+TRACE_EVENT(amdgpu_dc_wreg,
+ TP_PROTO(unsigned long *write_count, uint32_t reg, uint32_t value),
+ TP_ARGS(write_count, reg, value),
+ TP_STRUCT__entry(
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->value = value;
+ *write_count = *write_count + 1;
+ ),
+ TP_printk("reg=0x%08lx, value=0x%08lx",
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
+
+TRACE_EVENT(amdgpu_dc_performance,
+ TP_PROTO(unsigned long read_count, unsigned long write_count,
+ unsigned long *last_read, unsigned long *last_write,
+ const char *func, unsigned int line),
+ TP_ARGS(read_count, write_count, last_read, last_write, func, line),
+ TP_STRUCT__entry(
+ __field(uint32_t, reads)
+ __field(uint32_t, writes)
+ __field(uint32_t, read_delta)
+ __field(uint32_t, write_delta)
+ __string(func, func)
+ __field(uint32_t, line)
+ ),
+ TP_fast_assign(
+ __entry->reads = read_count;
+ __entry->writes = write_count;
+ __entry->read_delta = read_count - *last_read;
+ __entry->write_delta = write_count - *last_write;
+ __assign_str(func, func);
+ __entry->line = line;
+ *last_read = read_count;
+ *last_write = write_count;
+ ),
+ TP_printk("%s:%d reads=%08ld (%08ld total), writes=%08ld (%08ld total)",
+ __get_str(func), __entry->line,
+ (unsigned long)__entry->read_delta,
+ (unsigned long)__entry->reads,
+ (unsigned long)__entry->write_delta,
+ (unsigned long)__entry->writes)
+);
+#endif /* _AMDGPU_DM_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE amdgpu_dm_trace
+#include <trace/define_trace.h>
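As a rough usage sketch (not part of this patch), these events would be instantiated once by defining CREATE_TRACE_POINTS before including the header, and then fired from register-access wrappers. The wrapper and counter names below are hypothetical; only the trace_amdgpu_dc_rreg() call and its signature follow from the TRACE_EVENT definition above.

#define CREATE_TRACE_POINTS
#include "amdgpu_dm_trace.h"

#include <linux/io.h>

static unsigned long sketch_read_count;

static uint32_t sketch_dc_read_reg(void __iomem *base, uint32_t reg)
{
	uint32_t value = readl(base + reg);

	/* Records reg/value and bumps the read counter through the pointer. */
	trace_amdgpu_dc_rreg(&sketch_read_count, reg, value);
	return value;
}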
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 751bb614fc0e..c513ab6f3843 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -638,6 +638,7 @@ static enum bp_result get_ss_info_v4_1(
{
enum bp_result result = BP_RESULT_OK;
struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
+ struct atom_smu_info_v3_3 *smu_info = NULL;
if (!ss_info)
return BP_RESULT_BADINPUT;
@@ -650,6 +651,7 @@ static enum bp_result get_ss_info_v4_1(
if (!disp_cntl_tbl)
return BP_RESULT_BADBIOSTABLE;
+
ss_info->type.STEP_AND_DELAY_INFO = false;
ss_info->spread_percentage_divider = 1000;
/* BIOS no longer uses target clock. Always enable for now */
@@ -688,6 +690,19 @@ static enum bp_result get_ss_info_v4_1(
*/
result = BP_RESULT_UNSUPPORTED;
break;
+ case AS_SIGNAL_TYPE_XGMI:
+ smu_info = GET_IMAGE(struct atom_smu_info_v3_3,
+ DATA_TABLES(smu_info));
+ if (!smu_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ ss_info->spread_spectrum_percentage =
+ smu_info->waflclk_ss_percentage;
+ ss_info->spread_spectrum_range =
+ smu_info->gpuclk_ss_rate_10hz * 10;
+ if (smu_info->waflclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
default:
result = BP_RESULT_UNSUPPORTED;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 65b006ad372e..8196f3bb10c7 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -67,6 +67,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
return true;
#endif
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 3279e26c3440..5fd52094d459 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -62,6 +62,55 @@
const static char DC_BUILD_ID[] = "production-build";
+/**
+ * DOC: Overview
+ *
+ * DC is the OS-agnostic component of the amdgpu DC driver.
+ *
+ * DC maintains and validates a set of structs representing the state of the
+ * driver and writes that state to AMD hardware
+ *
+ * Main DC HW structs:
+ *
+ * struct dc - The central struct. One per driver. Created on driver load,
+ * destroyed on driver unload.
+ *
+ * struct dc_context - One per driver.
+ * Used as a backpointer by most other structs in dc.
+ *
+ * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
+ * plugpoints). Created on driver load, destroyed on driver unload.
+ *
+ * struct dc_sink - One per display. Created on boot or hotplug.
+ * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
+ * (the display directly attached). It may also have one or more remote
+ * sinks (in the Multi-Stream Transport case)
+ *
+ * struct resource_pool - One per driver. Represents the hw blocks not in the
+ * main pipeline. Not directly accessible by dm.
+ *
+ * Main dc state structs:
+ *
+ * These structs can be created and destroyed as needed. There is a full set of
+ * these structs in dc->current_state representing the currently programmed state.
+ *
+ * struct dc_state - The global DC state to track global state information,
+ * such as bandwidth values.
+ *
+ * struct dc_stream_state - Represents the hw configuration for the pipeline from
+ * a framebuffer to a display. Maps one-to-one with dc_sink.
+ *
+ * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
+ * and may have more in the Multi-Plane Overlay case.
+ *
+ * struct resource_context - Represents the programmable state of everything in
+ * the resource_pool. Not directly accessible by dm.
+ *
+ * struct pipe_ctx - A member of struct resource_context. Represents the
+ * internal hardware pipeline components. Each dc_plane_state has either
+ * one or two (in the pipe-split case).
+ */
+
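To make the relationships above concrete, here is a small illustrative sketch (not part of this patch) that walks dc->current_state the same way the backlight code later in this series does; the helper name is hypothetical, while the pipe_ctx/stream/sink/link fields match the usage visible elsewhere in this diff.

static int sketch_count_pipes_on_link(struct dc *dc, const struct dc_link *link)
{
	int i, count = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
			&dc->current_state->res_ctx.pipe_ctx[i];

		/* A pipe drives this link if its stream's sink points at it. */
		if (pipe->stream && pipe->stream->sink->link == link)
			count++;
	}

	return count;
}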
/*******************************************************************************
* Private functions
******************************************************************************/
@@ -102,10 +151,6 @@ static bool create_links(
return false;
}
- if (connectors_num == 0 && num_virtual_links == 0) {
- dm_error("DC: Number of connectors is zero!\n");
- }
-
dm_output_to_console(
"DC: %s: connectors_num: physical:%d, virtual:%d\n",
__func__,
@@ -175,6 +220,17 @@ failed_alloc:
return false;
}
+static struct dc_perf_trace *dc_perf_trace_create(void)
+{
+ return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
+}
+
+static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
+{
+ kfree(*perf_trace);
+ *perf_trace = NULL;
+}
+
/**
*****************************************************************************
* Function: dc_stream_adjust_vmin_vmax
@@ -240,7 +296,7 @@ bool dc_stream_get_crtc_position(struct dc *dc,
}
/**
- * dc_stream_configure_crc: Configure CRC capture for the given stream.
+ * dc_stream_configure_crc() - Configure CRC capture for the given stream.
* @dc: DC Object
* @stream: The stream to configure CRC on.
* @enable: Enable CRC if true, disable otherwise.
@@ -292,7 +348,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
}
/**
- * dc_stream_get_crc: Get CRC values for the given stream.
+ * dc_stream_get_crc() - Get CRC values for the given stream.
* @dc: DC object
* @stream: The DC stream state of the stream to get CRCs from.
* @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
@@ -328,7 +384,7 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option)
{
struct bit_depth_reduction_params params;
- struct dc_link *link = stream->status.link;
+ struct dc_link *link = stream->sink->link;
struct pipe_ctx *pipes = NULL;
int i;
@@ -536,6 +592,8 @@ static void destruct(struct dc *dc)
if (dc->ctx->created_bios)
dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ dc_perf_trace_destroy(&dc->ctx->perf_trace);
+
kfree(dc->ctx);
dc->ctx = NULL;
@@ -659,6 +717,12 @@ static bool construct(struct dc *dc,
goto fail;
}
+ dc_ctx->perf_trace = dc_perf_trace_create();
+ if (!dc_ctx->perf_trace) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
/* Create GPIO service */
dc_ctx->gpio_service = dal_gpio_service_create(
dc_version,
@@ -1329,6 +1393,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
return overall_type;
}
+/**
+ * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
+ *
+ * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
+ */
enum surface_update_type dc_check_update_surfaces_for_stream(
struct dc *dc,
struct dc_surface_update *updates,
@@ -1398,7 +1467,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
- stream_update->vsc_infopacket) {
+ stream_update->vsc_infopacket ||
+ stream_update->vsp_infopacket) {
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
}
@@ -1409,6 +1479,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->output_csc_transform)
dc_stream_program_csc_matrix(dc, stream);
+ if (stream_update->dither_option) {
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+ }
+
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
@@ -1492,9 +1570,6 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
- if (update_type == UPDATE_TYPE_FULL)
- context_timing_trace(dc, &context->res_ctx);
-
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
/* Lock the top pipe while updating plane addrs, since freesync requires
@@ -1631,6 +1706,9 @@ enum dc_irq_source dc_interrupt_to_irq_source(
return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
+/**
+ * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
+ */
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
@@ -1686,6 +1764,15 @@ void dc_resume(struct dc *dc)
core_link_resume(dc->links[i]);
}
+bool dc_is_dmcu_initialized(struct dc *dc)
+{
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (dmcu)
+ return dmcu->funcs->is_dmcu_initialized(dmcu);
+ return false;
+}
+
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
@@ -1715,6 +1802,11 @@ static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink
return true;
}
+/**
+ * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
+ *
+ * EDID length is in bytes
+ */
struct dc_sink *dc_link_add_remote_sink(
struct dc_link *link,
const uint8_t *edid,
@@ -1773,6 +1865,12 @@ fail_add_sink:
return NULL;
}
+/**
+ * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
+ *
+ * Note that this just removes the struct dc_sink - it doesn't
+ * program hardware or alter other members of dc_link
+ */
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
int i;
@@ -1810,4 +1908,4 @@ void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx
info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz;
info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz;
-}
\ No newline at end of file
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 7ee9c033acbd..52deacf39841 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -198,6 +198,13 @@ static bool program_hpd_filter(
return result;
}
+/**
+ * dc_link_detect_sink() - Determine if there is a sink connected
+ *
+ * @type: Returned connection type
+ * Does not detect downstream devices, such as MST sinks
+ * or displays connected through active dongles
+ */
bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
{
uint32_t is_hpd_high = 0;
@@ -208,6 +215,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
return true;
}
+ if (link->connector_signal == SIGNAL_TYPE_EDP)
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+
/* todo: may need to lock gpio access */
hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
if (hpd_pin == NULL)
@@ -324,15 +334,15 @@ static enum signal_type get_basic_signal_type(
return SIGNAL_TYPE_NONE;
}
-/*
- * @brief
- * Check whether there is a dongle on DP connector
+/**
+ * dc_link_is_dp_sink_present() - Check if there is a native DP
+ * or passive DP-HDMI dongle connected
*/
bool dc_link_is_dp_sink_present(struct dc_link *link)
{
enum gpio_result gpio_result;
uint32_t clock_pin = 0;
-
+ uint8_t retry = 0;
struct ddc *ddc;
enum connector_id connector_id =
@@ -361,11 +371,22 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
return present;
}
- /* Read GPIO: DP sink is present if both clock and data pins are zero */
- /* [anaumov] in DAL2, there was no check for GPIO failure */
-
- gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
- ASSERT(gpio_result == GPIO_RESULT_OK);
+ /*
+ * Read GPIO: DP sink is present if both clock and data pins are zero
+ *
+	 * [W/A] When plugging/unplugging the DP cable, some customer boards
+	 * emit a short pulse on clk_pin (1V, < 1ms); DP then gets configured
+	 * as HDMI/DVI and the monitor cannot light up, so retry up to 3 times.
+	 * A real passive dongle needs an additional 3ms to be detected.
+ */
+ do {
+ gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+ ASSERT(gpio_result == GPIO_RESULT_OK);
+ if (clock_pin)
+ udelay(1000);
+ else
+ break;
+ } while (retry++ < 3);
present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
@@ -593,6 +614,14 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
}
+/**
+ * dc_link_detect() - Detect if a sink is attached to a given link
+ *
+ * link->local_sink is created or destroyed as needed.
+ *
+ * This does not create remote sinks but will trigger DM
+ * to start MST detection if a branch is detected.
+ */
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
{
struct dc_sink_init_data sink_init_data = { 0 };
@@ -688,12 +717,26 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
same_dpcd = false;
}
- /* Active dongle downstream unplug */
+	/* Active dongle plugged in without a display, or downstream unplug */
if (link->type == dc_connection_active_dongle
&& link->dpcd_caps.sink_count.
bits.SINK_COUNT == 0) {
- if (prev_sink != NULL)
+ if (prev_sink != NULL) {
+ /* Downstream unplug */
dc_sink_release(prev_sink);
+ } else {
+			/* Empty dongle plugged in */
+ for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+ int fail_count = 0;
+
+ dp_verify_link_cap(link,
+ &link->reported_link_cap,
+ &fail_count);
+
+ if (fail_count == 0)
+ break;
+ }
+ }
return true;
}
@@ -1396,8 +1439,6 @@ static enum dc_status enable_link_dp(
else
status = DC_FAIL_DP_LINK_TRAINING;
- enable_stream_features(pipe_ctx);
-
return status;
}
@@ -2175,11 +2216,11 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
backlight_pwm_u16_16, backlight_pwm_u16_16);
if (dc_is_embedded_signal(link->connector_signal)) {
- if (stream != NULL) {
- for (i = 0; i < MAX_PIPES; i++) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
if (core_dc->current_state->res_ctx.
- pipe_ctx[i].stream
- == stream)
+ pipe_ctx[i].stream->sink->link
+ == link)
/* DMCU -1 for all controller id values,
* therefore +1 here
*/
@@ -2218,7 +2259,7 @@ bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
struct dc *core_dc = link->ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
- if (dmcu != NULL && link->psr_enabled)
+ if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled)
dmcu->funcs->set_psr_enable(dmcu, enable, wait);
return true;
@@ -2594,6 +2635,9 @@ void core_link_enable_stream(
core_dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->sink->link->cur_link_settings);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ enable_stream_features(pipe_ctx);
+
dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
pipe_ctx->stream->bl_pwm_level,
0,
@@ -2606,11 +2650,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ core_dc->hwss.blank_stream(pipe_ctx);
+
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
- core_dc->hwss.blank_stream(pipe_ctx);
-
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index d91df5ef0cb3..0caacb60b02f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1089,6 +1089,121 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
return max_link_cap;
}
+static enum dc_status read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data)
+{
+ static enum dc_status retval;
+
+ /* The HW reads 16 bytes from 200h on HPD,
+ * but if we get an AUX_DEFER, the HW cannot retry
+ * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+ * fail, so we now explicitly read 6 bytes which is
+	 * the requirement from the above-mentioned test cases.
+ *
+ * For DP 1.4 we need to read those from 2002h range.
+ */
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
+ retval = core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT,
+ irq_data->raw,
+ sizeof(union hpd_irq_data));
+ else {
+ /* Read 14 bytes in a single read and then copy only the required fields.
+ * This is more efficient than doing it in two separate AUX reads. */
+
+ uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+
+ retval = core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT_ESI,
+ tmp,
+ sizeof(tmp));
+
+ if (retval != DC_OK)
+ return retval;
+
+ irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
+ }
+
+ return retval;
+}
+
+static bool hpd_rx_irq_check_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data)
+{
+ uint8_t irq_reg_rx_power_state = 0;
+ enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
+ union lane_status lane_status;
+ uint32_t lane;
+ bool sink_status_changed;
+ bool return_code;
+
+ sink_status_changed = false;
+ return_code = false;
+
+ if (link->cur_link_settings.lane_count == 0)
+ return return_code;
+
+ /*1. Check that Link Status changed, before re-training.*/
+
+ /*parse lane status*/
+ for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+ /* check status of lanes 0,1
+ * changed DpcdAddress_Lane01Status (0x202)
+ */
+ lane_status.raw = get_nibble_at_index(
+ &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+ lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+			/* If channel equalization, clock recovery, or
+			 * symbol lock is lost, consider the link dropped:
+			 * the DP sink status has changed
+ */
+ sink_status_changed = true;
+ break;
+ }
+ }
+
+ /* Check interlane align.*/
+ if (sink_status_changed ||
+ !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
+
+ DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
+
+ return_code = true;
+
+ /*2. Check that we can handle interrupt: Not in FS DOS,
+ * Not in "Display Timeout" state, Link is trained.
+ */
+ dpcd_result = core_link_read_dpcd(link,
+ DP_SET_POWER,
+ &irq_reg_rx_power_state,
+ sizeof(irq_reg_rx_power_state));
+
+ if (dpcd_result != DC_OK) {
+ DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
+ __func__);
+ } else {
+ if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+ return_code = false;
+ }
+ }
+
+ return return_code;
+}
+
bool dp_verify_link_cap(
struct dc_link *link,
struct dc_link_settings *known_limit_link_setting,
@@ -1104,12 +1219,14 @@ bool dp_verify_link_cap(
struct clock_source *dp_cs;
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
enum link_training_result status;
+ union hpd_irq_data irq_data;
if (link->dc->debug.skip_detection_link_training) {
link->verified_link_cap = *known_limit_link_setting;
return true;
}
+ memset(&irq_data, 0, sizeof(irq_data));
success = false;
skip_link_training = false;
@@ -1168,9 +1285,15 @@ bool dp_verify_link_cap(
(*fail_count)++;
}
- if (success)
+ if (success) {
link->verified_link_cap = *cur;
-
+ udelay(1000);
+ if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK)
+ if (hpd_rx_irq_check_link_loss_status(
+ link,
+ &irq_data))
+ (*fail_count)++;
+ }
/* always disable the link before trying another
	 * setting or before returning; we'll enable it later
* based on the actual mode we're driving
@@ -1572,122 +1695,6 @@ void decide_link_settings(struct dc_stream_state *stream,
}
/*************************Short Pulse IRQ***************************/
-
-static bool hpd_rx_irq_check_link_loss_status(
- struct dc_link *link,
- union hpd_irq_data *hpd_irq_dpcd_data)
-{
- uint8_t irq_reg_rx_power_state = 0;
- enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
- union lane_status lane_status;
- uint32_t lane;
- bool sink_status_changed;
- bool return_code;
-
- sink_status_changed = false;
- return_code = false;
-
- if (link->cur_link_settings.lane_count == 0)
- return return_code;
-
- /*1. Check that Link Status changed, before re-training.*/
-
- /*parse lane status*/
- for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
- /* check status of lanes 0,1
- * changed DpcdAddress_Lane01Status (0x202)
- */
- lane_status.raw = get_nibble_at_index(
- &hpd_irq_dpcd_data->bytes.lane01_status.raw,
- lane);
-
- if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
- !lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
- /* if one of the channel equalization, clock
- * recovery or symbol lock is dropped
- * consider it as (link has been
- * dropped) dp sink status has changed
- */
- sink_status_changed = true;
- break;
- }
- }
-
- /* Check interlane align.*/
- if (sink_status_changed ||
- !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
-
- DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
-
- return_code = true;
-
- /*2. Check that we can handle interrupt: Not in FS DOS,
- * Not in "Display Timeout" state, Link is trained.
- */
- dpcd_result = core_link_read_dpcd(link,
- DP_SET_POWER,
- &irq_reg_rx_power_state,
- sizeof(irq_reg_rx_power_state));
-
- if (dpcd_result != DC_OK) {
- DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
- __func__);
- } else {
- if (irq_reg_rx_power_state != DP_SET_POWER_D0)
- return_code = false;
- }
- }
-
- return return_code;
-}
-
-static enum dc_status read_hpd_rx_irq_data(
- struct dc_link *link,
- union hpd_irq_data *irq_data)
-{
- static enum dc_status retval;
-
- /* The HW reads 16 bytes from 200h on HPD,
- * but if we get an AUX_DEFER, the HW cannot retry
- * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
- * fail, so we now explicitly read 6 bytes which is
- * the req from the above mentioned test cases.
- *
- * For DP 1.4 we need to read those from 2002h range.
- */
- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
- retval = core_link_read_dpcd(
- link,
- DP_SINK_COUNT,
- irq_data->raw,
- sizeof(union hpd_irq_data));
- else {
- /* Read 14 bytes in a single read and then copy only the required fields.
- * This is more efficient than doing it in two separate AUX reads. */
-
- uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
-
- retval = core_link_read_dpcd(
- link,
- DP_SINK_COUNT_ESI,
- tmp,
- sizeof(tmp));
-
- if (retval != DC_OK)
- return retval;
-
- irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
- }
-
- return retval;
-}
-
static bool allow_hpd_rx_irq(const struct dc_link *link)
{
/*
@@ -2196,7 +2203,7 @@ static void get_active_converter_info(
}
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
- uint8_t det_caps[4];
+ uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/
union dwnstream_port_caps_byte0 *port_caps =
(union dwnstream_port_caps_byte0 *)det_caps;
core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
@@ -2240,7 +2247,8 @@ static void get_active_converter_info(
translate_dpcd_max_bpc(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
- link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
break;
@@ -2371,11 +2379,22 @@ static bool retrieve_link_cap(struct dc_link *link)
dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
- core_link_read_dpcd(
+ uint8_t ext_cap_data[16];
+
+ memset(ext_cap_data, '\0', sizeof(ext_cap_data));
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
link,
DP_DP13_DPCD_REV,
- dpcd_data,
- sizeof(dpcd_data));
+ ext_cap_data,
+ sizeof(ext_cap_data));
+ if (status == DC_OK) {
+ memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data));
+ break;
+ }
+ }
+ if (status != DC_OK)
+ dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 82cd1d6e6e59..0065ec7d5330 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -96,6 +96,7 @@ void dp_enable_link_phy(
link_settings,
clock_source);
}
+ link->cur_link_settings = *link_settings;
dp_receiver_power_ctrl(link, true);
}
@@ -307,6 +308,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
link->link_enc,
link_setting,
pipes[i].clock_source->id);
+ link->cur_link_settings = *link_setting;
dp_receiver_power_ctrl(link, true);
@@ -316,7 +318,6 @@ void dp_retrain_link_dp_test(struct dc_link *link,
skip_video_pattern,
LINK_TRAINING_ATTEMPTS);
- link->cur_link_settings = *link_setting;
link->dc->hwss.enable_stream(&pipes[i]);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index fc65b0055167..76137df74a53 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -83,7 +83,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
dc_version = DCE_VERSION_11_22;
break;
case FAMILY_AI:
- dc_version = DCE_VERSION_12_0;
+ if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
+ dc_version = DCE_VERSION_12_1;
+ else
+ dc_version = DCE_VERSION_12_0;
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case FAMILY_RV:
@@ -136,6 +139,7 @@ struct resource_pool *dc_create_resource_pool(
num_virtual_links, dc);
break;
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
res_pool = dce120_create_resource_pool(
num_virtual_links, dc);
break;
@@ -478,10 +482,29 @@ static enum pixel_format convert_pixel_format_to_dalsurface(
return dal_pixel_format;
}
-static void rect_swap_helper(struct rect *rect)
+static inline void get_vp_scan_direction(
+ enum dc_rotation_angle rotation,
+ bool horizontal_mirror,
+ bool *orthogonal_rotation,
+ bool *flip_vert_scan_dir,
+ bool *flip_horz_scan_dir)
{
- swap(rect->height, rect->width);
- swap(rect->x, rect->y);
+ *orthogonal_rotation = false;
+ *flip_vert_scan_dir = false;
+ *flip_horz_scan_dir = false;
+ if (rotation == ROTATION_ANGLE_180) {
+ *flip_vert_scan_dir = true;
+ *flip_horz_scan_dir = true;
+ } else if (rotation == ROTATION_ANGLE_90) {
+ *orthogonal_rotation = true;
+ *flip_horz_scan_dir = true;
+ } else if (rotation == ROTATION_ANGLE_270) {
+ *orthogonal_rotation = true;
+ *flip_vert_scan_dir = true;
+ }
+
+ if (horizontal_mirror)
+ *flip_horz_scan_dir = !*flip_horz_scan_dir;
}
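A short worked example of the helper above (illustrative only, not part of this patch): a 270-degree rotation combined with horizontal mirroring.

	bool ortho, flip_v, flip_h;

	get_vp_scan_direction(ROTATION_ANGLE_270, true, &ortho, &flip_v, &flip_h);
	/* 270 degrees sets ortho and flip_v; the mirror then toggles flip_h,
	 * so all three come back true. */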
static void calculate_viewport(struct pipe_ctx *pipe_ctx)
@@ -490,33 +513,14 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
const struct dc_stream_state *stream = pipe_ctx->stream;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect surf_src = plane_state->src_rect;
- struct rect clip = { 0 };
+ struct rect clip, dest;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
bool pri_split = pipe_ctx->bottom_pipe &&
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
bool sec_split = pipe_ctx->top_pipe &&
pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
- bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
-
-
- /*
- * We need take horizontal mirror into account. On an unrotated surface this means
- * that the viewport offset is actually the offset from the other side of source
- * image so we have to subtract the right edge of the viewport from the right edge of
- * the source window. Similar to mirror we need to take into account how offset is
- * affected for 270/180 rotations
- */
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
- flip_vert_scan_dir = true;
- flip_horz_scan_dir = true;
- } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
- flip_vert_scan_dir = true;
- else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- flip_horz_scan_dir = true;
-
- if (pipe_ctx->plane_state->horizontal_mirror)
- flip_horz_scan_dir = !flip_horz_scan_dir;
+ bool orthogonal_rotation, flip_y_start, flip_x_start;
if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
@@ -524,13 +528,10 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
sec_split = false;
}
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- rect_swap_helper(&surf_src);
-
/* The actual clip is an intersection between stream
* source and surface clip
*/
+ dest = plane_state->dst_rect;
clip.x = stream->src.x > plane_state->clip_rect.x ?
stream->src.x : plane_state->clip_rect.x;
@@ -547,66 +548,77 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
stream->src.y + stream->src.height - clip.y :
plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
- /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
- * note: surf_src.ofs should be added after rotation/mirror offset direction
- * adjustment since it is already in viewport space
- * num_pixels = clip.num_pix * scl_ratio
+ /*
+ * Need to calculate how scan origin is shifted in vp space
+ * to correctly rotate clip and dst
*/
- data->viewport.x = (clip.x - plane_state->dst_rect.x) *
- surf_src.width / plane_state->dst_rect.width;
- data->viewport.width = clip.width *
- surf_src.width / plane_state->dst_rect.width;
-
- data->viewport.y = (clip.y - plane_state->dst_rect.y) *
- surf_src.height / plane_state->dst_rect.height;
- data->viewport.height = clip.height *
- surf_src.height / plane_state->dst_rect.height;
+ get_vp_scan_direction(
+ plane_state->rotation,
+ plane_state->horizontal_mirror,
+ &orthogonal_rotation,
+ &flip_y_start,
+ &flip_x_start);
- if (flip_vert_scan_dir)
- data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
- if (flip_horz_scan_dir)
- data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
+ if (orthogonal_rotation) {
+ swap(clip.x, clip.y);
+ swap(clip.width, clip.height);
+ swap(dest.x, dest.y);
+ swap(dest.width, dest.height);
+ }
+ if (flip_x_start) {
+ clip.x = dest.x + dest.width - clip.x - clip.width;
+ dest.x = 0;
+ }
+ if (flip_y_start) {
+ clip.y = dest.y + dest.height - clip.y - clip.height;
+ dest.y = 0;
+ }
- data->viewport.x += surf_src.x;
- data->viewport.y += surf_src.y;
+ /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
+ * num_pixels = clip.num_pix * scl_ratio
+ */
+ data->viewport.x = surf_src.x + (clip.x - dest.x) * surf_src.width / dest.width;
+ data->viewport.width = clip.width * surf_src.width / dest.width;
+
+ data->viewport.y = surf_src.y + (clip.y - dest.y) * surf_src.height / dest.height;
+ data->viewport.height = clip.height * surf_src.height / dest.height;
+
+ /* Handle split */
+ if (pri_split || sec_split) {
+ if (orthogonal_rotation) {
+ if (flip_y_start != pri_split)
+ data->viewport.height /= 2;
+ else {
+ data->viewport.y += data->viewport.height / 2;
+ /* Ceil offset pipe */
+ data->viewport.height = (data->viewport.height + 1) / 2;
+ }
+ } else {
+ if (flip_x_start != pri_split)
+ data->viewport.width /= 2;
+ else {
+ data->viewport.x += data->viewport.width / 2;
+ /* Ceil offset pipe */
+ data->viewport.width = (data->viewport.width + 1) / 2;
+ }
+ }
+ }
/* Round down, compensate in init */
data->viewport_c.x = data->viewport.x / vpc_div;
data->viewport_c.y = data->viewport.y / vpc_div;
- data->inits.h_c = (data->viewport.x % vpc_div) != 0 ?
- dc_fixpt_half : dc_fixpt_zero;
- data->inits.v_c = (data->viewport.y % vpc_div) != 0 ?
- dc_fixpt_half : dc_fixpt_zero;
+ data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
+ data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
+
/* Round up, assume original video size always even dimensions */
data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
-
- /* Handle hsplit */
- if (sec_split) {
- data->viewport.x += data->viewport.width / 2;
- data->viewport_c.x += data->viewport_c.width / 2;
- /* Ceil offset pipe */
- data->viewport.width = (data->viewport.width + 1) / 2;
- data->viewport_c.width = (data->viewport_c.width + 1) / 2;
- } else if (pri_split) {
- if (data->viewport.width > 1)
- data->viewport.width /= 2;
- if (data->viewport_c.width > 1)
- data->viewport_c.width /= 2;
- }
-
- if (plane_state->rotation == ROTATION_ANGLE_90 ||
- plane_state->rotation == ROTATION_ANGLE_270) {
- rect_swap_helper(&data->viewport_c);
- rect_swap_helper(&data->viewport);
- }
}
-static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
+static void calculate_recout(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
- struct rect surf_src = plane_state->src_rect;
struct rect surf_clip = plane_state->clip_rect;
bool pri_split = pipe_ctx->bottom_pipe &&
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
@@ -614,10 +626,6 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- rect_swap_helper(&surf_src);
-
pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
if (stream->src.x < surf_clip.x)
pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
@@ -646,7 +654,7 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
stream->dst.y + stream->dst.height
- pipe_ctx->plane_res.scl_data.recout.y;
- /* Handle h & vsplit */
+ /* Handle h & v split, handle rotation using viewport */
if (sec_split && top_bottom_split) {
pipe_ctx->plane_res.scl_data.recout.y +=
pipe_ctx->plane_res.scl_data.recout.height / 2;
@@ -655,44 +663,14 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
(pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
} else if (pri_split && top_bottom_split)
pipe_ctx->plane_res.scl_data.recout.height /= 2;
- else if (pri_split || sec_split) {
- /* HMirror XOR Secondary_pipe XOR Rotation_180 */
- bool right_view = (sec_split != plane_state->horizontal_mirror) !=
- (plane_state->rotation == ROTATION_ANGLE_180);
-
- if (plane_state->rotation == ROTATION_ANGLE_90
- || plane_state->rotation == ROTATION_ANGLE_270)
- /* Secondary_pipe XOR Rotation_270 */
- right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
-
- if (right_view) {
- pipe_ctx->plane_res.scl_data.recout.x +=
- pipe_ctx->plane_res.scl_data.recout.width / 2;
- /* Ceil offset pipe */
- pipe_ctx->plane_res.scl_data.recout.width =
- (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
- } else {
- if (pipe_ctx->plane_res.scl_data.recout.width > 1)
- pipe_ctx->plane_res.scl_data.recout.width /= 2;
- }
- }
- /* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
- * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
- * ratio)
- */
- recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
- * stream->dst.width / stream->src.width -
- surf_src.x * plane_state->dst_rect.width / surf_src.width
- * stream->dst.width / stream->src.width;
- recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
- * stream->dst.height / stream->src.height -
- surf_src.y * plane_state->dst_rect.height / surf_src.height
- * stream->dst.height / stream->src.height;
-
- recout_full->width = plane_state->dst_rect.width
- * stream->dst.width / stream->src.width;
- recout_full->height = plane_state->dst_rect.height
- * stream->dst.height / stream->src.height;
+ else if (sec_split) {
+ pipe_ctx->plane_res.scl_data.recout.x +=
+ pipe_ctx->plane_res.scl_data.recout.width / 2;
+ /* Ceil offset pipe */
+ pipe_ctx->plane_res.scl_data.recout.width =
+ (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
+ } else if (pri_split)
+ pipe_ctx->plane_res.scl_data.recout.width /= 2;
}
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -705,9 +683,10 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
const int out_w = stream->dst.width;
const int out_h = stream->dst.height;
+	/* Swap surf_src height and width since scaling ratios are in recout rotation */
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- rect_swap_helper(&surf_src);
+ swap(surf_src.height, surf_src.width);
pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction(
surf_src.width,
@@ -744,351 +723,202 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
}
-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
+static inline void adjust_vp_and_init_for_seamless_clip(
+ bool flip_scan_dir,
+ int recout_skip,
+ int src_size,
+ int taps,
+ struct fixed31_32 ratio,
+ struct fixed31_32 *init,
+ int *vp_offset,
+ int *vp_size)
{
- struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
- struct rect src = pipe_ctx->plane_state->src_rect;
- int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
- || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
- bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
-
- /*
- * Need to calculate the scan direction for viewport to make adjustments
- */
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
- flip_vert_scan_dir = true;
- flip_horz_scan_dir = true;
- } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
- flip_vert_scan_dir = true;
- else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- flip_horz_scan_dir = true;
-
- if (pipe_ctx->plane_state->horizontal_mirror)
- flip_horz_scan_dir = !flip_horz_scan_dir;
-
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
- rect_swap_helper(&src);
- rect_swap_helper(&data->viewport_c);
- rect_swap_helper(&data->viewport);
- }
-
- /*
- * Init calculated according to formula:
- * init = (scaling_ratio + number_of_taps + 1) / 2
- * init_bot = init + scaling_ratio
- * init_c = init + truncated_vp_c_offset(from calculate viewport)
- */
- data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19);
-
- data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19);
-
- data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19);
-
- data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
-
- if (!flip_horz_scan_dir) {
+ if (!flip_scan_dir) {
/* Adjust for viewport end clip-off */
- if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
- int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h, data->ratios.horz));
+ if ((*vp_offset + *vp_size) < src_size) {
+ int vp_clip = src_size - *vp_size - *vp_offset;
+ int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
int_part = int_part > 0 ? int_part : 0;
- data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
- int vp_clip = (src.x + src.width) / vpc_div -
- data->viewport_c.width - data->viewport_c.x;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+ *vp_size += int_part < vp_clip ? int_part : vp_clip;
}
/* Adjust for non-0 viewport offset */
- if (data->viewport.x) {
+ if (*vp_offset) {
int int_part;
- data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
- data->ratios.horz, data->recout.x - recout_full->x));
- int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
- if (int_part < data->taps.h_taps) {
- int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
- (data->taps.h_taps - int_part) : data->viewport.x;
- data->viewport.x -= int_adj;
- data->viewport.width += int_adj;
+ *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
+ int_part = dc_fixpt_floor(*init) - *vp_offset;
+ if (int_part < taps) {
+ int int_adj = *vp_offset >= (taps - int_part) ?
+ (taps - int_part) : *vp_offset;
+ *vp_offset -= int_adj;
+ *vp_size += int_adj;
int_part += int_adj;
- } else if (int_part > data->taps.h_taps) {
- data->viewport.x += int_part - data->taps.h_taps;
- data->viewport.width -= int_part - data->taps.h_taps;
- int_part = data->taps.h_taps;
+ } else if (int_part > taps) {
+ *vp_offset += int_part - taps;
+ *vp_size -= int_part - taps;
+ int_part = taps;
}
- data->inits.h.value &= 0xffffffff;
- data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
- }
-
- if (data->viewport_c.x) {
- int int_part;
-
- data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
- data->ratios.horz_c, data->recout.x - recout_full->x));
- int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
- if (int_part < data->taps.h_taps_c) {
- int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
- (data->taps.h_taps_c - int_part) : data->viewport_c.x;
- data->viewport_c.x -= int_adj;
- data->viewport_c.width += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.h_taps_c) {
- data->viewport_c.x += int_part - data->taps.h_taps_c;
- data->viewport_c.width -= int_part - data->taps.h_taps_c;
- int_part = data->taps.h_taps_c;
- }
- data->inits.h_c.value &= 0xffffffff;
- data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+ init->value &= 0xffffffff;
+ *init = dc_fixpt_add_int(*init, int_part);
}
} else {
/* Adjust for non-0 viewport offset */
- if (data->viewport.x) {
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h, data->ratios.horz));
+ if (*vp_offset) {
+ int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
int_part = int_part > 0 ? int_part : 0;
- data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x;
- data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x;
- }
- if (data->viewport_c.x) {
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
- data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
+ *vp_size += int_part < *vp_offset ? int_part : *vp_offset;
+ *vp_offset -= int_part < *vp_offset ? int_part : *vp_offset;
}
/* Adjust for viewport end clip-off */
- if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
- int int_part;
- int end_offset = src.x + src.width
- - data->viewport.x - data->viewport.width;
-
- /*
- * this is init if vp had no offset, keep in mind this is from the
- * right side of vp due to scan direction
- */
- data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
- data->ratios.horz, data->recout.x - recout_full->x));
- /*
- * this is the difference between first pixel of viewport available to read
- * and init position, takning into account scan direction
- */
- int_part = dc_fixpt_floor(data->inits.h) - end_offset;
- if (int_part < data->taps.h_taps) {
- int int_adj = end_offset >= (data->taps.h_taps - int_part) ?
- (data->taps.h_taps - int_part) : end_offset;
- data->viewport.width += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.h_taps) {
- data->viewport.width += int_part - data->taps.h_taps;
- int_part = data->taps.h_taps;
- }
- data->inits.h.value &= 0xffffffff;
- data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
- }
-
- if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+ if ((*vp_offset + *vp_size) < src_size) {
int int_part;
- int end_offset = (src.x + src.width) / vpc_div
- - data->viewport_c.x - data->viewport_c.width;
+ int end_offset = src_size - *vp_offset - *vp_size;
/*
* this is init if vp had no offset, keep in mind this is from the
* right side of vp due to scan direction
*/
- data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
- data->ratios.horz_c, data->recout.x - recout_full->x));
+ *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
/*
* this is the difference between first pixel of viewport available to read
+		 * and init position, taking into account scan direction
*/
- int_part = dc_fixpt_floor(data->inits.h_c) - end_offset;
- if (int_part < data->taps.h_taps_c) {
- int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ?
- (data->taps.h_taps_c - int_part) : end_offset;
- data->viewport_c.width += int_adj;
+ int_part = dc_fixpt_floor(*init) - end_offset;
+ if (int_part < taps) {
+ int int_adj = end_offset >= (taps - int_part) ?
+ (taps - int_part) : end_offset;
+ *vp_size += int_adj;
int_part += int_adj;
- } else if (int_part > data->taps.h_taps_c) {
- data->viewport_c.width += int_part - data->taps.h_taps_c;
- int_part = data->taps.h_taps_c;
+ } else if (int_part > taps) {
+ *vp_size += int_part - taps;
+ int_part = taps;
}
- data->inits.h_c.value &= 0xffffffff;
- data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+ init->value &= 0xffffffff;
+ *init = dc_fixpt_add_int(*init, int_part);
}
-
}
- if (!flip_vert_scan_dir) {
- /* Adjust for viewport end clip-off */
- if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
- int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v, data->ratios.vert));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
- int vp_clip = (src.y + src.height) / vpc_div -
- data->viewport_c.height - data->viewport_c.y;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
- }
-
- /* Adjust for non-0 viewport offset */
- if (data->viewport.y) {
- int int_part;
-
- data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
- data->ratios.vert, data->recout.y - recout_full->y));
- int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
- if (int_part < data->taps.v_taps) {
- int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
- (data->taps.v_taps - int_part) : data->viewport.y;
- data->viewport.y -= int_adj;
- data->viewport.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps) {
- data->viewport.y += int_part - data->taps.v_taps;
- data->viewport.height -= int_part - data->taps.v_taps;
- int_part = data->taps.v_taps;
- }
- data->inits.v.value &= 0xffffffff;
- data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
- }
-
- if (data->viewport_c.y) {
- int int_part;
+}
- data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
- data->ratios.vert_c, data->recout.y - recout_full->y));
- int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
- if (int_part < data->taps.v_taps_c) {
- int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
- (data->taps.v_taps_c - int_part) : data->viewport_c.y;
- data->viewport_c.y -= int_adj;
- data->viewport_c.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps_c) {
- data->viewport_c.y += int_part - data->taps.v_taps_c;
- data->viewport_c.height -= int_part - data->taps.v_taps_c;
- int_part = data->taps.v_taps_c;
- }
- data->inits.v_c.value &= 0xffffffff;
- data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
- }
- } else {
- /* Adjust for non-0 viewport offset */
- if (data->viewport.y) {
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v, data->ratios.vert));
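+/*
+ * Compute the per-axis scaler init values in recout space, then adjust the
+ * luma/chroma viewports and inits for seamless clipping, taking rotation
+ * and mirroring into account.
+ */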
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+ struct rect src = pipe_ctx->plane_state->src_rect;
+ int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
+ int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+ || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y;
- data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y;
- }
- if (data->viewport_c.y) {
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+ /*
+ * Need to calculate the scan direction for viewport to make adjustments
+ */
+ get_vp_scan_direction(
+ plane_state->rotation,
+ plane_state->horizontal_mirror,
+ &orthogonal_rotation,
+ &flip_vert_scan_dir,
+ &flip_horz_scan_dir);
+
+ /* Calculate src rect rotation adjusted to recout space */
+ surf_size_h = src.x + src.width;
+ surf_size_v = src.y + src.height;
+ if (flip_horz_scan_dir)
+ src.x = 0;
+ if (flip_vert_scan_dir)
+ src.y = 0;
+ if (orthogonal_rotation) {
+ swap(src.x, src.y);
+ swap(src.width, src.height);
+ }
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
- data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
- }
+ /* Recout matching initial vp offset = recout_offset - (stream dst offset +
+ * ((surf dst offset - stream src offset) * 1 / stream scaling ratio)
+ * - (surf src offset * 1 / full scl ratio))
+ */
+ recout_skip_h = data->recout.x - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ * stream->dst.width / stream->src.width -
+ src.x * plane_state->dst_rect.width / src.width
+ * stream->dst.width / stream->src.width);
+ recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ * stream->dst.height / stream->src.height -
+ src.y * plane_state->dst_rect.height / src.height
+ * stream->dst.height / stream->src.height);
+ if (orthogonal_rotation)
+ swap(recout_skip_h, recout_skip_v);
+ /*
+ * Init calculated according to formula:
+ * init = (scaling_ratio + number_of_taps + 1) / 2
+ * init_bot = init + scaling_ratio
+ * init_c = init + truncated_vp_c_offset(from calculate viewport)
+ */
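+ /* e.g. a 1:1 scaling ratio with 4 taps gives init = (1 + 4 + 1) / 2 = 3 */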
+ data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19);
- /* Adjust for viewport end clip-off */
- if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
- int int_part;
- int end_offset = src.y + src.height
- - data->viewport.y - data->viewport.height;
+ data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19);
- /*
- * this is init if vp had no offset, keep in mind this is from the
- * right side of vp due to scan direction
- */
- data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
- data->ratios.vert, data->recout.y - recout_full->y));
- /*
- * this is the difference between first pixel of viewport available to read
- * and init position, taking into account scan direction
- */
- int_part = dc_fixpt_floor(data->inits.v) - end_offset;
- if (int_part < data->taps.v_taps) {
- int int_adj = end_offset >= (data->taps.v_taps - int_part) ?
- (data->taps.v_taps - int_part) : end_offset;
- data->viewport.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps) {
- data->viewport.height += int_part - data->taps.v_taps;
- int_part = data->taps.v_taps;
- }
- data->inits.v.value &= 0xffffffff;
- data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
- }
+ data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19);
- if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
- int int_part;
- int end_offset = (src.y + src.height) / vpc_div
- - data->viewport_c.y - data->viewport_c.height;
+ data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
- /*
- * this is init if vp had no offset, keep in mind this is from the
- * right side of vp due to scan direction
- */
- data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
- data->ratios.vert_c, data->recout.y - recout_full->y));
- /*
- * this is the difference between first pixel of viewport available to read
- * and init position, taking into account scan direction
- */
- int_part = dc_fixpt_floor(data->inits.v_c) - end_offset;
- if (int_part < data->taps.v_taps_c) {
- int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ?
- (data->taps.v_taps_c - int_part) : end_offset;
- data->viewport_c.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps_c) {
- data->viewport_c.height += int_part - data->taps.v_taps_c;
- int_part = data->taps.v_taps_c;
- }
- data->inits.v_c.value &= 0xffffffff;
- data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
- }
- }
+ /*
+ * Taps, inits and scaling ratios are in recout space; rotate them to
+ * viewport space before making the adjustments
+ */
+ adjust_vp_and_init_for_seamless_clip(
+ flip_horz_scan_dir,
+ recout_skip_h,
+ surf_size_h,
+ orthogonal_rotation ? data->taps.v_taps : data->taps.h_taps,
+ orthogonal_rotation ? data->ratios.vert : data->ratios.horz,
+ orthogonal_rotation ? &data->inits.v : &data->inits.h,
+ &data->viewport.x,
+ &data->viewport.width);
+ adjust_vp_and_init_for_seamless_clip(
+ flip_horz_scan_dir,
+ recout_skip_h,
+ surf_size_h / vpc_div,
+ orthogonal_rotation ? data->taps.v_taps_c : data->taps.h_taps_c,
+ orthogonal_rotation ? data->ratios.vert_c : data->ratios.horz_c,
+ orthogonal_rotation ? &data->inits.v_c : &data->inits.h_c,
+ &data->viewport_c.x,
+ &data->viewport_c.width);
+ adjust_vp_and_init_for_seamless_clip(
+ flip_vert_scan_dir,
+ recout_skip_v,
+ surf_size_v,
+ orthogonal_rotation ? data->taps.h_taps : data->taps.v_taps,
+ orthogonal_rotation ? data->ratios.horz : data->ratios.vert,
+ orthogonal_rotation ? &data->inits.h : &data->inits.v,
+ &data->viewport.y,
+ &data->viewport.height);
+ adjust_vp_and_init_for_seamless_clip(
+ flip_vert_scan_dir,
+ recout_skip_v,
+ surf_size_v / vpc_div,
+ orthogonal_rotation ? data->taps.h_taps_c : data->taps.v_taps_c,
+ orthogonal_rotation ? data->ratios.horz_c : data->ratios.vert_c,
+ orthogonal_rotation ? &data->inits.h_c : &data->inits.v_c,
+ &data->viewport_c.y,
+ &data->viewport_c.height);
/* Interlaced inits based on final vert inits */
data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
- rect_swap_helper(&data->viewport_c);
- rect_swap_helper(&data->viewport);
- }
}
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
- struct rect recout_full = { 0 };
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Important: scaling ratio calculation requires pixel format,
@@ -1105,7 +935,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
return false;
- calculate_recout(pipe_ctx, &recout_full);
+ calculate_recout(pipe_ctx);
/**
* Setting line buffer pixel depth to 24bpp yields banding
@@ -1146,7 +976,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (res)
/* May need to re-check lb size after this in some obscure scenario */
- calculate_inits_and_adj_vp(pipe_ctx, &recout_full);
+ calculate_inits_and_adj_vp(pipe_ctx);
DC_LOG_SCALER(
"%s: Viewport:\nheight:%d width:%d x:%d "
@@ -1356,6 +1186,9 @@ bool dc_add_plane_to_context(
return false;
}
+ tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
+ ASSERT(tail_pipe);
+
free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -1373,10 +1206,6 @@ bool dc_add_plane_to_context(
free_pipe->plane_state = plane_state;
if (head_pipe != free_pipe) {
-
- tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
- ASSERT(tail_pipe);
-
free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
@@ -1622,6 +1451,14 @@ static bool are_stream_backends_same(
return true;
}
+/**
+ * dc_is_stream_unchanged() - Compare two stream states for equivalence.
+ *
+ * Checks if there is a difference between the two states
+ * that would require a mode change.
+ *
+ * Does not compare cursor position or attributes.
+ */
bool dc_is_stream_unchanged(
struct dc_stream_state *old_stream, struct dc_stream_state *stream)
{
@@ -1632,6 +1469,9 @@ bool dc_is_stream_unchanged(
return true;
}
+/**
+ * dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams.
+ */
bool dc_is_stream_scaling_unchanged(
struct dc_stream_state *old_stream, struct dc_stream_state *stream)
{
@@ -1791,16 +1631,19 @@ bool resource_is_stream_unchanged(
return false;
}
+/**
+ * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
+ */
enum dc_status dc_add_stream_to_ctx(
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *stream)
{
- struct dc_context *dc_ctx = dc->ctx;
enum dc_status res;
+ DC_LOGGER_INIT(dc->ctx->logger);
if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
- DC_ERROR("Max streams reached, can't add stream %p !\n", stream);
+ DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
return DC_ERROR_UNEXPECTED;
}
@@ -1810,11 +1653,14 @@ enum dc_status dc_add_stream_to_ctx(
res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream);
if (res != DC_OK)
- DC_ERROR("Adding stream %p to context failed with err %d!\n", stream, res);
+ DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
return res;
}
+/**
+ * dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
+ */
enum dc_status dc_remove_stream_from_ctx(
struct dc *dc,
struct dc_state *new_ctx,
@@ -1976,6 +1822,8 @@ enum dc_status resource_map_pool_resources(
}
*/
+ calculate_phy_pix_clks(stream);
+
/* acquire new resources */
pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
@@ -2033,6 +1881,12 @@ enum dc_status resource_map_pool_resources(
return DC_ERROR_UNEXPECTED;
}
+/**
+ * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state
+ * This is a shallow copy: refcounts on the existing streams and planes are incremented.
+ * @dc: the state is copied out of dc->current_state
+ * @dst_ctx: destination state to copy into
+ */
void dc_resource_state_copy_construct_current(
const struct dc *dc,
struct dc_state *dst_ctx)
@@ -2048,6 +1902,14 @@ void dc_resource_state_construct(
dst_ctx->dccg = dc->res_pool->clk_mgr;
}
+/**
+ * dc_validate_global_state() - Determine if HW can support a given state
+ * Checks HW resource availability and bandwidth requirements.
+ * @dc: dc struct for this driver
+ * @new_ctx: state to be validated
+ *
+ * Return: DC_OK if the result can be programmed. Otherwise, an error code.
+ */
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx)
@@ -2375,113 +2237,15 @@ static void set_vendor_info_packet(
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
{
- uint32_t length = 0;
- bool hdmi_vic_mode = false;
- uint8_t checksum = 0;
- uint32_t i = 0;
- enum dc_timing_3d_format format;
- // Can be different depending on packet content /*todo*/
- // unsigned int length = pPathMode->dolbyVision ? 24 : 5;
-
- info_packet->valid = false;
-
- format = stream->timing.timing_3d_format;
- if (stream->view_format == VIEW_3D_FORMAT_NONE)
- format = TIMING_3D_FORMAT_NONE;
-
- /* Can be different depending on packet content */
- length = 5;
-
- if (stream->timing.hdmi_vic != 0
- && stream->timing.h_total >= 3840
- && stream->timing.v_total >= 2160)
- hdmi_vic_mode = true;
-
- /* According to HDMI 1.4a CTS, VSIF should be sent
- * for both 3D stereo and HDMI VIC modes.
- * For all other modes, there is no VSIF sent. */
+ /* The vendor-specific info packet is pre-built and carried in stream->vsp_infopacket */
- if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
+ /* Nothing to send if the packet is not marked valid */
+ if (!stream->vsp_infopacket.valid)
return;
- /* 24bit IEEE Registration identifier (0x000c03). LSB first. */
- info_packet->sb[1] = 0x03;
- info_packet->sb[2] = 0x0C;
- info_packet->sb[3] = 0x00;
-
- /*PB4: 5 lower bytes = 0 (reserved). 3 higher bits = HDMI_Video_Format.
- * The value for HDMI_Video_Format are:
- * 0x0 (0b000) - No additional HDMI video format is presented in this
- * packet
- * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
- * parameter follows
- * 0x2 (0b010) - 3D format indication present. 3D_Structure and
- * potentially 3D_Ext_Data follows
- * 0x3..0x7 (0b011..0b111) - reserved for future use */
- if (format != TIMING_3D_FORMAT_NONE)
- info_packet->sb[4] = (2 << 5);
- else if (hdmi_vic_mode)
- info_packet->sb[4] = (1 << 5);
-
- /* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
- * 4 lower bites = 0 (reserved). 4 higher bits = 3D_Structure.
- * The value for 3D_Structure are:
- * 0x0 - Frame Packing
- * 0x1 - Field Alternative
- * 0x2 - Line Alternative
- * 0x3 - Side-by-Side (full)
- * 0x4 - L + depth
- * 0x5 - L + depth + graphics + graphics-depth
- * 0x6 - Top-and-Bottom
- * 0x7 - Reserved for future use
- * 0x8 - Side-by-Side (Half)
- * 0x9..0xE - Reserved for future use
- * 0xF - Not used */
- switch (format) {
- case TIMING_3D_FORMAT_HW_FRAME_PACKING:
- case TIMING_3D_FORMAT_SW_FRAME_PACKING:
- info_packet->sb[5] = (0x0 << 4);
- break;
-
- case TIMING_3D_FORMAT_SIDE_BY_SIDE:
- case TIMING_3D_FORMAT_SBS_SW_PACKED:
- info_packet->sb[5] = (0x8 << 4);
- length = 6;
- break;
-
- case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
- case TIMING_3D_FORMAT_TB_SW_PACKED:
- info_packet->sb[5] = (0x6 << 4);
- break;
-
- default:
- break;
- }
-
- /*PB5: If PB4 is set to 0x1 (extended resolution format)
- * fill PB5 with the correct HDMI VIC code */
- if (hdmi_vic_mode)
- info_packet->sb[5] = stream->timing.hdmi_vic;
-
- /* Header */
- info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; /* VSIF packet type. */
- info_packet->hb1 = 0x01; /* Version */
-
- /* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
- info_packet->hb2 = (uint8_t) (length);
-
- /* Calculate checksum */
- checksum = 0;
- checksum += info_packet->hb0;
- checksum += info_packet->hb1;
- checksum += info_packet->hb2;
-
- for (i = 1; i <= length; i++)
- checksum += info_packet->sb[i];
-
- info_packet->sb[0] = (uint8_t) (0x100 - checksum);
-
- info_packet->valid = true;
+ *info_packet = stream->vsp_infopacket;
}
static void set_spd_info_packet(
@@ -2537,10 +2301,6 @@ void dc_resource_state_destruct(struct dc_state *context)
}
}
-/*
- * Copy src_ctx into dst_ctx and retain all surfaces and streams referenced
- * by the src_ctx
- */
void dc_resource_state_copy_construct(
const struct dc_state *src_ctx,
struct dc_state *dst_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index e113439aaa86..66e5c4623a49 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -100,8 +100,6 @@ static void construct(struct dc_stream_state *stream,
/* EDID CAP translation for HDMI 2.0 */
stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
- stream->status.link = stream->sink->link;
-
update_stream_signal(stream);
stream->out_transfer_func = dc_create_transfer_func();
@@ -172,7 +170,7 @@ struct dc_stream_status *dc_stream_get_status(
}
/**
- * Update the cursor attributes and set cursor surface address
+ * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
bool dc_stream_set_cursor_attributes(
struct dc_stream_state *stream,
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index d16a20c84792..4b5bbb13ce7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -36,9 +36,10 @@
#include "inc/hw_sequencer.h"
#include "inc/compressor.h"
+#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.04"
+#define DC_VER "3.2.08"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -47,13 +48,6 @@
/*******************************************************************************
* Display Core Interfaces
******************************************************************************/
-struct dmcu_version {
- unsigned int date;
- unsigned int month;
- unsigned int year;
- unsigned int interface_version;
-};
-
struct dc_versions {
const char *dc_ver;
struct dmcu_version dmcu_version;
@@ -748,5 +742,6 @@ void dc_set_power_state(
struct dc *dc,
enum dc_acpi_cm_power_state power_state);
void dc_resume(struct dc *dc);
+bool dc_is_dmcu_initialized(struct dc *dc);
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index fcfd50b5dba0..4842d2378bbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -234,14 +234,14 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
if (field_value == condition_value) {
if (i * delay_between_poll_us > 1000 &&
!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
- dm_output_to_console("REG_WAIT taking a while: %dms in %s line:%d\n",
+ DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
delay_between_poll_us * i / 1000,
func_name, line);
return reg_val;
}
}
- dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
+ DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
delay_between_poll_us, time_out_num_tries,
func_name, line);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 7825e4b5e97c..e72fce4eca65 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -192,7 +192,6 @@ enum surface_pixel_format {
/*swaped & float*/
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
/*grow graphics here if necessary */
- SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
@@ -200,6 +199,7 @@ enum surface_pixel_format {
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
+ SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_INVALID
/*grow 444 video here if necessary */
@@ -358,15 +358,16 @@ union dc_tiling_info {
} gfx8;
struct {
+ enum swizzle_mode_values swizzle;
unsigned int num_pipes;
- unsigned int num_banks;
+ unsigned int max_compressed_frags;
unsigned int pipe_interleave;
+
+ unsigned int num_banks;
unsigned int num_shader_engines;
unsigned int num_rb_per_se;
- unsigned int max_compressed_frags;
bool shaderEnable;
- enum swizzle_mode_values swizzle;
bool meta_linear;
bool rb_aligned;
bool pipe_aligned;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 8738f27a8708..29f19d57ff7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -128,8 +128,10 @@ struct dc_link {
const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
-/*
- * Return an enumerated dc_link. dc_link order is constant and determined at
+/**
+ * dc_get_link_at_index() - Return an enumerated dc_link.
+ *
+ * dc_link order is constant and determined at
* boot time. They cannot be created or destroyed.
* Use dc_get_caps() to get number of links.
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index c5bd1fbb6982..be34d638e15d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -56,6 +56,7 @@ struct dc_stream_state {
struct dc_crtc_timing_adjust adjust;
struct dc_info_packet vrr_infopacket;
struct dc_info_packet vsc_infopacket;
+ struct dc_info_packet vsp_infopacket;
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -104,8 +105,6 @@ struct dc_stream_state {
bool dpms_off;
bool apply_edp_fast_boot_optimization;
- struct dc_stream_status status;
-
struct dc_cursor_attributes cursor_attributes;
struct dc_cursor_position cursor_position;
uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
@@ -131,11 +130,13 @@ struct dc_stream_update {
struct dc_crtc_timing_adjust *adjust;
struct dc_info_packet *vrr_infopacket;
struct dc_info_packet *vsc_infopacket;
+ struct dc_info_packet *vsp_infopacket;
bool *dpms_off;
struct colorspace_transform *gamut_remap;
enum dc_color_space *output_color_space;
+ enum dc_dither_option *dither_option;
struct dc_csc_transform *output_csc_transform;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 6e12d640d020..0b20ae23f169 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -73,10 +73,18 @@ struct hw_asic_id {
void *atombios_base_address;
};
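+/*
+ * Counters for register reads/writes; presumably consumed by the
+ * PERF_TRACE_CTX() instrumentation introduced alongside this struct.
+ */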
+struct dc_perf_trace {
+ unsigned long read_count;
+ unsigned long write_count;
+ unsigned long last_entry_read;
+ unsigned long last_entry_write;
+};
+
struct dc_context {
struct dc *dc;
void *driver_context; /* e.g. amdgpu_device */
+ struct dc_perf_trace *perf_trace;
void *cgs_device;
enum dce_environment dce_environment;
@@ -191,7 +199,6 @@ union display_content_support {
};
struct dc_panel_patch {
- unsigned int disconnect_delay;
unsigned int dppowerup_delay;
unsigned int extra_t12_ms;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index 9a28a04417d1..afd287f08bc9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -94,7 +94,7 @@ static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/*ClocksStatePerformance*/
{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
-static int dentist_get_divider_from_did(int did)
+int dentist_get_divider_from_did(int did)
{
if (did < DENTIST_BASE_DID_1)
did = DENTIST_BASE_DID_1;
@@ -277,7 +277,8 @@ static int dce_set_clock(
if (requested_clk_khz == 0)
clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
- dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
+ dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
return actual_clock;
}
@@ -324,9 +325,11 @@ int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
bp->funcs->set_dce_clock(bp, &dce_clk_params);
if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
- if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
- dmcu->funcs->set_psr_wait_loop(dmcu,
- actual_clock / 1000 / 7);
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+ if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_clock / 1000 / 7);
+ }
}
clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
@@ -588,6 +591,8 @@ static void dce11_pplib_apply_display_requirements(
dc,
context->bw.dce.sclk_khz);
+ pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw.dce.sclk_deep_sleep_khz;
@@ -671,6 +676,11 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
+ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+ /*TODO: W/A for dal3 linux, investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
/* get max clock state from PPLIB */
@@ -685,6 +695,8 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
}
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+ context->bw.dce.dispclk_khz = unpatched_disp_clk;
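+ /* restore the unpadded dispclk so the 15% bump above does not leak into the stored context */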
}
static void dce12_update_clocks(struct clk_mgr *clk_mgr,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
index 046077797416..3bceb31d910d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
@@ -165,4 +165,6 @@ struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
+int dentist_get_divider_from_did(int did);
+
#endif /* _DCE_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index c47c81883d3c..cce0d18f91da 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -908,7 +908,6 @@ static void dce110_stream_encoder_dp_blank(
struct stream_encoder *enc)
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
- uint32_t retries = 0;
uint32_t reg1 = 0;
uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
@@ -926,30 +925,28 @@ static void dce110_stream_encoder_dp_blank(
* (2 = start of the next vertical blank) */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
/* Larger delay to wait until VBLANK - use max retry of
- * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
- * a little more because we may not trust delay accuracy.
- */
+ * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+ * a little more because we may not trust delay accuracy.
+ */
max_retries = DP_BLANK_MAX_RETRY * 150;
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
/* the encoder stops sending the video stream
- * at the start of the vertical blanking.
- * Poll for DP_VID_STREAM_STATUS == 0
- */
+ * at the start of the vertical blanking.
+ * Poll for DP_VID_STREAM_STATUS == 0
+ */
REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
0,
10, max_retries);
- ASSERT(retries <= max_retries);
-
/* Tell the DP encoder to ignore timing from CRTC, must be done after
- * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
- * complete, stream status will be stuck in video stream enabled state,
- * i.e. DP_VID_STREAM_STATUS stuck at 1.
- */
+ * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
+ * complete, stream status will be stuck in video stream enabled state,
+ * i.e. DP_VID_STREAM_STATUS stuck at 1.
+ */
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index bc50a8e25f4f..87771676acac 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -117,6 +117,18 @@ void dce100_prepare_bandwidth(
false);
}
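+/*
+ * Counterpart to dce100_prepare_bandwidth: set safe display marks and call
+ * the clock manager with the safe-to-lower flag set, now that the new
+ * context has been applied.
+ */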
+void dce100_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+ dc->res_pool->clk_mgr->funcs->update_clocks(
+ dc->res_pool->clk_mgr,
+ context,
+ true);
+}
+
/**************************************************************************/
void dce100_hw_sequencer_construct(struct dc *dc)
@@ -125,6 +137,6 @@ void dce100_hw_sequencer_construct(struct dc *dc)
dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
- dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
+ dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 1f7f25013217..52d50e24a995 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -64,65 +64,37 @@ static const struct dce110_compressor_reg_offsets reg_offsets[] = {
static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
-enum fbc_idle_force {
- /* Bit 0 - Display registers updated */
- FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
-
- /* Bit 2 - FBC_GRPH_COMP_EN register updated */
- FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
- /* Bit 3 - FBC_SRC_SEL register updated */
- FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
- /* Bit 4 - FBC_MIN_COMPRESSION register updated */
- FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
- /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
- FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
- /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
- FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
- /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
- FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
-
- /* Bit 24 - Memory write to region 0 defined by MC registers. */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
- /* Bit 25 - Memory write to region 1 defined by MC registers */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
- /* Bit 26 - Memory write to region 2 defined by MC registers */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
- /* Bit 27 - Memory write to region 3 defined by MC registers. */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
-
- /* Bit 28 - Memory write from any client other than MCIF */
- FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
- /* Bit 29 - CG statics screen signal is inactive */
- FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
-};
-
-
static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
{
return 256 * ((pixels + 255) / 256);
}
-static void reset_lb_on_vblank(struct dc_context *ctx)
+static void reset_lb_on_vblank(struct compressor *compressor, uint32_t crtc_inst)
{
- uint32_t value, frame_count;
+ uint32_t value;
+ uint32_t frame_count;
+ uint32_t status_pos;
uint32_t retry = 0;
- uint32_t status_pos =
- dm_read_reg(ctx, mmCRTC_STATUS_POSITION);
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+
+ cp110->offsets = reg_offsets[crtc_inst];
+
+ status_pos = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION));
/* Only if CRTC is enabled and counter is moving we wait for one frame. */
- if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) {
+ if (status_pos != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION))) {
/* Resetting LB on VBlank */
- value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+ value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
- dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
+ dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
- frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
+ frame_count = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT));
for (retry = 10000; retry > 0; retry--) {
- if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
+ if (frame_count != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT)))
break;
udelay(10);
}
@@ -130,13 +102,11 @@ static void reset_lb_on_vblank(struct dc_context *ctx)
dm_error("Frame count did not increase for 100ms.\n");
/* Resetting LB on VBlank */
- value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+ value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
- dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
-
+ dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
}
-
}
static void wait_for_fbc_state_changed(
@@ -226,10 +196,10 @@ void dce110_compressor_enable_fbc(
uint32_t addr;
uint32_t value, misc_value;
-
addr = mmFBC_CNTL;
value = dm_read_reg(compressor->ctx, addr);
set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ /* params->inst is the HW CRTC instance, counting from 0 */
set_reg_field_value(
value,
params->inst,
@@ -238,8 +208,10 @@ void dce110_compressor_enable_fbc(
/* Keep track of enum controller_id FBC is attached to */
compressor->is_enabled = true;
- compressor->attached_inst = params->inst;
- cp110->offsets = reg_offsets[params->inst];
+ /* attached_inst is the SW CRTC instance, counting from 1;
+ * 0 = CONTROLLER_ID_UNDEFINED means no CRTC is attached
+ */
+ compressor->attached_inst = params->inst + CONTROLLER_ID_D0;
/* Toggle it as there is bug in HW */
set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
@@ -268,9 +240,10 @@ void dce110_compressor_enable_fbc(
void dce110_compressor_disable_fbc(struct compressor *compressor)
{
struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+ uint32_t crtc_inst = 0;
if (compressor->options.bits.FBC_SUPPORT) {
- if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ if (dce110_compressor_is_fbc_enabled_in_hw(compressor, &crtc_inst)) {
uint32_t reg_data;
/* Turn off compression */
reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
@@ -284,8 +257,10 @@ void dce110_compressor_disable_fbc(struct compressor *compressor)
wait_for_fbc_state_changed(cp110, false);
}
- /* Sync line buffer - dce100/110 only*/
- reset_lb_on_vblank(compressor->ctx);
+ /* Sync the line buffer of the CRTC that FBC was attached to (dce100/110 only) */
+ if (crtc_inst > CONTROLLER_ID_UNDEFINED && crtc_inst < CONTROLLER_ID_D3)
+ reset_lb_on_vblank(compressor,
+ crtc_inst - CONTROLLER_ID_D0);
}
}
@@ -328,6 +303,8 @@ void dce110_compressor_program_compressed_surface_address_and_pitch(
uint32_t compressed_surf_address_low_part =
compressor->compr_surface_address.addr.low_part;
+ cp110->offsets = reg_offsets[params->inst];
+
/* Clear content first. */
dm_write_reg(
compressor->ctx,
@@ -410,13 +387,7 @@ void dce110_compressor_set_fbc_invalidation_triggers(
value = dm_read_reg(compressor->ctx, addr);
set_reg_field_value(
value,
- fbc_trigger |
- FBC_IDLE_FORCE_GRPH_COMP_EN |
- FBC_IDLE_FORCE_SRC_SEL_CHANGE |
- FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
- FBC_IDLE_FORCE_ALPHA_COMP_EN |
- FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
- FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ fbc_trigger,
FBC_IDLE_FORCE_CLEAR_MASK,
FBC_IDLE_FORCE_CLEAR_MASK);
dm_write_reg(compressor->ctx, addr, value);
@@ -549,7 +520,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor,
compressor->base.channel_interleave_size = 0;
compressor->base.dram_channels_num = 0;
compressor->base.lpt_channels_num = 0;
- compressor->base.attached_inst = 0;
+ compressor->base.attached_inst = CONTROLLER_ID_UNDEFINED;
compressor->base.is_enabled = false;
compressor->base.funcs = &dce110_compressor_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 9724a17e352b..4bf24758217f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1267,10 +1267,19 @@ static void program_scaler(const struct dc *dc,
pipe_ctx->plane_res.scl_data.lb_params.depth,
&pipe_ctx->stream->bit_depth_params);
- if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color)
+ if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) {
+ /*
+ * The way 420 is packed, 2 channels carry Y component, 1 channel
+ * alternate between Cb and Cr, so both channels need the pixel
+ * value for Y
+ */
+ if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ color.color_r_cr = color.color_g_y;
+
pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
pipe_ctx->stream_res.tg,
&color);
+ }
pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
&pipe_ctx->plane_res.scl_data);
@@ -1766,12 +1775,13 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
* Check if FBC can be enabled
*/
static bool should_enable_fbc(struct dc *dc,
- struct dc_state *context,
- uint32_t *pipe_idx)
+ struct dc_state *context,
+ uint32_t *pipe_idx)
{
uint32_t i;
struct pipe_ctx *pipe_ctx = NULL;
struct resource_context *res_ctx = &context->res_ctx;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
ASSERT(dc->fbc_compressor);
@@ -1786,14 +1796,28 @@ static bool should_enable_fbc(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (res_ctx->pipe_ctx[i].stream) {
+
pipe_ctx = &res_ctx->pipe_ctx[i];
- *pipe_idx = i;
- break;
+
+ if (!pipe_ctx)
+ continue;
+
+ /* fbc not applicable on underlay pipe */
+ if (pipe_ctx->pipe_idx != underlay_idx) {
+ *pipe_idx = i;
+ break;
+ }
}
}
- /* Pipe context should be found */
- ASSERT(pipe_ctx);
+ if (i == dc->res_pool->pipe_count)
+ return false;
+
+ if (!pipe_ctx->stream->sink)
+ return false;
+
+ if (!pipe_ctx->stream->sink->link)
+ return false;
/* Only supports eDP */
if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
@@ -1817,8 +1841,9 @@ static bool should_enable_fbc(struct dc *dc,
/*
* Enable FBC
*/
-static void enable_fbc(struct dc *dc,
- struct dc_state *context)
+static void enable_fbc(
+ struct dc *dc,
+ struct dc_state *context)
{
uint32_t pipe_idx = 0;
@@ -1828,10 +1853,9 @@ static void enable_fbc(struct dc *dc,
struct compressor *compr = dc->fbc_compressor;
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
-
params.source_view_width = pipe_ctx->stream->timing.h_addressable;
params.source_view_height = pipe_ctx->stream->timing.v_addressable;
-
+ params.inst = pipe_ctx->stream_res.tg->inst;
compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
compr->funcs->surface_address_and_pitch(compr, &params);
@@ -2046,10 +2070,10 @@ enum dc_status dce110_apply_ctx_to_hw(
return status;
}
- dcb->funcs->set_scratch_critical_state(dcb, false);
-
if (dc->fbc_compressor)
- enable_fbc(dc, context);
+ enable_fbc(dc, dc->current_state);
+
+ dcb->funcs->set_scratch_critical_state(dcb, false);
return DC_OK;
}
@@ -2282,7 +2306,7 @@ static void dce110_enable_per_frame_crtc_position_reset(
int i;
gsl_params.gsl_group = 0;
- gsl_params.gsl_master = grouped_pipes[0]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst;
+ gsl_params.gsl_master = 0;
for (i = 0; i < group_size; i++)
grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock(
@@ -2408,7 +2432,6 @@ static void dce110_program_front_end_for_pipe(
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
- unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
unsigned int i;
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2449,15 +2472,6 @@ static void dce110_program_front_end_for_pipe(
program_scaler(dc, pipe_ctx);
- /* fbc not applicable on Underlay pipe */
- if (dc->fbc_compressor && old_pipe->stream &&
- pipe_ctx->pipe_idx != underlay_idx) {
- if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
- dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
- else
- enable_fbc(dc, dc->current_state);
- }
-
mi->funcs->mem_input_program_surface_config(
mi,
plane_state->format,
@@ -2534,6 +2548,9 @@ static void dce110_apply_ctx_for_surface(
if (num_planes == 0)
return;
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -2576,6 +2593,9 @@ static void dce110_apply_ctx_for_surface(
(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
+
+ if (dc->fbc_compressor)
+ enable_fbc(dc, dc->current_state);
}
static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 6d40b3d54ac1..cdd1d6b7b9f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -41,7 +41,6 @@
#include "dce/dce_mem_input.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
-#include "dce/dce_mem_input.h"
#include "dce/dce_ipp.h"
#include "dce/dce_transform.h"
#include "dce/dce_opp.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
index 20f531d27e2b..54abedbf1b43 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
@@ -223,7 +223,7 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
&dc->res_pool->pp_smu_req;
struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
- struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+ uint32_t requested_dcf_clock_in_khz = 0;
bool send_request_to_increase = false;
bool send_request_to_lower = false;
int display_count;
@@ -263,8 +263,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
// F Clock
if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
- clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000;
notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz);
@@ -293,10 +291,9 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
*/
if (send_request_to_increase) {
/*use dcfclk to request voltage*/
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
- notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
+ notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
if (pp_smu->set_display_requirement)
pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
@@ -317,10 +314,9 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
if (!send_request_to_increase && send_request_to_lower) {
/*use dcfclk to request voltage*/
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
- notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
+ notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
if (pp_smu->set_display_requirement)
pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
@@ -332,12 +328,10 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
*smu_req_cur = smu_req;
}
-
static const struct clk_mgr_funcs dcn1_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn1_update_clocks
};
-
struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
{
struct dc_debug_options *debug = &ctx->dc->debug;
@@ -377,3 +371,5 @@ struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
return &clk_mgr_dce->base;
}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
index 9dbaf6578006..a995eda443a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
@@ -28,6 +28,12 @@
#include "../dce/dce_clk_mgr.h"
+struct clk_bypass {
+ uint32_t dcfclk_bypass;
+ uint32_t dispclk_bypass;
+ uint32_t dprefclk_bypass;
+};
+
void dcn1_pplib_apply_display_requirements(
struct dc *dc,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 3eea44092a04..7469333a2c8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -324,7 +324,7 @@ bool cm_helper_translate_curve_to_hw_format(
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
- PERF_TRACE();
+ PERF_TRACE_CTX(output_tf->ctx);
corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
@@ -513,7 +513,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
- PERF_TRACE();
+ PERF_TRACE_CTX(output_tf->ctx);
corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 4254e7e1a509..c7d1e678ebf5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -100,7 +100,7 @@ bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);
- return true ? false : enable;
+ return enable ? true : false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 74132a1f3046..345af015d061 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -99,6 +99,14 @@ static unsigned int hubp1_get_underflow_status(struct hubp *hubp)
return hubp_underflow;
}
+
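+/* Clear the latched HUBP underflow status (HUBP_UNDERFLOW_CLEAR field) so later underflows are visible again */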
+void hubp1_clear_underflow(struct hubp *hubp)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1);
+}
+
static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@@ -565,19 +573,6 @@ void hubp1_program_deadline(
REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
- if (REG(PREFETCH_SETTINS))
- REG_SET_2(PREFETCH_SETTINS, 0,
- DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
- VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
- else
- REG_SET_2(PREFETCH_SETTINGS, 0,
- DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
- VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
-
- REG_SET_2(VBLANK_PARAMETERS_0, 0,
- DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
- DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
-
REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
@@ -585,9 +580,6 @@ void hubp1_program_deadline(
REG_SET(VBLANK_PARAMETERS_1, 0,
REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
- REG_SET(VBLANK_PARAMETERS_3, 0,
- REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
-
if (REG(NOM_PARAMETERS_0))
REG_SET(NOM_PARAMETERS_0, 0,
DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
@@ -602,27 +594,13 @@ void hubp1_program_deadline(
REG_SET(NOM_PARAMETERS_5, 0,
REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
- REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
- REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
- REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
-
REG_SET_2(PER_LINE_DELIVERY, 0,
REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
- if (REG(PREFETCH_SETTINS_C))
- REG_SET(PREFETCH_SETTINS_C, 0,
- VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
- else
- REG_SET(PREFETCH_SETTINGS_C, 0,
- VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
-
REG_SET(VBLANK_PARAMETERS_2, 0,
REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
- REG_SET(VBLANK_PARAMETERS_4, 0,
- REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
-
if (REG(NOM_PARAMETERS_2))
REG_SET(NOM_PARAMETERS_2, 0,
DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
@@ -642,10 +620,6 @@ void hubp1_program_deadline(
QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
- REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
- MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
- QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
-
/* TTU - per luma/chroma */
/* Assumed surf0 is luma and 1 is chroma */
@@ -654,25 +628,15 @@ void hubp1_program_deadline(
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
- REG_SET(DCN_SURF0_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_l);
-
REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
- REG_SET(DCN_SURF1_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_c);
-
REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
- REG_SET(DCN_CUR0_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
}
static void hubp1_setup(
@@ -690,6 +654,48 @@ static void hubp1_setup(
hubp1_vready_workaround(hubp, pipe_dest);
}
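+/*
+ * Program the DLG/TTU registers that depend on the other pipes' state
+ * (prefetch, vblank and pre-delivery parameters). These were split out of
+ * hubp1_program_deadline so they can be reprogrammed for every active pipe
+ * when one pipe takes a full update.
+ */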
+static void hubp1_setup_interdependent(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_SET_2(PREFETCH_SETTINS, 0,
+ DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+
+ REG_SET(PREFETCH_SETTINS_C, 0,
+ VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+
+ REG_SET_2(VBLANK_PARAMETERS_0, 0,
+ DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+
+ REG_SET(VBLANK_PARAMETERS_3, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+ REG_SET(VBLANK_PARAMETERS_4, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+ REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+
+ REG_SET(DCN_SURF0_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_l);
+ REG_SET(DCN_SURF1_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_c);
+ REG_SET(DCN_CUR0_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
+
+ REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
+ MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
+ QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+}
+
bool hubp1_is_flip_pending(struct hubp *hubp)
{
uint32_t flip_pending = 0;
@@ -1178,6 +1184,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
hubp1_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp1_setup,
+ .hubp_setup_interdependent = hubp1_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp1_set_vm_system_aperture_settings,
.hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
.set_blank = hubp1_set_blank,
@@ -1190,6 +1197,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_clk_cntl = hubp1_clk_cntl,
.hubp_vtg_sel = hubp1_vtg_sel,
.hubp_read_state = hubp1_read_state,
+ .hubp_clear_underflow = hubp1_clear_underflow,
.hubp_disable_control = hubp1_disable_control,
.hubp_get_underflow_status = hubp1_get_underflow_status,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 4890273b632b..62d4232e7796 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -251,6 +251,7 @@
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
@@ -435,6 +436,7 @@
type HUBP_NO_OUTSTANDING_REQ;\
type HUBP_VTG_SEL;\
type HUBP_UNDERFLOW_STATUS;\
+ type HUBP_UNDERFLOW_CLEAR;\
type NUM_PIPES;\
type NUM_BANKS;\
type PIPE_INTERLEAVE;\
@@ -739,6 +741,7 @@ void dcn10_hubp_construct(
const struct dcn_mi_mask *hubp_mask);
void hubp1_read_state(struct hubp *hubp);
+void hubp1_clear_underflow(struct hubp *hubp);
enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 87495dea45ec..91e015e14355 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1227,7 +1227,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
tf = plane_state->in_transfer_func;
if (plane_state->gamma_correction &&
- !plane_state->gamma_correction->is_identity
+ !dpp_base->ctx->dc->debug.always_use_regamma
+ && !plane_state->gamma_correction->is_identity
&& dce_use_lut(plane_state->format))
dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
@@ -1400,7 +1401,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
grouped_pipes[i]->stream_res.tg,
- grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
+ 0,
&grouped_pipes[i]->stream->triggered_crtc_reset);
DC_SYNC_INFO("Waiting for trigger\n");
@@ -1770,7 +1771,7 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
}
}
-static void dcn10_get_surface_visual_confirm_color(
+void dcn10_get_surface_visual_confirm_color(
const struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
@@ -1806,7 +1807,7 @@ static void dcn10_get_surface_visual_confirm_color(
}
}
-static void dcn10_get_hdr_visual_confirm_color(
+void dcn10_get_hdr_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
@@ -2067,6 +2068,10 @@ void update_dchubp_dpp(
&pipe_ctx->ttu_regs,
&pipe_ctx->rq_regs,
&pipe_ctx->pipe_dlg_param);
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
}
size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport;
@@ -2154,6 +2159,15 @@ static void dcn10_blank_pixel_data(
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
+ /*
+ * The way 420 is packed, 2 channels carry the Y component and 1 channel
+ * alternates between Cb and Cr, so both channels need the pixel
+ * value for Y
+ */
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ black_color.color_r_cr = black_color.color_g_y;
+
+
if (stream_res->tg->funcs->set_blank_color)
stream_res->tg->funcs->set_blank_color(
stream_res->tg,
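[Editor's illustration, not part of the patch: a minimal C sketch of the 4:2:0 black-color adjustment described in the comment above. The struct layout is assumed for the example; only the field names follow the hunk.]

/* Sketch: for YCbCr 4:2:0 blanking, the channel that alternates Cb/Cr must
 * also carry the Y pixel value, so "black" is Y in the two Y channels plus
 * Cr set to the same Y value.
 */
struct example_tg_color {
	unsigned int color_r_cr;
	unsigned int color_g_y;
	unsigned int color_b_cb;
};

static void example_fixup_420_black(struct example_tg_color *black_color)
{
	black_color->color_r_cr = black_color->color_g_y;
}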
@@ -2337,6 +2351,34 @@ static void dcn10_apply_ctx_for_surface(
dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+ if (top_pipe_to_program->plane_state &&
+ top_pipe_to_program->plane_state->update_flags.bits.full_update)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ /* Skip inactive pipes and ones already updated */
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream
+ || !pipe_ctx->plane_state)
+ continue;
+
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+
+ pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
+ pipe_ctx->plane_res.hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream
+ || !pipe_ctx->plane_state)
+ continue;
+
+ dcn10_pipe_control_lock(dc, pipe_ctx, false);
+ }
+
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
@@ -2710,6 +2752,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_avmute = dce110_set_avmute,
.log_hw_state = dcn10_log_hw_state,
.get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
.edp_backlight_control = hwss_edp_backlight_control,
.edp_power_control = hwss_edp_power_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 5e5610c9e600..f8eea10e4c64 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -51,6 +51,8 @@ void dcn10_get_hw_state(
char *pBuf, unsigned int bufSize,
unsigned int mask);
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask);
+
bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
@@ -61,6 +63,14 @@ void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp);
void set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
+void dcn10_get_surface_visual_confirm_color(
+ const struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
+void dcn10_get_hdr_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
void update_dchubp_dpp(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 64158900730f..cd469014baa3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -44,6 +44,7 @@
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
+#include "dcn10_clk_mgr.h"
static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
{
@@ -454,12 +455,6 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
remaining_buffer -= chars_printed;
pBuf += chars_printed;
-
- // Clear underflow for debug purposes
- // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
- // This function is called only from Windows or Diags test environment, hence it's safe to clear
- // it from here without affecting the original intent.
- tg->funcs->clear_optc_underflow(tg);
}
}
@@ -469,19 +464,75 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned int bufSize)
{
unsigned int chars_printed = 0;
+ unsigned int remaining_buffer = bufSize;
- chars_printed = snprintf_count(pBuf, bufSize, "dcfclk_khz,dcfclk_deep_sleep_khz,dispclk_khz,"
- "dppclk_khz,max_supported_dppclk_khz,fclk_khz,socclk_khz\n"
- "%d,%d,%d,%d,%d,%d,%d\n",
+ chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk,"
+ "dppclk,fclk,socclk\n"
+ "%d,%d,%d,%d,%d,%d\n",
dc->current_state->bw.dcn.clk.dcfclk_khz,
dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
dc->current_state->bw.dcn.clk.dispclk_khz,
dc->current_state->bw.dcn.clk.dppclk_khz,
- dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
dc->current_state->bw.dcn.clk.fclk_khz,
dc->current_state->bw.dcn.clk.socclk_khz);
- return chars_printed;
+ remaining_buffer -= chars_printed;
+ pBuf += chars_printed;
+
+ return bufSize - remaining_buffer;
+}
+
+static void dcn10_clear_otpc_underflow(struct dc *dc)
+{
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ for (i = 0; i < pool->timing_generator_count; i++) {
+ struct timing_generator *tg = pool->timing_generators[i];
+ struct dcn_otg_state s = {0};
+
+ optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+
+ if (s.otg_enabled & 1)
+ tg->funcs->clear_optc_underflow(tg);
+ }
+}
+
+static void dcn10_clear_hubp_underflow(struct dc *dc)
+{
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct hubp *hubp = pool->hubps[i];
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
+
+ hubp->funcs->hubp_read_state(hubp);
+
+ if (!s->blank_en)
+ hubp->funcs->hubp_clear_underflow(hubp);
+ }
+}
+
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask)
+{
+ /*
+ * Mask Format
+ * Bit 0 - 31: Status bit to clear
+ *
+ * Mask = 0x0 means clear all status bits
+ */
+ const unsigned int DC_HW_STATE_MASK_HUBP_UNDERFLOW = 0x1;
+ const unsigned int DC_HW_STATE_MASK_OTPC_UNDERFLOW = 0x2;
+
+ if (mask == 0x0)
+ mask = 0xFFFFFFFF;
+
+ if (mask & DC_HW_STATE_MASK_HUBP_UNDERFLOW)
+ dcn10_clear_hubp_underflow(dc);
+
+ if (mask & DC_HW_STATE_MASK_OTPC_UNDERFLOW)
+ dcn10_clear_otpc_underflow(dc);
}
void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask)
@@ -491,16 +542,16 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
* Bit 0 - 15: Hardware block mask
* Bit 15: 1 = Invariant Only, 0 = All
*/
- const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1;
- const unsigned int DC_HW_STATE_MASK_HUBP = 0x2;
- const unsigned int DC_HW_STATE_MASK_RQ = 0x4;
- const unsigned int DC_HW_STATE_MASK_DLG = 0x8;
- const unsigned int DC_HW_STATE_MASK_TTU = 0x10;
- const unsigned int DC_HW_STATE_MASK_CM = 0x20;
- const unsigned int DC_HW_STATE_MASK_MPCC = 0x40;
- const unsigned int DC_HW_STATE_MASK_OTG = 0x80;
- const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100;
- const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000;
+ const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1;
+ const unsigned int DC_HW_STATE_MASK_HUBP = 0x2;
+ const unsigned int DC_HW_STATE_MASK_RQ = 0x4;
+ const unsigned int DC_HW_STATE_MASK_DLG = 0x8;
+ const unsigned int DC_HW_STATE_MASK_TTU = 0x10;
+ const unsigned int DC_HW_STATE_MASK_CM = 0x20;
+ const unsigned int DC_HW_STATE_MASK_MPCC = 0x40;
+ const unsigned int DC_HW_STATE_MASK_OTG = 0x80;
+ const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100;
+ const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000;
unsigned int chars_printed = 0;
unsigned int remaining_buf_size = bufSize;
@@ -556,6 +607,9 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
remaining_buf_size -= chars_printed;
}
- if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0)
+ if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0) {
chars_printed = dcn10_get_clock_states(dc, pBuf, remaining_buf_size);
+ pBuf += chars_printed;
+ remaining_buf_size -= chars_printed;
+ }
}
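[Editor's illustration, not part of the patch: how a debug caller might drive the new clear_status_bits hook. The mask bits follow the definitions inside dcn10_clear_status_bits() above; the wrapper name is hypothetical, and dc->hwss holding the sequencer funcs is assumed from the surrounding driver code.]

static void example_clear_underflow_status(struct dc *dc)
{
	/* 0x0 is interpreted as "clear all status bits" */
	if (dc->hwss.clear_status_bits)
		dc->hwss.clear_status_bits(dc, 0x0);

	/* or clear only HUBP underflow (bit 0) and leave OTPC underflow sticky */
	if (dc->hwss.clear_status_bits)
		dc->hwss.clear_status_bits(dc, 0x1);
}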
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 7d1f66797cb3..7c138615f17d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -335,9 +335,8 @@ void optc1_program_timing(
/* Enable stereo - only when we need to pack 3D frame. Other types
* of stereo handled in explicit call
*/
- h_div_2 = (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ?
- 1 : 0;
+ h_div_2 = optc1_is_two_pixels_per_containter(&patched_crtc_timing);
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_BY2, h_div_2);
@@ -360,20 +359,19 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab
static void optc1_unblank_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t vertical_interrupt_enable = 0;
-
- REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
- OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &vertical_interrupt_enable);
-
- /* temporary work around for vertical interrupt, once vertical interrupt enabled,
- * this check will be removed.
- */
- if (vertical_interrupt_enable)
- optc1_set_blank_data_double_buffer(optc, true);
REG_UPDATE_2(OTG_BLANK_CONTROL,
OTG_BLANK_DATA_EN, 0,
OTG_BLANK_DE_MODE, 0);
+
+ /* W/A for automated testing
+ * Automated testing will fail the underflow test as there
+ * are sporadic underflows which occur during the optc blank
+ * sequence. As a w/a, clear underflow on unblank.
+ * This prevents the failure, but will not mask actual
+ * underflows that affect real use cases.
+ */
+ optc1_clear_optc_underflow(optc);
}
/**
@@ -1422,3 +1420,9 @@ void dcn10_timing_generator_init(struct optc *optc1)
optc1->min_h_sync_width = 8;
optc1->min_v_sync_width = 1;
}
+
+bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+ return timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index c1b114209fe8..8bacf0b6e27e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -565,4 +565,6 @@ bool optc1_configure_crc(struct timing_generator *optc,
bool optc1_get_crc(struct timing_generator *optc,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
+bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 47dbe4bb294a..5d4772dec0ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -202,7 +202,6 @@ enum dcn10_clk_src_array_id {
#define MMHUB_SR(reg_name)\
.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
-
/* macros to expend register list macro defined in HW object header file
* end *********************/
@@ -436,7 +435,6 @@ static const struct dcn_optc_mask tg_mask = {
TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
-
static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_0),
NBIO_SR(BIOS_SCRATCH_3),
@@ -497,7 +495,6 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
-
static const struct resource_caps res_cap = {
.num_timing_generator = 4,
.num_opp = 4,
@@ -1277,7 +1274,6 @@ static bool construct(
goto fail;
}
}
-
pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 6f9078f3c4d3..b8b5525a389a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -766,7 +766,6 @@ void enc1_stream_encoder_dp_blank(
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t retries = 0;
uint32_t reg1 = 0;
uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
@@ -803,8 +802,6 @@ void enc1_stream_encoder_dp_blank(
0,
10, max_retries);
- ASSERT(retries <= max_retries);
-
/* Tell the DP encoder to ignore timing from CRTC, must be done after
* the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
* complete, stream status will be stuck in video stream enabled state,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_event_log.h b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
index 34a701ca879e..65663f4d93e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_event_log.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
@@ -33,6 +33,7 @@
#define EVENT_LOG_AUX_REQ(ddc, type, action, address, len, data)
#define EVENT_LOG_AUX_REP(ddc, type, replyStatus, len, data)
+#define EVENT_LOG_CUST_MSG(tag, a, ...)
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index beb08fd12b1d..0029a39efb1c 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -102,7 +102,7 @@ struct pp_smu_funcs_rv {
*/
void (*set_display_count)(struct pp_smu *pp, int count);
- /* which SMU message? are reader and writer WM separate SMU msg? */
+ /* reader and writer WMs are sent together as part of one table */
/*
* PPSMC_MSG_SetDriverDramAddrHigh
* PPSMC_MSG_SetDriverDramAddrLow
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 28128c02de00..1961cc6d9143 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -31,6 +31,8 @@
#define __DM_SERVICES_H__
+#include "amdgpu_dm_trace.h"
+
/* TODO: remove when DC is complete. */
#include "dm_services_types.h"
#include "logger_interface.h"
@@ -70,6 +72,7 @@ static inline uint32_t dm_read_reg_func(
}
#endif
value = cgs_read_register(ctx->cgs_device, address);
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
return value;
}
@@ -90,6 +93,7 @@ static inline void dm_write_reg_func(
}
#endif
cgs_write_register(ctx->cgs_device, address, value);
+ trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
static inline uint32_t dm_read_index_reg(
@@ -351,8 +355,12 @@ unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
/*
* performance tracing
*/
-void dm_perf_trace_timestamp(const char *func_name, unsigned int line);
-#define PERF_TRACE() dm_perf_trace_timestamp(__func__, __LINE__)
+#define PERF_TRACE() trace_amdgpu_dc_performance(CTX->perf_trace->read_count,\
+ CTX->perf_trace->write_count, &CTX->perf_trace->last_entry_read,\
+ &CTX->perf_trace->last_entry_write, __func__, __LINE__)
+#define PERF_TRACE_CTX(__CTX) trace_amdgpu_dc_performance(__CTX->perf_trace->read_count,\
+ __CTX->perf_trace->write_count, &__CTX->perf_trace->last_entry_read,\
+ &__CTX->perf_trace->last_entry_write, __func__, __LINE__)
/*
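[Editor's illustration, not part of the patch: usage of the reworked PERF_TRACE macros. Both expand to trace_amdgpu_dc_performance() and assume a dc_context reachable under the name CTX, or passed explicitly to PERF_TRACE_CTX(); the function below is a hypothetical caller.]

static void example_traced_sequence(struct dc *dc)
{
	struct dc_context *CTX = dc->ctx;	/* PERF_TRACE() expects the name CTX */

	PERF_TRACE();			/* logs current register read/write counts */

	/* ... program hardware ... */

	PERF_TRACE_CTX(dc->ctx);	/* same event, context given explicitly */
}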
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index f20161c5706d..dada04296025 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -56,7 +56,6 @@ struct gpio_service *dal_gpio_service_create(
struct dc_context *ctx)
{
struct gpio_service *service;
-
uint32_t index_of_id;
service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
@@ -78,44 +77,33 @@ struct gpio_service *dal_gpio_service_create(
goto failure_1;
}
- /* allocate and initialize business storage */
+ /* allocate and initialize busyness storage */
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
index_of_id = 0;
service->ctx = ctx;
do {
uint32_t number_of_bits =
service->factory.number_of_pins[index_of_id];
+ uint32_t i = 0;
- uint32_t number_of_uints =
- (number_of_bits + bits_per_uint - 1) /
- bits_per_uint;
-
- uint32_t *slot;
-
- if (number_of_bits) {
- uint32_t index_of_uint = 0;
+ if (number_of_bits) {
+ service->busyness[index_of_id] =
+ kcalloc(number_of_bits, sizeof(char),
+ GFP_KERNEL);
- slot = kcalloc(number_of_uints,
- sizeof(uint32_t),
- GFP_KERNEL);
-
- if (!slot) {
+ if (!service->busyness[index_of_id]) {
BREAK_TO_DEBUGGER();
goto failure_2;
}
do {
- slot[index_of_uint] = 0;
-
- ++index_of_uint;
- } while (index_of_uint < number_of_uints);
- } else
- slot = NULL;
-
- service->busyness[index_of_id] = slot;
+ service->busyness[index_of_id][i] = 0;
+ ++i;
+ } while (i < number_of_bits);
+ } else {
+ service->busyness[index_of_id] = NULL;
+ }
++index_of_id;
} while (index_of_id < GPIO_ID_COUNT);
@@ -125,13 +113,8 @@ struct gpio_service *dal_gpio_service_create(
failure_2:
while (index_of_id) {
- uint32_t *slot;
-
--index_of_id;
-
- slot = service->busyness[index_of_id];
-
- kfree(slot);
+ kfree(service->busyness[index_of_id]);
}
failure_1:
@@ -169,9 +152,7 @@ void dal_gpio_service_destroy(
uint32_t index_of_id = 0;
do {
- uint32_t *slot = (*ptr)->busyness[index_of_id];
-
- kfree(slot);
+ kfree((*ptr)->busyness[index_of_id]);
++index_of_id;
} while (index_of_id < GPIO_ID_COUNT);
@@ -192,11 +173,7 @@ static bool is_pin_busy(
enum gpio_id id,
uint32_t en)
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
- const uint32_t *slot = service->busyness[id] + (en / bits_per_uint);
-
- return 0 != (*slot & (1 << (en % bits_per_uint)));
+ return service->busyness[id][en];
}
static void set_pin_busy(
@@ -204,10 +181,7 @@ static void set_pin_busy(
enum gpio_id id,
uint32_t en)
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
- service->busyness[id][en / bits_per_uint] |=
- (1 << (en % bits_per_uint));
+ service->busyness[id][en] = true;
}
static void set_pin_free(
@@ -215,10 +189,7 @@ static void set_pin_free(
enum gpio_id id,
uint32_t en)
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
- service->busyness[id][en / bits_per_uint] &=
- ~(1 << (en % bits_per_uint));
+ service->busyness[id][en] = false;
}
enum gpio_result dal_gpio_service_open(
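[Editor's comparison, not part of the patch: the busyness lookup before and after this change, in simplified form. The byte-per-pin layout trades a little memory for removing the bit-packing arithmetic.]

/* Before: one bit per pin, packed into uint32_t slots. */
static bool example_pin_busy_bits(const uint32_t *slots, uint32_t en)
{
	const uint32_t bits_per_uint = sizeof(uint32_t) * 8;

	return (slots[en / bits_per_uint] >> (en % bits_per_uint)) & 1;
}

/* After: one byte per pin, indexed directly by 'en'. */
static bool example_pin_busy_bytes(const char *busyness, uint32_t en)
{
	return busyness[en];
}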
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
index c7f3081f59cc..1d501a43d13b 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
@@ -36,10 +36,9 @@ struct gpio_service {
/*
* @brief
* Business storage.
- * For each member of 'enum gpio_id',
- * store array of bits (packed into uint32_t slots),
- * index individual bit by 'en' value */
- uint32_t *busyness[GPIO_ID_COUNT];
+ * one byte for each member of 'enum gpio_id'
+ */
+ char *busyness[GPIO_ID_COUNT];
};
enum gpio_result dal_gpio_service_open(
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index a683f4102e65..c2028c4744a6 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -79,6 +79,7 @@ bool dal_hw_factory_init(
dal_hw_factory_dce110_init(factory);
return true;
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
dal_hw_factory_dce120_init(factory);
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 096f45628630..236ca28784a9 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -76,6 +76,7 @@ bool dal_hw_translate_init(
dal_hw_translate_dce110_init(translate);
return true;
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
dal_hw_translate_dce120_init(translate);
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index e56093f26eed..1ad6e49102ff 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -90,6 +90,7 @@ struct i2caux *dal_i2caux_create(
case DCE_VERSION_10_0:
return dal_i2caux_dce100_create(ctx);
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
return dal_i2caux_dce120_create(ctx);
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
diff --git a/drivers/gpu/drm/amd/display/dc/inc/compressor.h b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
index bcb18f5e1e60..7a147a9762a0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/compressor.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
@@ -77,6 +77,7 @@ struct compressor_funcs {
};
struct compressor {
struct dc_context *ctx;
+ /* CONTROLLER_ID_D0 + instance, CONTROLLER_ID_UNDEFINED = 0 */
uint32_t attached_inst;
bool is_enabled;
const struct compressor_funcs *funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index e3ee96afa60e..b168a5e9dd9d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -272,6 +272,17 @@ union bw_context {
struct dce_bw_output dce;
};
+/**
+ * struct dc_state - The full description of a state requested by a user
+ *
+ * @streams: Stream properties
+ * @stream_status: The planes on a given stream
+ * @res_ctx: Persistent state of resources
+ * @bw: The output from bandwidth and watermark calculations
+ * @pp_display_cfg: PowerPlay clocks and settings
+ * @dcn_bw_vars: non-stack memory to support bandwidth calculations
+ *
+ */
struct dc_state {
struct dc_stream_state *streams[MAX_PIPES];
struct dc_stream_status stream_status[MAX_PIPES];
@@ -279,7 +290,6 @@ struct dc_state {
struct resource_context res_ctx;
- /* The output from BW and WM calculations. */
union bw_context bw;
/* Note: these are big structures, do *not* put on stack! */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index 4550747fb61c..cb85eaa9857f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -32,6 +32,13 @@ enum dmcu_state {
DMCU_RUNNING = 1
};
+struct dmcu_version {
+ unsigned int date;
+ unsigned int month;
+ unsigned int year;
+ unsigned int interface_version;
+};
+
struct dmcu {
struct dc_context *ctx;
const struct dmcu_funcs *funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 334c48cdafdc..04c6989aac58 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -63,6 +63,11 @@ struct hubp_funcs {
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+ void (*hubp_setup_interdependent)(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+
void (*dcc_control)(struct hubp *hubp, bool enable,
bool independent_64b_blks);
void (*mem_program_viewport)(
@@ -121,6 +126,7 @@ struct hubp_funcs {
void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
void (*hubp_read_state)(struct hubp *hubp);
+ void (*hubp_clear_underflow)(struct hubp *hubp);
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index e9b702ce02dd..d6a85f48b6d1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -200,6 +200,7 @@ struct hw_sequencer_funcs {
void (*log_hw_state)(struct dc *dc,
struct dc_log_buffer_ctx *log_ctx);
void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask);
+ void (*clear_status_bits)(struct dc *dc, unsigned int mask);
void (*wait_for_mpcc_disconnect)(struct dc *dc,
struct resource_pool *res_pool,
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index f8dbfa5b89f2..7fd78a696800 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -41,6 +41,7 @@ enum as_signal_type {
AS_SIGNAL_TYPE_LVDS,
AS_SIGNAL_TYPE_DISPLAY_PORT,
AS_SIGNAL_TYPE_GPU_PLL,
+ AS_SIGNAL_TYPE_XGMI,
AS_SIGNAL_TYPE_UNKNOWN
};
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index 89627133e188..f5bd869d4320 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -42,6 +42,7 @@ enum dce_version {
DCE_VERSION_11_2,
DCE_VERSION_11_22,
DCE_VERSION_12_0,
+ DCE_VERSION_12_1,
DCE_VERSION_MAX,
DCN_VERSION_1_0,
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 7480f072c375..479b77c2e89e 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -813,20 +813,26 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
const struct hw_x_point *coord_x = coordinate_x;
struct fixed31_32 scaledX = dc_fixpt_zero;
struct fixed31_32 scaledX1 = dc_fixpt_zero;
- struct fixed31_32 max_display = dc_fixpt_from_int(fs_params->max_display);
- struct fixed31_32 min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
- struct fixed31_32 max_content = dc_fixpt_from_int(fs_params->max_content);
- struct fixed31_32 min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
+ struct fixed31_32 max_display;
+ struct fixed31_32 min_display;
+ struct fixed31_32 max_content;
+ struct fixed31_32 min_content;
struct fixed31_32 clip = dc_fixpt_one;
struct fixed31_32 output;
bool use_eetf = false;
bool is_clipped = false;
- struct fixed31_32 sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
+ struct fixed31_32 sdr_white_level;
if (fs_params == NULL || fs_params->max_content == 0 ||
fs_params->max_display == 0)
return false;
+ max_display = dc_fixpt_from_int(fs_params->max_display);
+ min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
+ max_content = dc_fixpt_from_int(fs_params->max_content);
+ min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
+ sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
+
if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
min_display = dc_fixpt_from_fraction(1, 10);
if (fs_params->max_display < 100) // cap at 100 at the top
@@ -1755,7 +1761,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
struct pwl_float_data *rgb_user = NULL;
struct pwl_float_data_ex *curve = NULL;
- struct gamma_pixel *axix_x = NULL;
+ struct gamma_pixel *axis_x = NULL;
struct pixel_gamma_point *coeff = NULL;
enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
bool ret = false;
@@ -1781,10 +1787,10 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
GFP_KERNEL);
if (!curve)
goto curve_alloc_fail;
- axix_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axix_x),
+ axis_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axis_x),
GFP_KERNEL);
- if (!axix_x)
- goto axix_x_alloc_fail;
+ if (!axis_x)
+ goto axis_x_alloc_fail;
coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
GFP_KERNEL);
if (!coeff)
@@ -1797,7 +1803,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
tf = input_tf->tf;
build_evenly_distributed_points(
- axix_x,
+ axis_x,
ramp->num_entries,
dividers);
@@ -1822,7 +1828,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
tf_pts->x_point_at_y1_blue = 1;
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
- coordinates_x, axix_x, curve,
+ coordinates_x, axis_x, curve,
MAX_HW_POINTS, tf_pts,
mapUserRamp && ramp->type != GAMMA_CUSTOM);
if (ramp->type == GAMMA_CUSTOM)
@@ -1832,8 +1838,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
kvfree(coeff);
coeff_alloc_fail:
- kvfree(axix_x);
-axix_x_alloc_fail:
+ kvfree(axis_x);
+axis_x_alloc_fail:
kvfree(curve);
curve_alloc_fail:
kvfree(rgb_user);
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 620a171620ee..1544ed3f1747 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -608,12 +608,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
struct dc_info_packet *infopacket)
{
- if (app_tf != transfer_func_unknown) {
+ if (app_tf != TRANSFER_FUNC_UNKNOWN) {
infopacket->valid = true;
infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active]
- if (app_tf == transfer_func_gamma_22) {
+ if (app_tf == TRANSFER_FUNC_GAMMA_22) {
infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]
}
}
@@ -688,11 +688,11 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
return;
switch (packet_type) {
- case packet_type_fs2:
+ case PACKET_TYPE_FS2:
build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
break;
- case packet_type_vrr:
- case packet_type_fs1:
+ case PACKET_TYPE_VRR:
+ case PACKET_TYPE_FS1:
default:
build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 786b34380f85..5b1c9a4c7643 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -26,15 +26,13 @@
#ifndef MOD_INFO_PACKET_H_
#define MOD_INFO_PACKET_H_
-struct info_packet_inputs {
- const struct dc_stream_state *pStream;
-};
+#include "mod_shared.h"
-struct info_packets {
- struct dc_info_packet *pVscInfoPacket;
-};
+// Forward declarations
+struct dc_stream_state;
+struct dc_info_packet;
-void mod_build_infopackets(struct info_packet_inputs *inputs,
- struct info_packets *info_packets);
+void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet);
#endif
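[Editor's illustration, not part of the patch: with the mod_build_infopackets() wrapper removed in the info_packet.c hunk below, callers invoke mod_build_vsc_infopacket() directly. The stream pointer is assumed to come from the caller's context.]

struct dc_info_packet vsc_packet = {0};

mod_build_vsc_infopacket(stream, &vsc_packet);
if (vsc_packet.valid) {
	/* attach the VSC SDP to the stream's info frames */
}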
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
index 238c431ae483..1bd02c0ac30c 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -23,27 +23,26 @@
*
*/
-
#ifndef MOD_SHARED_H_
#define MOD_SHARED_H_
enum color_transfer_func {
- transfer_func_unknown,
- transfer_func_srgb,
- transfer_func_bt709,
- transfer_func_pq2084,
- transfer_func_pq2084_interim,
- transfer_func_linear_0_1,
- transfer_func_linear_0_125,
- transfer_func_dolbyvision,
- transfer_func_gamma_22,
- transfer_func_gamma_26
+ TRANSFER_FUNC_UNKNOWN,
+ TRANSFER_FUNC_SRGB,
+ TRANSFER_FUNC_BT709,
+ TRANSFER_FUNC_PQ2084,
+ TRANSFER_FUNC_PQ2084_INTERIM,
+ TRANSFER_FUNC_LINEAR_0_1,
+ TRANSFER_FUNC_LINEAR_0_125,
+ TRANSFER_FUNC_GAMMA_22,
+ TRANSFER_FUNC_GAMMA_26
};
enum vrr_packet_type {
- packet_type_vrr,
- packet_type_fs1,
- packet_type_fs2
+ PACKET_TYPE_VRR,
+ PACKET_TYPE_FS1,
+ PACKET_TYPE_FS2
};
+
#endif /* MOD_SHARED_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index ff8bfb9b43b0..db06fab2ad5c 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -25,6 +25,10 @@
#include "mod_info_packet.h"
#include "core_types.h"
+#include "dc_types.h"
+#include "mod_shared.h"
+
+#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
enum ColorimetryRGBDP {
ColorimetryRGB_DP_sRGB = 0,
@@ -41,7 +45,7 @@ enum ColorimetryYCCDP {
ColorimetryYCC_DP_ITU2020YCbCr = 7,
};
-static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet)
{
unsigned int vscPacketRevision = 0;
@@ -159,7 +163,7 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
* DPCD register is exposed in the new Extended Receiver Capability field for DPCD Rev. 1.4
* (and higher). When MISC1. bit 6. is Set to 1, a Source device uses a VSC SDP to indicate
* the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and
- * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become “don’t care”).)
+ * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").)
*/
if (vscPacketRevision == 0x5) {
/* Secondary-data Packet ID = 0 */
@@ -320,10 +324,3 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
-void mod_build_infopackets(struct info_packet_inputs *inputs,
- struct info_packets *info_packets)
-{
- if (info_packets->pVscInfoPacket != NULL)
- mod_build_vsc_infopacket(inputs->pStream, info_packets->pVscInfoPacket);
-}
-
diff --git a/drivers/gpu/drm/amd/display/modules/power/Makefile b/drivers/gpu/drm/amd/display/modules/power/Makefile
new file mode 100644
index 000000000000..87851f892a52
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/Makefile
@@ -0,0 +1,31 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'power' sub-module of DAL.
+#
+
+MOD_POWER = power_helpers.o
+
+AMD_DAL_MOD_POWER = $(addprefix $(AMDDALPATH)/modules/power/,$(MOD_POWER))
+#$(info ************ DAL POWER MODULE MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MOD_POWER)
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
new file mode 100644
index 000000000000..00f63b7dd32f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -0,0 +1,326 @@
+/* Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "power_helpers.h"
+#include "dc/inc/hw/dmcu.h"
+
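+/* Note: despite the name, this rounds to the nearest integer by adding half
+ * the divisor first, e.g. DIV_ROUNDUP(3, 2) == 2 and DIV_ROUNDUP(5, 4) == 1.
+ */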
+#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
+
+/* Possible Min Reduction config from least aggressive to most aggressive
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12
+ * 100 98.0 94.1 94.1 85.1 80.3 75.3 69.4 60.0 57.6 50.2 49.8 40.0 %
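+ * Each entry encodes the percentage as a U0.8 fraction of 255,
+ * e.g. 0xff = 100%, 0x80 = 50.2%, 0x66 = 40.0%.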
+ */
+static const unsigned char min_reduction_table[13] = {
+0xff, 0xfa, 0xf0, 0xf0, 0xd9, 0xcd, 0xc0, 0xb1, 0x99, 0x93, 0x80, 0x82, 0x66};
+
+/* Possible Max Reduction configs from least aggressive to most aggressive
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12
+ * 96.1 89.8 85.1 80.3 69.4 64.7 64.7 50.2 39.6 30.2 30.2 30.2 19.6 %
+ */
+static const unsigned char max_reduction_table[13] = {
+0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32};
+
+/* Predefined ABM configuration sets. We may have different configuration sets
+ * in order to satisfy different power/quality requirements.
+ */
+static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_level] = {
+/* ABM Level 1, ABM Level 2, ABM Level 3, ABM Level 4 */
+{ 2, 5, 7, 8 }, /* Default - Medium aggressiveness */
+{ 2, 5, 8, 11 }, /* Alt #1 - Increased aggressiveness */
+{ 0, 2, 4, 8 }, /* Alt #2 - Minimal aggressiveness */
+{ 3, 6, 10, 12 }, /* Alt #3 - Super aggressiveness */
+};
+
+#define NUM_AMBI_LEVEL 5
+#define NUM_AGGR_LEVEL 4
+#define NUM_POWER_FN_SEGS 8
+#define NUM_BL_CURVE_SEGS 16
+
+/* NOTE: iRAM is 256B in size */
+struct iram_table_v_2 {
+ /* flags */
+ uint16_t flags; /* 0x00 U16 */
+
+ /* parameters for ABM2.0 algorithm */
+ uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */
+ uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */
+ uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */
+ uint8_t bright_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */
+ uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x52 U2.6 */
+ uint8_t dark_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x66 U2.6 */
+ uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x7a U0.8 */
+ uint8_t deviation_gain; /* 0x7f U0.8 */
+
+ /* parameters for crgb conversion */
+ uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
+ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
+ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
+
+ /* parameters for custom curve */
+ /* thresholds for brightness --> backlight */
+ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
+ /* offsets for brightness --> backlight */
+ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
+
+ /* For reading PSR State directly from IRAM */
+ uint8_t psr_state; /* 0xf0 */
+ uint8_t dmcu_interface_version; /* 0xf1 */
+ uint8_t dmcu_date_version_year_b0; /* 0xf2 */
+ uint8_t dmcu_date_version_year_b1; /* 0xf3 */
+ uint8_t dmcu_date_version_month; /* 0xf4 */
+ uint8_t dmcu_date_version_day; /* 0xf5 */
+ uint8_t dmcu_state; /* 0xf6 */
+
+ uint16_t blRampReduction; /* 0xf7 */
+ uint16_t blRampStart; /* 0xf9 */
+ uint8_t dummy5; /* 0xfb */
+ uint8_t dummy6; /* 0xfc */
+ uint8_t dummy7; /* 0xfd */
+ uint8_t dummy8; /* 0xfe */
+ uint8_t dummy9; /* 0xff */
+};
+
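+/* Widen an 8-bit backlight value to 16 bits by replicating the byte
+ * (multiply by 0x101), e.g. 0xAB -> 0xABAB and 0xFF -> 0xFFFF.
+ */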
+static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
+{
+ return (uint16_t)(backlight_8bit * 0x101);
+}
+
+static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
+ struct iram_table_v_2 *table)
+{
+ unsigned int i;
+ unsigned int num_entries = NUM_BL_CURVE_SEGS;
+ unsigned int query_input_8bit;
+ unsigned int query_output_8bit;
+ unsigned int lut_index;
+
+ table->backlight_thresholds[0] = 0;
+ table->backlight_offsets[0] = params.backlight_lut_array[0];
+ table->backlight_thresholds[num_entries-1] = 0xFFFF;
+ table->backlight_offsets[num_entries-1] =
+ params.backlight_lut_array[params.backlight_lut_array_size - 1];
+
+ /* Set up all brightness levels between 0% and 100% (exclusive).
+ * This fills the brightness-to-backlight transform table. The backlight
+ * custom curve describes the transform from brightness to backlight; it
+ * is defined as a set of thresholds and a set of offsets which, together,
+ * imply extrapolation of the custom curve into 16 uniformly spanned
+ * linear segments. Each threshold/offset is represented by a 16 bit
+ * entry in format U4.10.
+ */
+ for (i = 1; i+1 < num_entries; i++) {
+ query_input_8bit = DIV_ROUNDUP((i * 256), num_entries);
+
+ lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
+ ASSERT(lut_index < params.backlight_lut_array_size);
+ query_output_8bit = params.backlight_lut_array[lut_index] >> 8;
+
+ table->backlight_thresholds[i] =
+ backlight_8_to_16(query_input_8bit);
+ table->backlight_offsets[i] =
+ backlight_8_to_16(query_output_8bit);
+ }
+}
+
+bool dmcu_load_iram(struct dmcu *dmcu,
+ struct dmcu_iram_parameters params)
+{
+ struct iram_table_v_2 ram_table;
+ unsigned int set = params.set;
+
+ if (dmcu == NULL)
+ return false;
+
+ if (!dmcu->funcs->is_dmcu_initialized(dmcu))
+ return true;
+
+ memset(&ram_table, 0, sizeof(ram_table));
+
+ ram_table.flags = 0x0;
+ ram_table.deviation_gain = 0xb3;
+
+ ram_table.blRampReduction =
+ cpu_to_be16(params.backlight_ramping_reduction);
+ ram_table.blRampStart =
+ cpu_to_be16(params.backlight_ramping_start);
+
+ ram_table.min_reduction[0][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.min_reduction[1][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.min_reduction[2][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.min_reduction[3][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.min_reduction[4][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[0][0] = max_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[1][0] = max_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[2][0] = max_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[3][0] = max_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[4][0] = max_reduction_table[abm_config[set][0]];
+
+ ram_table.min_reduction[0][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.min_reduction[1][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.min_reduction[2][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.min_reduction[3][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.min_reduction[4][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[0][1] = max_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[1][1] = max_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[2][1] = max_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[3][1] = max_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[4][1] = max_reduction_table[abm_config[set][1]];
+
+ ram_table.min_reduction[0][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.min_reduction[1][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.min_reduction[2][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.min_reduction[3][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.min_reduction[4][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[0][2] = max_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[1][2] = max_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[2][2] = max_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[3][2] = max_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[4][2] = max_reduction_table[abm_config[set][2]];
+
+ ram_table.min_reduction[0][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.min_reduction[1][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.min_reduction[2][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.min_reduction[3][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.min_reduction[4][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[0][3] = max_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[1][3] = max_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[2][3] = max_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[3][3] = max_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[4][3] = max_reduction_table[abm_config[set][3]];
+
+ ram_table.bright_pos_gain[0][0] = 0x20;
+ ram_table.bright_pos_gain[0][1] = 0x20;
+ ram_table.bright_pos_gain[0][2] = 0x20;
+ ram_table.bright_pos_gain[0][3] = 0x20;
+ ram_table.bright_pos_gain[1][0] = 0x20;
+ ram_table.bright_pos_gain[1][1] = 0x20;
+ ram_table.bright_pos_gain[1][2] = 0x20;
+ ram_table.bright_pos_gain[1][3] = 0x20;
+ ram_table.bright_pos_gain[2][0] = 0x20;
+ ram_table.bright_pos_gain[2][1] = 0x20;
+ ram_table.bright_pos_gain[2][2] = 0x20;
+ ram_table.bright_pos_gain[2][3] = 0x20;
+ ram_table.bright_pos_gain[3][0] = 0x20;
+ ram_table.bright_pos_gain[3][1] = 0x20;
+ ram_table.bright_pos_gain[3][2] = 0x20;
+ ram_table.bright_pos_gain[3][3] = 0x20;
+ ram_table.bright_pos_gain[4][0] = 0x20;
+ ram_table.bright_pos_gain[4][1] = 0x20;
+ ram_table.bright_pos_gain[4][2] = 0x20;
+ ram_table.bright_pos_gain[4][3] = 0x20;
+ ram_table.bright_neg_gain[0][1] = 0x00;
+ ram_table.bright_neg_gain[0][2] = 0x00;
+ ram_table.bright_neg_gain[0][3] = 0x00;
+ ram_table.bright_neg_gain[1][0] = 0x00;
+ ram_table.bright_neg_gain[1][1] = 0x00;
+ ram_table.bright_neg_gain[1][2] = 0x00;
+ ram_table.bright_neg_gain[1][3] = 0x00;
+ ram_table.bright_neg_gain[2][0] = 0x00;
+ ram_table.bright_neg_gain[2][1] = 0x00;
+ ram_table.bright_neg_gain[2][2] = 0x00;
+ ram_table.bright_neg_gain[2][3] = 0x00;
+ ram_table.bright_neg_gain[3][0] = 0x00;
+ ram_table.bright_neg_gain[3][1] = 0x00;
+ ram_table.bright_neg_gain[3][2] = 0x00;
+ ram_table.bright_neg_gain[3][3] = 0x00;
+ ram_table.bright_neg_gain[4][0] = 0x00;
+ ram_table.bright_neg_gain[4][1] = 0x00;
+ ram_table.bright_neg_gain[4][2] = 0x00;
+ ram_table.bright_neg_gain[4][3] = 0x00;
+ ram_table.dark_pos_gain[0][0] = 0x00;
+ ram_table.dark_pos_gain[0][1] = 0x00;
+ ram_table.dark_pos_gain[0][2] = 0x00;
+ ram_table.dark_pos_gain[0][3] = 0x00;
+ ram_table.dark_pos_gain[1][0] = 0x00;
+ ram_table.dark_pos_gain[1][1] = 0x00;
+ ram_table.dark_pos_gain[1][2] = 0x00;
+ ram_table.dark_pos_gain[1][3] = 0x00;
+ ram_table.dark_pos_gain[2][0] = 0x00;
+ ram_table.dark_pos_gain[2][1] = 0x00;
+ ram_table.dark_pos_gain[2][2] = 0x00;
+ ram_table.dark_pos_gain[2][3] = 0x00;
+ ram_table.dark_pos_gain[3][0] = 0x00;
+ ram_table.dark_pos_gain[3][1] = 0x00;
+ ram_table.dark_pos_gain[3][2] = 0x00;
+ ram_table.dark_pos_gain[3][3] = 0x00;
+ ram_table.dark_pos_gain[4][0] = 0x00;
+ ram_table.dark_pos_gain[4][1] = 0x00;
+ ram_table.dark_pos_gain[4][2] = 0x00;
+ ram_table.dark_pos_gain[4][3] = 0x00;
+ ram_table.dark_neg_gain[0][0] = 0x00;
+ ram_table.dark_neg_gain[0][1] = 0x00;
+ ram_table.dark_neg_gain[0][2] = 0x00;
+ ram_table.dark_neg_gain[0][3] = 0x00;
+ ram_table.dark_neg_gain[1][0] = 0x00;
+ ram_table.dark_neg_gain[1][1] = 0x00;
+ ram_table.dark_neg_gain[1][2] = 0x00;
+ ram_table.dark_neg_gain[1][3] = 0x00;
+ ram_table.dark_neg_gain[2][0] = 0x00;
+ ram_table.dark_neg_gain[2][1] = 0x00;
+ ram_table.dark_neg_gain[2][2] = 0x00;
+ ram_table.dark_neg_gain[2][3] = 0x00;
+ ram_table.dark_neg_gain[3][0] = 0x00;
+ ram_table.dark_neg_gain[3][1] = 0x00;
+ ram_table.dark_neg_gain[3][2] = 0x00;
+ ram_table.dark_neg_gain[3][3] = 0x00;
+ ram_table.dark_neg_gain[4][0] = 0x00;
+ ram_table.dark_neg_gain[4][1] = 0x00;
+ ram_table.dark_neg_gain[4][2] = 0x00;
+ ram_table.dark_neg_gain[4][3] = 0x00;
+ ram_table.iir_curve[0] = 0x65;
+ ram_table.iir_curve[1] = 0x65;
+ ram_table.iir_curve[2] = 0x65;
+ ram_table.iir_curve[3] = 0x65;
+ ram_table.iir_curve[4] = 0x65;
+ ram_table.crgb_thresh[0] = cpu_to_be16(0x13b6);
+ ram_table.crgb_thresh[1] = cpu_to_be16(0x1648);
+ ram_table.crgb_thresh[2] = cpu_to_be16(0x18e3);
+ ram_table.crgb_thresh[3] = cpu_to_be16(0x1b41);
+ ram_table.crgb_thresh[4] = cpu_to_be16(0x1d46);
+ ram_table.crgb_thresh[5] = cpu_to_be16(0x1f21);
+ ram_table.crgb_thresh[6] = cpu_to_be16(0x2167);
+ ram_table.crgb_thresh[7] = cpu_to_be16(0x2384);
+ ram_table.crgb_offset[0] = cpu_to_be16(0x2999);
+ ram_table.crgb_offset[1] = cpu_to_be16(0x3999);
+ ram_table.crgb_offset[2] = cpu_to_be16(0x4666);
+ ram_table.crgb_offset[3] = cpu_to_be16(0x5999);
+ ram_table.crgb_offset[4] = cpu_to_be16(0x6333);
+ ram_table.crgb_offset[5] = cpu_to_be16(0x7800);
+ ram_table.crgb_offset[6] = cpu_to_be16(0x8c00);
+ ram_table.crgb_offset[7] = cpu_to_be16(0xa000);
+ ram_table.crgb_slope[0] = cpu_to_be16(0x3147);
+ ram_table.crgb_slope[1] = cpu_to_be16(0x2978);
+ ram_table.crgb_slope[2] = cpu_to_be16(0x23a2);
+ ram_table.crgb_slope[3] = cpu_to_be16(0x1f55);
+ ram_table.crgb_slope[4] = cpu_to_be16(0x1c63);
+ ram_table.crgb_slope[5] = cpu_to_be16(0x1a0f);
+ ram_table.crgb_slope[6] = cpu_to_be16(0x178d);
+ ram_table.crgb_slope[7] = cpu_to_be16(0x15ab);
+
+ fill_backlight_transform_table(
+ params, &ram_table);
+
+ return dmcu->funcs->load_iram(
+ dmcu, 0, (char *)(&ram_table), sizeof(ram_table));
+}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
new file mode 100644
index 000000000000..da5df00fedce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -0,0 +1,47 @@
+/* Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MODULES_POWER_POWER_HELPERS_H_
+#define MODULES_POWER_POWER_HELPERS_H_
+
+#include "dc/inc/hw/dmcu.h"
+
+
+enum abm_defines {
+ abm_defines_max_level = 4,
+ abm_defines_max_config = 4,
+};
+
+struct dmcu_iram_parameters {
+ unsigned int *backlight_lut_array;
+ unsigned int backlight_lut_array_size;
+ unsigned int backlight_ramping_reduction;
+ unsigned int backlight_ramping_start;
+ unsigned int set;
+};
+
+bool dmcu_load_iram(struct dmcu *dmcu,
+ struct dmcu_iram_parameters params);
+
+#endif /* MODULES_POWER_POWER_HELPERS_H_ */
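[Editor's illustration, not part of the patch: usage of the new helper. The backlight LUT and its size are assumed to come from the display manager; 'set' selects one of the predefined ABM configuration sets in power_helpers.c.]

static bool example_setup_abm(struct dmcu *dmcu,
		unsigned int *lut, unsigned int lut_size)
{
	struct dmcu_iram_parameters params;

	params.set = 0;				/* default, medium aggressiveness */
	params.backlight_lut_array = lut;	/* luminance -> backlight points */
	params.backlight_lut_array_size = lut_size;
	params.backlight_ramping_reduction = 0;	/* assumption: no ramping */
	params.backlight_ramping_start = 0;

	return dmcu_load_iram(dmcu, params);
}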
diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index 9b9699fc433f..c72cbfe8f684 100644
--- a/drivers/gpu/drm/amd/include/amd_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -52,6 +52,30 @@ struct atif_sbios_requests {
u8 backlight_level; /* panel backlight level (0-255) */
} __packed;
+struct atif_qbtc_arguments {
+ u16 size; /* structure size in bytes (includes size field) */
+ u8 requested_display; /* which display is requested */
+} __packed;
+
+#define ATIF_QBTC_MAX_DATA_POINTS 99
+
+struct atif_qbtc_data_point {
+ u8 luminance; /* luminance in percent */
+ u8 ipnut_signal; /* input signal in range 0-255 */
+} __packed;
+
+struct atif_qbtc_output {
+ u16 size; /* structure size in bytes (includes size field) */
+ u16 flags; /* all zeroes */
+ u8 error_code; /* error code */
+ u8 ac_level; /* default brightness on AC power */
+ u8 dc_level; /* default brightness on DC power */
+ u8 min_input_signal; /* min input signal in range 0-255 */
+ u8 max_input_signal; /* max input signal in range 0-255 */
+ u8 number_of_points; /* number of data points */
+ struct atif_qbtc_data_point data_points[ATIF_QBTC_MAX_DATA_POINTS];
+} __packed;
+
#define ATIF_NOTIFY_MASK 0x3
#define ATIF_NOTIFY_NONE 0
#define ATIF_NOTIFY_81 1
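[Editor's illustration, not part of the patch: a sketch of walking the brightness transfer curve returned by the QBTC query described below. Field names follow the structs defined in this hunk; the dumping function itself is hypothetical.]

static void example_dump_qbtc(const struct atif_qbtc_output *out)
{
	int i;

	if (out->error_code)
		return;	/* on failure, disregard the remaining fields */

	for (i = 0; i < out->number_of_points; i++)
		pr_debug("QBTC point %d: %u%% luminance -> signal %u\n", i,
			 out->data_points[i].luminance,
			 out->data_points[i].ipnut_signal);
}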
@@ -126,26 +150,18 @@ struct atcs_pref_req_output {
* DWORD - supported functions bit vector
*/
/* Notifications mask */
-# define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED (1 << 0)
-# define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED (1 << 1)
# define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2)
# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3)
# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4)
-# define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED (1 << 5)
-# define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED (1 << 6)
# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7)
# define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8)
+# define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED (1 << 12)
/* supported functions vector */
# define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0)
# define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1)
-# define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED (1 << 2)
-# define ATIF_GET_LID_STATE_SUPPORTED (1 << 3)
-# define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED (1 << 4)
-# define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED (1 << 5)
-# define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED (1 << 6)
-# define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED (1 << 7)
# define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12)
-# define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED (1 << 14)
+# define ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED (1 << 15)
+# define ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED (1 << 16)
# define ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED (1 << 20)
#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1
/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
@@ -170,6 +186,10 @@ struct atcs_pref_req_output {
* n (0xd0-0xd9) is specified in notify command code.
* bit 2:
 * 1 - lid changes not reported through int10
+ * bit 3:
+ * 1 - system bios controls overclocking
+ * bit 4:
+ * 1 - enable overclocking
*/
#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2
/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
@@ -177,28 +197,23 @@ struct atcs_pref_req_output {
* OUTPUT:
* WORD - structure size in bytes (includes size field)
* DWORD - pending sbios requests
- * BYTE - panel expansion mode
+ * BYTE - reserved (all zeroes)
* BYTE - thermal state: target gfx controller
* BYTE - thermal state: state id (0: exit state, non-0: state)
* BYTE - forced power state: target gfx controller
- * BYTE - forced power state: state id
+ * BYTE - forced power state: state id (0: forced state, non-0: state)
* BYTE - system power source
* BYTE - panel backlight level (0-255)
+ * BYTE - GPU package power limit: target gfx controller
+ * DWORD - GPU package power limit: value (24:8 fractional format, Watts)
*/
/* pending sbios requests */
-# define ATIF_DISPLAY_SWITCH_REQUEST (1 << 0)
-# define ATIF_EXPANSION_MODE_CHANGE_REQUEST (1 << 1)
# define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2)
# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3)
# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4)
-# define ATIF_DISPLAY_CONF_CHANGE_REQUEST (1 << 5)
-# define ATIF_PX_GFX_SWITCH_REQUEST (1 << 6)
# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7)
# define ATIF_DGPU_DISPLAY_EVENT (1 << 8)
-/* panel expansion mode */
-# define ATIF_PANEL_EXPANSION_DISABLE 0
-# define ATIF_PANEL_EXPANSION_FULL 1
-# define ATIF_PANEL_EXPANSION_ASPECT 2
+# define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST (1 << 12)
/* target gfx controller */
# define ATIF_TARGET_GFX_SINGLE 0
# define ATIF_TARGET_GFX_PX_IGPU 1
@@ -208,76 +223,6 @@ struct atcs_pref_req_output {
# define ATIF_POWER_SOURCE_DC 2
# define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3
# define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4
-#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS 0x3
-/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
- * ARG1:
- * WORD - structure size in bytes (includes size field)
- * WORD - selected displays
- * WORD - connected displays
- * OUTPUT:
- * WORD - structure size in bytes (includes size field)
- * WORD - selected displays
- */
-# define ATIF_LCD1 (1 << 0)
-# define ATIF_CRT1 (1 << 1)
-# define ATIF_TV (1 << 2)
-# define ATIF_DFP1 (1 << 3)
-# define ATIF_CRT2 (1 << 4)
-# define ATIF_LCD2 (1 << 5)
-# define ATIF_DFP2 (1 << 7)
-# define ATIF_CV (1 << 8)
-# define ATIF_DFP3 (1 << 9)
-# define ATIF_DFP4 (1 << 10)
-# define ATIF_DFP5 (1 << 11)
-# define ATIF_DFP6 (1 << 12)
-#define ATIF_FUNCTION_GET_LID_STATE 0x4
-/* ARG0: ATIF_FUNCTION_GET_LID_STATE
- * ARG1: none
- * OUTPUT:
- * WORD - structure size in bytes (includes size field)
- * BYTE - lid state (0: open, 1: closed)
- *
- * GET_LID_STATE only works at boot and resume, for general lid
- * status, use the kernel provided status
- */
-#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS 0x5
-/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
- * ARG1: none
- * OUTPUT:
- * WORD - structure size in bytes (includes size field)
- * BYTE - 0
- * BYTE - TV standard
- */
-# define ATIF_TV_STD_NTSC 0
-# define ATIF_TV_STD_PAL 1
-# define ATIF_TV_STD_PALM 2
-# define ATIF_TV_STD_PAL60 3
-# define ATIF_TV_STD_NTSCJ 4
-# define ATIF_TV_STD_PALCN 5
-# define ATIF_TV_STD_PALN 6
-# define ATIF_TV_STD_SCART_RGB 9
-#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS 0x6
-/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
- * ARG1:
- * WORD - structure size in bytes (includes size field)
- * BYTE - 0
- * BYTE - TV standard
- * OUTPUT: none
- */
-#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS 0x7
-/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
- * ARG1: none
- * OUTPUT:
- * WORD - structure size in bytes (includes size field)
- * BYTE - panel expansion mode
- */
-#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS 0x8
-/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
- * ARG1:
- * WORD - structure size in bytes (includes size field)
- * BYTE - panel expansion mode
- * OUTPUT: none
- */
#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD
/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
* ARG1:
@@ -286,21 +231,43 @@ struct atcs_pref_req_output {
 * BYTE - current temperature (degrees Celsius)
* OUTPUT: none
*/
-#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES 0xF
-/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
- * ARG1: none
+#define ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS 0x10
+/* ARG0: ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - requested display
* OUTPUT:
- * WORD - number of gfx devices
- * WORD - device structure size in bytes (excludes device size field)
- * DWORD - flags \
- * WORD - bus number } repeated structure
- * WORD - device number /
+ * WORD - structure size in bytes (includes size field)
+ * WORD - flags (currently all 16 bits are reserved)
+ * BYTE - error code (on failure, disregard all below fields)
+ * BYTE - AC level (default brightness in percent when machine has full power)
+ * BYTE - DC level (default brightness in percent when machine is on battery)
+ * BYTE - min input signal, in range 0-255, corresponding to 0% backlight
+ * BYTE - max input signal, in range 0-255, corresponding to 100% backlight
+ * BYTE - number of reported data points
+ * BYTE - luminance level in percent \ repeated structure
+ * BYTE - input signal in range 0-255 / does not have entries for 0% and 100%
+ */
+/* requested display */
+# define ATIF_QBTC_REQUEST_LCD1 0
+# define ATIF_QBTC_REQUEST_CRT1 1
+# define ATIF_QBTC_REQUEST_DFP1 3
+# define ATIF_QBTC_REQUEST_CRT2 4
+# define ATIF_QBTC_REQUEST_LCD2 5
+# define ATIF_QBTC_REQUEST_DFP2 7
+# define ATIF_QBTC_REQUEST_DFP3 9
+# define ATIF_QBTC_REQUEST_DFP4 10
+# define ATIF_QBTC_REQUEST_DFP5 11
+# define ATIF_QBTC_REQUEST_DFP6 12
+/* error code */
+# define ATIF_QBTC_ERROR_CODE_SUCCESS 0
+# define ATIF_QBTC_ERROR_CODE_FAILURE 1
+# define ATIF_QBTC_ERROR_CODE_DEVICE_NOT_SUPPORTED 2
+#define ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION 0x11
+/* ARG0: ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION
+ * ARG1: none
+ * OUTPUT: none
*/
-/* flags */
-# define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE (1 << 0)
-# define ATIF_XGP_PORT (1 << 1)
-# define ATIF_VGA_ENABLED_GRAPHICS_DEVICE (1 << 2)
-# define ATIF_XGP_PORT_IN_DOCK (1 << 3)
#define ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION 0x15
/* ARG0: ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION
* ARG1: none
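
The ATIF output layouts above are described only by these comment blocks; as a reading aid, here is a minimal, hypothetical C sketch of the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS output and of the 24:8 fixed-point package power limit. The struct, field, and helper names are invented for illustration and are not the ones the driver uses.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the byte-packed QBTC output documented above. */
struct qbtc_data_point {
	uint8_t luminance_pct;		/* luminance level in percent */
	uint8_t input_signal;		/* input signal, 0-255 */
} __attribute__((packed));

struct qbtc_output {
	uint16_t size;			/* structure size, includes this field */
	uint16_t flags;			/* currently reserved */
	uint8_t  error_code;		/* 0 = success, see defines above */
	uint8_t  ac_level;		/* default brightness (%) on AC */
	uint8_t  dc_level;		/* default brightness (%) on battery */
	uint8_t  min_input_signal;	/* signal for 0% backlight */
	uint8_t  max_input_signal;	/* signal for 100% backlight */
	uint8_t  num_points;		/* data points, excluding 0% and 100% */
	struct qbtc_data_point points[];
} __attribute__((packed));

/* 24:8 fixed-point Watts (GPU package power limit) to milliwatts. */
static inline uint32_t atif_power_limit_to_mw(uint32_t fixed_24_8)
{
	return (uint32_t)(((uint64_t)fixed_24_8 * 1000u) >> 8);
}

int main(void)
{
	printf("%u mW\n", atif_power_limit_to_mw(0x1e80));	/* 30.5 W -> 30500 mW */
	return 0;
}
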
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
new file mode 100644
index 000000000000..8f515875a34d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_0_OFFSET_HEADER
+#define _mmhub_9_4_0_OFFSET_HEADER
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+// base address: 0x6a040
+#define mmMC_VM_XGMI_LFB_CNTL 0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE 0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
new file mode 100644
index 000000000000..0a6b072d191e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_0_SH_MASK_HEADER
+#define _mmhub_9_4_0_SH_MASK_HEADER
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
+
+#endif
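
These new mmhub 9.4.0 headers follow the usual pattern for generated register headers: a DWORD offset plus a *_BASE_IDX into the IP base-address table, and per-field __SHIFT/_MASK pairs. A standalone sketch of how such a field is extracted (the 0x31 readback value is made up; in the driver this is normally wrapped by helpers such as REG_GET_FIELD):

#include <stdint.h>
#include <stdio.h>

#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT	0x0
#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT	0x4
#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK		0x00000007L
#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK		0x00000070L

int main(void)
{
	uint32_t val = 0x31;	/* pretend register readback */
	uint32_t region = (val & MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK)
			>> MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT;
	uint32_t max_region = (val & MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK)
			>> MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT;

	printf("PF LFB region %u, max region %u\n", region, max_region);	/* 1, 3 */
	return 0;
}
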
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 58ac0b90c310..8154d67388cc 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -188,8 +188,8 @@ struct tile_config {
*/
#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
#define ALLOC_MEM_FLAGS_GTT (1 << 1)
-#define ALLOC_MEM_FLAGS_USERPTR (1 << 2) /* TODO */
-#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3) /* TODO */
+#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
+#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
/*
* Allocation flags attributes/access options.
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 980e696989b1..1479ea1dc3e7 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -276,6 +276,10 @@ struct amd_pm_funcs {
struct amd_pp_simple_clock_info *clocks);
int (*notify_smu_enable_pwe)(void *handle);
int (*enable_mgpu_fan_boost)(void *handle);
+ int (*set_active_display_count)(void *handle, uint32_t count);
+ int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
+ int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
+ int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
};
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index d6aa1d414320..9bc27f468d5b 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -300,7 +300,7 @@ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
return -EINVAL;
if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -387,7 +387,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
return 0;
if (hwmgr->hwmgr_func->get_sclk == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -405,7 +405,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
return 0;
if (hwmgr->hwmgr_func->get_mclk == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -422,7 +422,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
return;
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
mutex_lock(&hwmgr->smu_lock);
@@ -438,7 +438,7 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
return;
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
mutex_lock(&hwmgr->smu_lock);
@@ -505,7 +505,7 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
return;
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
mutex_lock(&hwmgr->smu_lock);
@@ -522,7 +522,7 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
return 0;
if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -540,7 +540,7 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
return -EINVAL;
if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -558,7 +558,7 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
return -EINVAL;
if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -594,7 +594,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
return -EINVAL;
if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -720,12 +720,12 @@ static int pp_dpm_force_clock_level(void *handle,
return -EINVAL;
if (hwmgr->hwmgr_func->force_clock_level == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("force clock level is for dpm manual mode only.\n");
+ pr_debug("force clock level is for dpm manual mode only.\n");
return -EINVAL;
}
@@ -745,7 +745,7 @@ static int pp_dpm_print_clock_levels(void *handle,
return -EINVAL;
if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -763,7 +763,7 @@ static int pp_dpm_get_sclk_od(void *handle)
return -EINVAL;
if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -781,7 +781,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
return -EINVAL;
if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -800,7 +800,7 @@ static int pp_dpm_get_mclk_od(void *handle)
return -EINVAL;
if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -818,7 +818,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
return -EINVAL;
if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -878,7 +878,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
return -EINVAL;
if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return snprintf(buf, PAGE_SIZE, "\n");
}
@@ -894,12 +894,12 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
return ret;
if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return ret;
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("power profile setting is for manual dpm mode only.\n");
+ pr_debug("power profile setting is for manual dpm mode only.\n");
return ret;
}
@@ -917,7 +917,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
return -EINVAL;
if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
@@ -935,7 +935,7 @@ static int pp_dpm_switch_power_profile(void *handle,
return -EINVAL;
if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
@@ -972,7 +972,7 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
return -EINVAL;
if (hwmgr->hwmgr_func->set_power_limit == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
@@ -1072,7 +1072,7 @@ static int pp_get_current_clocks(void *handle,
&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
if (ret) {
- pr_info("Error in phm_get_clock_info \n");
+ pr_debug("Error in phm_get_clock_info \n");
mutex_unlock(&hwmgr->smu_lock);
return -EINVAL;
}
@@ -1212,7 +1212,7 @@ static int pp_dpm_powergate_mmhub(void *handle)
return -EINVAL;
if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -1227,7 +1227,7 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate)
return 0;
if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -1242,7 +1242,7 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
return;
if (hwmgr->hwmgr_func->powergate_acp == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
@@ -1257,7 +1257,7 @@ static void pp_dpm_powergate_sdma(void *handle, bool gate)
return;
if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
@@ -1303,7 +1303,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
return -EINVAL;
if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
}
@@ -1332,6 +1332,78 @@ static int pp_enable_mgpu_fan_boost(void *handle)
return 0;
}
+static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
+ pr_debug("%s was not implemented.\n", __func__);
+		return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
+ pr_debug("%s was not implemented.\n", __func__);
+		return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
+ pr_debug("%s was not implemented.\n", __func__);
+		return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_active_display_count(void *handle, uint32_t count)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_set_active_display_count(hwmgr, count);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1378,4 +1450,8 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
+ .set_active_display_count = pp_set_active_display_count,
+ .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
+ .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
+ .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
};
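
All four new pp_dpm wrappers share the same shape: validate the handle and pm_en, treat the hwmgr hook as optional, then dispatch under smu_lock. Display code would reach them through the amd_pm_funcs table; the following is only a hedged sketch of that caller side (the helper name and the kHz-based unit conversion are assumptions; the powerplay interface here takes 10 kHz units, as convert_10k_to_mhz() in smu10 suggests):

/* Illustrative caller; not the actual display-manager glue. */
static void example_request_hard_min_fclk(struct amdgpu_device *adev,
					  uint32_t clk_khz)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	/* The hook is optional; older backends simply do not provide it. */
	if (pp_funcs && pp_funcs->set_hard_min_fclk_by_freq)
		pp_funcs->set_hard_min_fclk_by_freq(pp_handle, clk_khz / 10);
}
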
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 85119c2bdcc8..1f92a9f4c9e3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -80,7 +80,9 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
adev = hwmgr->adev;
- if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) {
+ /* Skip for suspend/resume case */
+ if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
+ && adev->in_suspend) {
pr_info("dpm has been enabled\n");
return 0;
}
@@ -286,8 +288,8 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
if (display_config == NULL)
return -EINVAL;
- if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
- hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
+ if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
+ hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
for (index = 0; index < display_config->num_path_including_non_display; index++) {
if (display_config->displays[index].controller_id != 0)
@@ -478,3 +480,44 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
}
+
+int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_active_display_count)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
+}
+
+int phm_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
+}
+
+int phm_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
+}
+
+int phm_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 47ac92369739..0173d0480024 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -352,6 +352,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ ret = phm_pre_display_configuration_changed(hwmgr);
+ if (ret)
+ return ret;
ret = phm_set_cpu_power_state(hwmgr);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 91ffb7bc4ee7..56437866d120 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
if (skip)
return 0;
- phm_pre_display_configuration_changed(hwmgr);
-
phm_display_configuration_changed(hwmgr);
if (hwmgr->ps)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index dd18cb710391..f95c5f50eb0f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -216,12 +216,12 @@ static inline uint32_t convert_10k_to_mhz(uint32_t clock)
return (clock + 99) / 100;
}
-static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->need_min_deep_sleep_dcefclk &&
- smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
+ smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
@@ -230,6 +230,34 @@ static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
return 0;
}
+static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (smu10_data->dcf_actual_hard_min_freq &&
+ smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
+ smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinDcefclkByFreq,
+ smu10_data->dcf_actual_hard_min_freq);
+ }
+ return 0;
+}
+
+static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (smu10_data->f_actual_hard_min_freq &&
+ smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
+ smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ smu10_data->f_actual_hard_min_freq);
+ }
+ return 0;
+}
+
static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
@@ -1206,7 +1234,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.get_max_high_clocks = smu10_get_max_high_clocks,
.read_sensor = smu10_read_sensor,
.set_active_display_count = smu10_set_active_display_count,
- .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
+ .set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk,
.dynamic_state_management_enable = smu10_enable_dpm_tasks,
.power_off_asic = smu10_power_off_asic,
.asic_setup = smu10_setup_asic_task,
@@ -1217,6 +1245,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.display_clock_voltage_request = smu10_display_clock_voltage_request,
.powergate_gfx = smu10_gfx_off_control,
.powergate_sdma = smu10_powergate_sdma,
+ .set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
+ .set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
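
smu10 caches the last programmed value and only sends the PPSMC_MSG_SetHardMin*ByFreq message when the requested minimum actually changes; the interface takes 10 kHz units and convert_10k_to_mhz() rounds up to whole MHz. A standalone check of that rounding, with arbitrary example values:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t convert_10k_to_mhz(uint32_t clock)
{
	return (clock + 99) / 100;	/* 10 kHz units to MHz, rounded up */
}

int main(void)
{
	printf("%u\n", convert_10k_to_mhz(30000));	/* 300: exactly 300 MHz */
	printf("%u\n", convert_10k_to_mhz(30001));	/* 301: rounded up */
	return 0;
}
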
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 88f6b35ea6fe..d91390459326 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -269,7 +269,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
hwmgr->dyn_state.mvdd_dependency_on_mclk);
PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 MVDD table from dependancy table.",
+ "Failed to retrieve SVI2 MVDD table from dependency table.",
return result;);
}
@@ -288,7 +288,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
hwmgr->dyn_state.vddci_dependency_on_mclk);
PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDCI table from dependancy table.",
+ "Failed to retrieve SVI2 VDDCI table from dependency table.",
return result);
}
@@ -317,7 +317,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
table_info->vddc_lookup_table);
PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
+ "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
}
tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
@@ -2859,7 +2859,10 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+ if (hwmgr->is_kicker)
+ switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+ else
+ switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
break;
case CHIP_VEGAM:
switch_limit_us = 30;
@@ -3589,8 +3592,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
}
if (i >= sclk_table->count) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- sclk_table->dpm_levels[i-1].value = sclk;
+ if (sclk > sclk_table->dpm_levels[i-1].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ sclk_table->dpm_levels[i-1].value = sclk;
+ }
} else {
/* TODO: Check SCLK in DAL's minimum clocks
* in case DeepSleep divider update is required.
@@ -3607,8 +3612,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
}
if (i >= mclk_table->count) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- mclk_table->dpm_levels[i-1].value = mclk;
+ if (mclk > mclk_table->dpm_levels[i-1].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ mclk_table->dpm_levels[i-1].value = mclk;
+ }
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
@@ -4219,9 +4226,17 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
if (tmp & (1 << 23)) {
data->mem_latency_high = MEM_LATENCY_HIGH;
data->mem_latency_low = MEM_LATENCY_LOW;
+ if ((hwmgr->chip_id == CHIP_POLARIS10) ||
+ (hwmgr->chip_id == CHIP_POLARIS11) ||
+ (hwmgr->chip_id == CHIP_POLARIS12))
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
} else {
data->mem_latency_high = 330;
data->mem_latency_low = 330;
+ if ((hwmgr->chip_id == CHIP_POLARIS10) ||
+ (hwmgr->chip_id == CHIP_POLARIS11) ||
+ (hwmgr->chip_id == CHIP_POLARIS12))
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
}
return 0;
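
The smu7 change above (and the matching vega10 change further down) tightens the overdrive bookkeeping: the top DPM level is only rewritten when the requested clock is actually higher than it, so a lower request no longer drags the table down. A minimal standalone model of that check, with an invented table:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dpm_level { uint32_t value; };

/* Returns true when an OD_UPDATE flag would be set for this request. */
static bool od_update_needed(struct dpm_level *lvls, unsigned int count,
			     uint32_t sclk)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (lvls[i].value == sclk)
			return false;			/* level already present */

	if (sclk > lvls[count - 1].value) {		/* only grow, never shrink */
		lvls[count - 1].value = sclk;
		return true;
	}
	return false;
}

int main(void)
{
	struct dpm_level tbl[] = { { 300 }, { 600 }, { 1000 } };

	printf("%d\n", od_update_needed(tbl, 3, 900));	/* 0: top level kept at 1000 */
	printf("%d\n", od_update_needed(tbl, 3, 1100));	/* 1: top level raised to 1100 */
	return 0;
}
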
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index fef111ddb736..553a203ac47c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -1228,17 +1228,14 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
- if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
- smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
+ if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
- }
return 0;
}
static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
- smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
return smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_UVDPowerON,
@@ -1995,6 +1992,7 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
.power_state_set = smu8_set_power_state_tasks,
.dynamic_state_management_disable = smu8_disable_dpm_tasks,
.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
+ .update_nbdpm_pstate = smu8_nbdpm_pstate_enable_disable,
.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index e2bc6e0c229f..79c86247d0ac 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3266,8 +3266,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
}
if (i >= sclk_table->count) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- sclk_table->dpm_levels[i-1].value = sclk;
+ if (sclk > sclk_table->dpm_levels[i-1].value) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ sclk_table->dpm_levels[i-1].value = sclk;
+ }
}
for (i = 0; i < mclk_table->count; i++) {
@@ -3276,8 +3278,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
}
if (i >= mclk_table->count) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- mclk_table->dpm_levels[i-1].value = mclk;
+ if (mclk > mclk_table->dpm_levels[i-1].value) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ mclk_table->dpm_levels[i-1].value = mclk;
+ }
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 2679d1240fa1..26154f9b2178 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -49,6 +49,10 @@
#include "soc15_common.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
+#include "nbio/nbio_7_4_sh_mask.h"
+
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
@@ -130,7 +134,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disable_auto_wattman = 1;
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
- data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
+ data->registry_data.fclk_gfxclk_ratio = 0;
data->registry_data.auto_wattman_threshold = 50;
data->registry_data.gfxoff_controlled_by_driver = 1;
data->gfxoff_allowed = false;
@@ -1660,14 +1664,15 @@ static uint32_t vega20_find_highest_dpm_level(
return i;
}
-static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t min_freq;
int ret = 0;
- if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1676,7 +1681,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1692,7 +1698,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ if (data->smu_features[GNLD_DPM_UVD].enabled &&
+ (feature_mask & FEATURE_DPM_UVD_MASK)) {
min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1710,7 +1717,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ if (data->smu_features[GNLD_DPM_VCE].enabled &&
+ (feature_mask & FEATURE_DPM_VCE_MASK)) {
min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1720,7 +1728,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1733,14 +1742,15 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret;
}
-static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t max_freq;
int ret = 0;
- if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1750,7 +1760,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1760,7 +1771,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ if (data->smu_features[GNLD_DPM_UVD].enabled &&
+ (feature_mask & FEATURE_DPM_UVD_MASK)) {
max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1777,7 +1789,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ if (data->smu_features[GNLD_DPM_VCE].enabled &&
+ (feature_mask & FEATURE_DPM_VCE_MASK)) {
max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1787,7 +1800,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -2126,12 +2140,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2158,12 +2172,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2176,12 +2190,12 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Bootup Levels!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Max Levels!",
return ret);
@@ -2239,12 +2253,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2259,12 +2273,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2272,6 +2286,18 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
break;
case PP_PCIE:
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+ if (soft_min_level >= NUM_LINK_LEVELS ||
+ soft_max_level >= NUM_LINK_LEVELS)
+ return -EINVAL;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to set min link dpm level!",
+ return ret);
+
break;
default:
@@ -2748,9 +2774,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
data->od8_settings.od8_settings_array;
OverDriveTable_t *od_table =
&(data->smc_state_table.overdrive_table);
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
+ PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
+ struct amdgpu_device *adev = hwmgr->adev;
struct pp_clock_levels_with_latency clocks;
int i, now, size = 0;
int ret = 0;
+ uint32_t gen_speed, lane_width;
switch (type) {
case PP_SCLK:
@@ -2788,6 +2819,28 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case PP_PCIE:
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ for (i = 0; i < NUM_LINK_LEVELS; i++)
+ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," :
+ (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," :
+ (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," :
+ (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "",
+ (pptable->PcieLaneCount[i] == 1) ? "x1" :
+ (pptable->PcieLaneCount[i] == 2) ? "x2" :
+ (pptable->PcieLaneCount[i] == 3) ? "x4" :
+ (pptable->PcieLaneCount[i] == 4) ? "x8" :
+ (pptable->PcieLaneCount[i] == 5) ? "x12" :
+ (pptable->PcieLaneCount[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == pptable->PcieGenSpeed[i]) &&
+ (lane_width == pptable->PcieLaneCount[i]) ?
+ "*" : "");
break;
case OD_SCLK:
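
For PP_PCIE, the sysfs mask is reduced to a single minimum link level: ffs() picks the lowest set bit and fls() the highest, both converted to zero-based indexes and bounds-checked against NUM_LINK_LEVELS before PPSMC_MSG_SetMinLinkDpmByIndex is sent (only the minimum is programmed here). A standalone illustration of the bit arithmetic; fls() is kernel-only, so it is open-coded:

#include <strings.h>
#include <stdio.h>

static int fls_compat(unsigned int x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int mask = 0x6;	/* user wrote "1 2" to pp_dpm_pcie */
	int soft_min = mask ? ffs(mask) - 1 : 0;
	int soft_max = mask ? fls_compat(mask) - 1 : 0;

	printf("min link level %d, max link level %d\n", soft_min, soft_max);	/* 1, 2 */
	return 0;
}
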
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 54fd0125d9cf..f4dab979a3a1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -463,5 +463,8 @@ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr);
+
+extern int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count);
+
#endif /* _HARDWARE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 07d180ce4d18..0d298a0409f5 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -309,7 +309,7 @@ struct pp_hwmgr_func {
int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
- int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*set_min_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range);
int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr,
uint32_t virtual_addr_low,
@@ -317,6 +317,9 @@ struct pp_hwmgr_func {
uint32_t mc_addr_low,
uint32_t mc_addr_hi,
uint32_t size);
+ int (*update_nbdpm_pstate)(struct pp_hwmgr *hwmgr,
+ bool enable,
+ bool lock);
int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range);
int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf);
@@ -329,6 +332,8 @@ struct pp_hwmgr_func {
int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate);
int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
+ int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
};
struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
index 62f36ba2435b..6e19f4c7cf8f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
+#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415)
+
#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
@@ -395,6 +397,9 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
+#define PPSMC_MSG_EnableFFC ((uint16_t) 0x307)
+#define PPSMC_MSG_DisableFFC ((uint16_t) 0x308)
+
#define PPSMC_MSG_EnableDpmDidt ((uint16_t) 0x309)
#define PPSMC_MSG_DisableDpmDidt ((uint16_t) 0x30A)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 2b2c26616902..52abca065764 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -1528,8 +1528,21 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
efuse = efuse >> 24;
if (hwmgr->chip_id == CHIP_POLARIS10) {
- min = 1000;
- max = 2300;
+ if (hwmgr->is_kicker) {
+ min = 1200;
+ max = 2500;
+ } else {
+ min = 1000;
+ max = 2300;
+ }
+ } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+ if (hwmgr->is_kicker) {
+ min = 900;
+ max = 2100;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
} else {
min = 1100;
max = 2100;
@@ -1626,6 +1639,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
int result = 0;
@@ -1646,6 +1660,59 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
if (0 == result) {
+ if (((adev->pdev->device == 0x67ef) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe5))) ||
+ ((adev->pdev->device == 0x67ff) &&
+ ((adev->pdev->revision == 0xcf) ||
+ (adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+ if ((adev->pdev->device == 0x67ef && adev->pdev->revision == 0xe5) ||
+ (adev->pdev->device == 0x67ff && adev->pdev->revision == 0xef)) {
+ if ((avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 == 0xEA522DD3) &&
+ (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 == 0x5645A) &&
+ (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 == 0x33F9E) &&
+ (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 == 0xFFFFC5CC) &&
+ (avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 == 0x1B1A) &&
+ (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b == 0xFFFFFCED)) {
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF718F1D4;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x323FD;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0x1E455;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
+ avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x23;
+ }
+ }
+ } else if (hwmgr->chip_id == CHIP_POLARIS12 && !hwmgr->is_kicker) {
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF6B024DD;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x3005E;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0x18A5F;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0x315;
+ avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFED1;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x3B;
+ } else if (((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe3) ||
+ (adev->pdev->revision == 0xe4) ||
+ (adev->pdev->revision == 0xe5) ||
+ (adev->pdev->revision == 0xe7) ||
+ (adev->pdev->revision == 0xef))) ||
+ ((adev->pdev->device == 0x6fdf) &&
+ ((adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF843B66B;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x59CB5;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0xFFFF287F;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
+ avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFF23;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x58;
+ }
+ }
+
+ if (0 == result) {
table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
@@ -1984,6 +2051,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ /* Apply avfs cks-off voltages to avoid the overshoot
+ * when switching to the highest sclk frequency
+ */
+ if (data->apply_avfs_cks_off_voltage)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+
return 0;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 892c1d9304bb..642d0e70d0f8 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -334,7 +334,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
ptr = (char __user *)(uintptr_t)args->ptr;
- if (!access_ok(VERIFY_READ, ptr, args->size))
+ if (!access_ok(ptr, args->size))
return -EFAULT;
ret = fault_in_pages_readable(ptr, args->size);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index a80bca1a857f..c2e41369adcf 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -261,6 +261,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
{
struct ast_framebuffer *afb = &afbdev->afb;
+ drm_crtc_force_disable_all(dev);
drm_fb_helper_unregister_fbi(&afbdev->helper);
if (afb->obj) {
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index dac355812adc..373700c05a00 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -583,7 +583,8 @@ void ast_driver_unload(struct drm_device *dev)
drm_mode_config_cleanup(dev);
ast_mm_fini(ast);
- pci_iounmap(dev->pdev, ast->ioregs);
+ if (ast->ioregs != ast->regs + AST_IO_MM_OFFSET)
+ pci_iounmap(dev->pdev, ast->ioregs);
pci_iounmap(dev->pdev, ast->regs);
kfree(ast);
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 7c6ac3cadb6b..8bb355d5d43d 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -973,9 +973,21 @@ static int get_clock(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = i2c->dev->dev_private;
- uint32_t val;
+ uint32_t val, val2, count, pass;
+
+ count = 0;
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ do {
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ if (val == val2) {
+ pass++;
+ } else {
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ }
+ } while ((pass < 5) && (count++ < 0x10000));
- val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
return val & 1 ? 1 : 0;
}
@@ -983,9 +995,21 @@ static int get_data(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = i2c->dev->dev_private;
- uint32_t val;
+ uint32_t val, val2, count, pass;
+
+ count = 0;
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ do {
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ if (val == val2) {
+ pass++;
+ } else {
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ }
+ } while ((pass < 5) && (count++ < 0x10000));
- val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
return val & 1 ? 1 : 0;
}
@@ -998,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock)
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((clock & 0x01) ? 0 : 1);
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
if (ujcrb7 == jtemp)
break;
@@ -1014,7 +1038,7 @@ static void set_data(void *i2c_priv, int data)
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
if (ujcrb7 == jtemp)
break;
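
The reworked AST GPIO i2c getters debounce the line: the bit is re-sampled until five consecutive reads agree, with a 0x10000-iteration safety limit. The same idea in isolation, with read_bit() standing in for ast_get_index_reg_mask() and a made-up sample trace:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the hardware read; returns the current line state (0/1). */
static uint32_t read_bit(void)
{
	static const uint32_t trace[] = { 1, 0, 1, 1, 1, 1, 1, 1 };
	static unsigned int i;

	return trace[i < 7 ? i++ : 7];
}

static int debounced_read(void)
{
	uint32_t val, val2, count = 0, pass = 0;

	val = read_bit();
	do {
		val2 = read_bit();
		if (val == val2) {
			pass++;
		} else {
			pass = 0;
			val = read_bit();
		}
	} while ((pass < 5) && (count++ < 0x10000));

	return val & 1;
}

int main(void)
{
	printf("%d\n", debounced_read());	/* settles on 1 despite the early glitch */
	return 0;
}
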
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 680566d97adc..10243965ee7c 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -54,7 +54,7 @@
#define SN_AUX_ADDR_7_0_REG 0x76
#define SN_AUX_LENGTH_REG 0x77
#define SN_AUX_CMD_REG 0x78
-#define AUX_CMD_SEND BIT(1)
+#define AUX_CMD_SEND BIT(0)
#define AUX_CMD_REQ(x) ((x) << 4)
#define SN_AUX_RDATA_REG(x) (0x79 + (x))
#define SN_SSC_CONFIG_REG 0x93
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 008224f376fe..5eb40130fafb 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -531,6 +531,8 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
struct drm_crtc *crtc = new_plane_state->crtc;
const struct drm_framebuffer *fb = new_plane_state->fb;
unsigned int fb_width, fb_height;
+ struct drm_mode_rect *clips;
+ uint32_t num_clips;
int ret;
/* either *both* CRTC and FB must be set, or neither */
@@ -604,6 +606,26 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
return -ENOSPC;
}
+ clips = drm_plane_get_damage_clips(new_plane_state);
+ num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+
+ /* Make sure damage clips are valid and inside the fb. */
+ while (num_clips > 0) {
+ if (clips->x1 >= clips->x2 ||
+ clips->y1 >= clips->y2 ||
+ clips->x1 < 0 ||
+ clips->y1 < 0 ||
+ clips->x2 > fb_width ||
+ clips->y2 > fb_height) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
+ plane->base.id, plane->name, clips->x1,
+ clips->y1, clips->x2, clips->y2);
+ return -EINVAL;
+ }
+ clips++;
+ num_clips--;
+ }
+
if (plane_switching_crtc(old_plane_state, new_plane_state)) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
plane->base.id, plane->name);
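
The new check walks the FB_DAMAGE_CLIPS blob and rejects clips that are empty, negative, or outside the framebuffer; coordinates are framebuffer-relative with x2/y2 exclusive. A hedged userspace sketch of supplying such a blob with libdrm (plane_id and damage_prop_id are assumed to have been looked up already; the clip values are arbitrary):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Illustrative helper, not part of any real API. */
static int add_damage_clips(int fd, drmModeAtomicReq *req,
			    uint32_t plane_id, uint32_t damage_prop_id)
{
	struct drm_mode_rect clips[2] = {
		{ .x1 = 0,   .y1 = 0,  .x2 = 256, .y2 = 64  },
		{ .x1 = 300, .y1 = 40, .x2 = 420, .y2 = 200 },
	};
	uint32_t blob_id = 0;
	int ret;

	ret = drmModeCreatePropertyBlob(fd, clips, sizeof(clips), &blob_id);
	if (ret)
		return ret;

	return drmModeAtomicAddProperty(req, plane_id, damage_prop_id,
					blob_id) < 0 ? -1 : 0;
}
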
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 8150fa8387d5..54e2ae614dcc 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -32,6 +32,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_writeback.h>
+#include <drm/drm_damage_helper.h>
#include <linux/dma-fence.h>
#include "drm_crtc_helper_internal.h"
@@ -862,6 +863,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
+ drm_atomic_helper_check_plane_damage(state, new_plane_state);
+
if (!funcs || !funcs->atomic_check)
continue;
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 60bd7d708e35..4985384e51f6 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -241,6 +241,7 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
state->fence = NULL;
state->commit = NULL;
+ state->fb_damage_clips = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
@@ -285,6 +286,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
if (state->commit)
drm_crtc_commit_put(state->commit);
+
+ drm_property_blob_put(state->fb_damage_clips);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 86ac33922b09..c40889888a16 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -433,6 +433,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
drm_property_blob_put(mode);
return ret;
+ } else if (property == config->prop_vrr_enabled) {
+ state->vrr_enabled = val;
} else if (property == config->degamma_lut_property) {
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->degamma_lut,
@@ -491,6 +493,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = state->active;
else if (property == config->prop_mode_id)
*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
+ else if (property == config->prop_vrr_enabled)
+ *val = state->vrr_enabled;
else if (property == config->degamma_lut_property)
*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
else if (property == config->ctm_property)
@@ -513,6 +517,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
+ bool replaced = false;
+ int ret;
if (property == config->prop_fb_id) {
struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
@@ -566,6 +572,14 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
state->color_encoding = val;
} else if (property == plane->color_range_property) {
state->color_range = val;
+ } else if (property == config->prop_fb_damage_clips) {
+ ret = drm_atomic_replace_property_blob_from_id(dev,
+ &state->fb_damage_clips,
+ val,
+ -1,
+ sizeof(struct drm_rect),
+ &replaced);
+ return ret;
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
@@ -621,6 +635,9 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->color_encoding;
} else if (property == plane->color_range_property) {
*val = state->color_range;
+ } else if (property == config->prop_fb_damage_clips) {
+ *val = (state->fb_damage_clips) ?
+ state->fb_damage_clips->base.id : 0;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index d9c0f7573905..1669c42c40ed 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -142,6 +142,7 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
lockdep_assert_held_once(&dev->master_mutex);
+ WARN_ON(fpriv->is_master);
old_master = fpriv->master;
fpriv->master = drm_master_create(dev);
if (!fpriv->master) {
@@ -170,6 +171,7 @@ out_err:
/* drop references and restore old master on failure */
drm_master_put(&fpriv->master);
fpriv->master = old_master;
+ fpriv->is_master = 0;
return ret;
}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 66b2fd20369a..847539645558 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1326,6 +1326,105 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
/**
+ * DOC: Variable refresh properties
+ *
+ * Variable refresh rate capable displays can dynamically adjust their
+ * refresh rate by extending the duration of their vertical front porch
+ * until page flip or timeout occurs. This can reduce or remove stuttering
+ * and latency in scenarios where the page flip does not align with the
+ * vblank interval.
+ *
+ * An example scenario would be an application flipping at a constant rate
+ * of 48Hz on a 60Hz display. The page flip will frequently miss the vblank
+ * interval and the same contents will be displayed twice. This can be
+ * observed as stuttering for content with motion.
+ *
+ * If variable refresh rate was active on a display that supported a
+ * variable refresh range from 35Hz to 60Hz no stuttering would be observable
+ * for the example scenario. The minimum supported variable refresh rate of
+ * 35Hz is below the page flip frequency and the vertical front porch can
+ * be extended until the page flip occurs. The vblank interval will be
+ * directly aligned to the page flip rate.
+ *
+ * Not all userspace content is suitable for use with variable refresh rate.
+ * Large and frequent changes in vertical front porch duration may worsen
+ * perceived stuttering for input sensitive applications.
+ *
+ * Panel brightness will also vary with vertical front porch duration. Some
+ * panels may have noticeable differences in brightness between the minimum
+ * vertical front porch duration and the maximum vertical front porch duration.
+ * Large and frequent changes in vertical front porch duration may produce
+ * observable flickering for such panels.
+ *
+ * Userspace control for variable refresh rate is supported via properties
+ * on the &drm_connector and &drm_crtc objects.
+ *
+ * "vrr_capable":
+ * Optional &drm_connector boolean property that drivers should attach
+ * with drm_connector_attach_vrr_capable_property() on connectors that
+ * could support variable refresh rates. Drivers should update the
+ * property value by calling drm_connector_set_vrr_capable_property().
+ *
+ * Absence of the property should indicate absence of support.
+ *
+ * "vrr_enabled":
+ * Default &drm_crtc boolean property that notifies the driver that the
+ * content on the CRTC is suitable for variable refresh rate presentation.
+ * The driver will take this property as a hint to enable variable
+ * refresh rate support if the receiver supports it, i.e. if the
+ * "vrr_capable" property is true on the &drm_connector object. The
+ * vertical front porch duration will be extended until page-flip or
+ * timeout when enabled.
+ *
+ * The minimum vertical front porch duration is defined as the vertical
+ * front porch duration for the current mode.
+ *
+ * The maximum vertical front porch duration is greater than or equal to
+ * the minimum vertical front porch duration. The duration is derived
+ * from the minimum supported variable refresh rate for the connector.
+ *
+ * The driver may place further restrictions within these minimum
+ * and maximum bounds.
+ *
+ * The semantics for the vertical blank timestamp differ when
+ * variable refresh rate is active. The vertical blank timestamp
+ * is defined to be an estimate using the current mode's fixed
+ * refresh rate timings. The semantics for the page-flip event
+ * timestamp remain the same.
+ */
+
+/**
+ * drm_connector_attach_vrr_capable_property - creates the
+ * vrr_capable property
+ * @connector: connector to create the vrr_capable property on.
+ *
+ * This is used by atomic drivers to add support for querying
+ * variable refresh rate capability for a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_vrr_capable_property(
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop;
+
+ if (!connector->vrr_capable_property) {
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE,
+ "vrr_capable");
+ if (!prop)
+ return -ENOMEM;
+
+ connector->vrr_capable_property = prop;
+ drm_object_attach_property(&connector->base, prop, 0);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_vrr_capable_property);
+
+/**
* drm_connector_attach_scaling_mode_property - attach atomic scaling mode property
* @connector: connector to attach scaling mode property on.
* @scaling_mode_mask: or'ed mask of BIT(%DRM_MODE_SCALE_\*).
@@ -1688,6 +1787,24 @@ int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
/**
+ * drm_connector_set_vrr_capable_property - sets the variable refresh rate
+ * capable property for a connector
+ * @connector: drm connector
+ * @capable: True if the connector is variable refresh rate capable
+ *
+ * Should be used by atomic drivers to update the indicated support for
+ * variable refresh rate over a connector.
+ */
+void drm_connector_set_vrr_capable_property(
+ struct drm_connector *connector, bool capable)
+{
+ drm_object_property_set_value(&connector->base,
+ connector->vrr_capable_property,
+ capable);
+}
+EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
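As a rough illustration of how a driver might wire these two helpers together (a hedged sketch, not part of the patch: my_connector_init() and sink_supports_vrr() are hypothetical driver hooks), the property is attached once at init time and updated when the sink's capabilities are known:

#include <drm/drm_connector.h>

bool sink_supports_vrr(struct drm_connector *connector);	/* placeholder for EDID/DPCD probing */

static int my_connector_init(struct drm_connector *connector)	/* hypothetical */
{
	int ret;

	/* Expose the immutable "vrr_capable" property on this connector. */
	ret = drm_connector_attach_vrr_capable_property(connector);
	if (ret)
		return ret;

	/* Report the probed capability; the property defaults to false until set. */
	drm_connector_set_vrr_capable_property(connector,
					       sink_supports_vrr(connector));
	return 0;
}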
+
+/**
* drm_connector_init_panel_orientation_property -
* initialize the connector's panel_orientation property
* @connector: connector for which to init the panel-orientation property.
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index e224b9b7d17a..f660819d406e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -330,6 +330,8 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
drm_object_attach_property(&crtc->base,
config->prop_out_fence_ptr, 0);
+ drm_object_attach_property(&crtc->base,
+ config->prop_vrr_enabled, 0);
}
return 0;
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
new file mode 100644
index 000000000000..31032407254d
--- /dev/null
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Deepak Rawat <drawat@vmware.com>
+ * Rob Clark <robdclark@gmail.com>
+ *
+ **************************************************************************/
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_damage_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * FB_DAMAGE_CLIPS is an optional plane property which provides a means to
+ * specify a list of damage rectangles on a plane in framebuffer coordinates of
+ * the framebuffer attached to the plane. In this context damage is the area of
+ * the plane's framebuffer that has changed since the last plane update (also
+ * called page-flip), irrespective of whether the currently attached framebuffer
+ * is the same as the framebuffer attached during the last plane update.
+ *
+ * FB_DAMAGE_CLIPS is a hint to the kernel which could be helpful for some
+ * drivers to optimize internally, especially for virtual devices where each
+ * framebuffer change needs to be transmitted over network, USB, etc.
+ *
+ * Since FB_DAMAGE_CLIPS is a hint, it is an optional property. User-space can
+ * ignore the damage clips property, in which case the driver will do a full
+ * plane update. If damage clips are provided, the driver guarantees that the
+ * area inside the damage clips is updated on the plane. For efficiency the
+ * driver may do a full update or update more than specified in the damage
+ * clips. Since the driver is free to read more, user-space must always render
+ * the entire visible framebuffer, otherwise corruption can result. Likewise,
+ * damage clips that do not encompass the actual damage to the framebuffer
+ * (since the last plane update) can result in incorrect rendering.
+ *
+ * FB_DAMAGE_CLIPS is a blob property whose data is simply an array of
+ * &drm_mode_rect. Unlike plane &drm_plane_state.src coordinates, damage clips
+ * are not in 16.16 fixed point. Like the plane src in the framebuffer, damage
+ * clips cannot be negative. Within a damage clip, x1/y1 are inclusive and
+ * x2/y2 are exclusive. While the kernel does not reject overlapping damage
+ * clips, they are strongly discouraged.
+ *
+ * Drivers that are interested in the damage interface for a plane should enable
+ * the FB_DAMAGE_CLIPS property by calling drm_plane_enable_fb_damage_clips().
+ * Drivers implementing damage can use the drm_atomic_helper_damage_iter_init()
+ * and drm_atomic_helper_damage_iter_next() helper iterator functions to get
+ * damage rectangles clipped to &drm_plane_state.src.
+ */
+
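For illustration only (not part of this file), the userspace side described above could attach a damage blob to an atomic commit via libdrm roughly as follows; fd, req, plane_id and the FB_DAMAGE_CLIPS property id are assumed to come from the caller's existing atomic setup, and include paths may differ between distributions:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm/drm_mode.h>	/* struct drm_mode_rect */

/* Hand one changed region to the kernel; coordinates are whole framebuffer pixels. */
static int add_damage_clip(int fd, drmModeAtomicReq *req, uint32_t plane_id,
			   uint32_t fb_damage_clips_prop_id)
{
	struct drm_mode_rect clip = { .x1 = 0, .y1 = 0, .x2 = 256, .y2 = 64 };
	uint32_t blob_id;
	int ret;

	ret = drmModeCreatePropertyBlob(fd, &clip, sizeof(clip), &blob_id);
	if (ret)
		return ret;

	/* The blob may hold an array of rects; a single clip is used here. */
	ret = drmModeAtomicAddProperty(req, plane_id, fb_damage_clips_prop_id,
				       blob_id);
	return ret < 0 ? ret : 0;
}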
+static void convert_clip_rect_to_rect(const struct drm_clip_rect *src,
+ struct drm_mode_rect *dest,
+ uint32_t num_clips, uint32_t src_inc)
+{
+ while (num_clips > 0) {
+ dest->x1 = src->x1;
+ dest->y1 = src->y1;
+ dest->x2 = src->x2;
+ dest->y2 = src->y2;
+ src += src_inc;
+ dest++;
+ num_clips--;
+ }
+}
+
+/**
+ * drm_plane_enable_fb_damage_clips - Enables plane fb damage clips property.
+ * @plane: Plane on which to enable damage clips property.
+ *
+ * This function lets a driver enable the damage clips property on a plane.
+ */
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ drm_object_attach_property(&plane->base, config->prop_fb_damage_clips,
+ 0);
+}
+EXPORT_SYMBOL(drm_plane_enable_fb_damage_clips);
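On the driver side, opting in is a single call once the plane has been initialised; a minimal sketch (the wrapper function is hypothetical):

#include <drm/drm_plane.h>
#include <drm/drm_damage_helper.h>

/* Call once after drm_universal_plane_init() (or equivalent) has succeeded. */
static void my_plane_enable_damage_hints(struct drm_plane *plane)
{
	/* Attaches the optional FB_DAMAGE_CLIPS property to this plane. */
	drm_plane_enable_fb_damage_clips(plane);
}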
+
+/**
+ * drm_atomic_helper_check_plane_damage - Verify plane damage on atomic_check.
+ * @state: The driver state object.
+ * @plane_state: Plane state for which to verify damage.
+ *
+ * This helper function makes sure that damage from the plane state is discarded
+ * on a full modeset. If there are more reasons a driver would want to do a full
+ * plane update rather than processing individual damage regions, then those
+ * cases should be taken care of here.
+ *
+ * Note that &drm_plane_state.fb_damage_clips == NULL in the plane state means
+ * that a full plane update should happen. It also ensures the helper iterator
+ * will return &drm_plane_state.src as damage.
+ */
+void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (plane_state->crtc) {
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ plane_state->crtc);
+
+ if (WARN_ON(!crtc_state))
+ return;
+
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ drm_property_blob_put(plane_state->fb_damage_clips);
+ plane_state->fb_damage_clips = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_plane_damage);
+
+/**
+ * drm_atomic_helper_dirtyfb - Helper for dirtyfb.
+ * @fb: DRM framebuffer.
+ * @file_priv: DRM file for the ioctl call.
+ * @flags: Dirty fb annotate flags.
+ * @color: Color for annotate fill.
+ * @clips: Dirty region.
+ * @num_clips: Number of clips in @clips.
+ *
+ * A helper to implement &drm_framebuffer_funcs.dirty using damage interface
+ * during plane update. If num_clips is 0 then this helper will do a full plane
+ * update. This is the same behaviour expected by the DIRTYFB IOCTL.
+ *
+ * Note that this helper is a blocking implementation. This is what current
+ * drivers and userspace expect in their DIRTYFB IOCTL implementation, as a way
+ * to rate-limit userspace and make sure its rendering doesn't get ahead of
+ * uploading new data too much.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_property_blob *damage = NULL;
+ struct drm_mode_rect *rects = NULL;
+ struct drm_atomic_state *state;
+ struct drm_plane *plane;
+ int ret = 0;
+
+ /*
+ * When called from ioctl, we are interruptible, but not when called
+ * internally (ie. defio worker)
+ */
+ drm_modeset_acquire_init(&ctx,
+ file_priv ? DRM_MODESET_ACQUIRE_INTERRUPTIBLE : 0);
+
+ state = drm_atomic_state_alloc(fb->dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_drop_locks;
+ }
+ state->acquire_ctx = &ctx;
+
+ if (clips) {
+ uint32_t inc = 1;
+
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ inc = 2;
+ num_clips /= 2;
+ }
+
+ rects = kcalloc(num_clips, sizeof(*rects), GFP_KERNEL);
+ if (!rects) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ convert_clip_rect_to_rect(clips, rects, num_clips, inc);
+ damage = drm_property_create_blob(fb->dev,
+ num_clips * sizeof(*rects),
+ rects);
+ if (IS_ERR(damage)) {
+ ret = PTR_ERR(damage);
+ damage = NULL;
+ goto out;
+ }
+ }
+
+retry:
+ drm_for_each_plane(plane, fb->dev) {
+ struct drm_plane_state *plane_state;
+
+ if (plane->state->fb != fb)
+ continue;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto out;
+ }
+
+ drm_property_replace_blob(&plane_state->fb_damage_clips,
+ damage);
+ }
+
+ ret = drm_atomic_commit(state);
+
+out:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
+
+ drm_property_blob_put(damage);
+ kfree(rects);
+ drm_atomic_state_put(state);
+
+out_drop_locks:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_atomic_helper_dirtyfb);
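Drivers that already service the legacy DIRTYFB ioctl can route it through the damage property by pointing their framebuffer .dirty hook at this helper; a hedged sketch assuming a GEM-backed driver (the struct name is made up, the other two callbacks are existing DRM GEM framebuffer helpers):

#include <drm/drm_damage_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>

static const struct drm_framebuffer_funcs my_fb_funcs = {
	.create_handle	= drm_gem_fb_create_handle,
	.destroy	= drm_gem_fb_destroy,
	/* DIRTYFB clip rects are converted into FB_DAMAGE_CLIPS and committed. */
	.dirty		= drm_atomic_helper_dirtyfb,
};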
+
+/**
+ * drm_atomic_helper_damage_iter_init - Initialize the damage iterator.
+ * @iter: The iterator to initialize.
+ * @old_state: Old plane state for validation.
+ * @state: Plane state from which to iterate the damage clips.
+ *
+ * Initialize an iterator, which clips plane damage
+ * &drm_plane_state.fb_damage_clips to plane &drm_plane_state.src. This iterator
+ * returns the full plane src when damage is not present, either because
+ * user-space didn't send any or because the driver discarded it (i.e. it wants
+ * to do a full plane update). Currently this iterator also returns the full
+ * plane src when the plane src changed, but that may change in the future to
+ * return damage.
+ *
+ * When the plane is not visible or the plane update should not happen, the
+ * first call to iter_next will return false. Note that this helper uses the
+ * clipped &drm_plane_state.src, so drivers calling this helper should have
+ * called drm_atomic_helper_check_plane_state() earlier.
+ */
+void
+drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+ const struct drm_plane_state *old_state,
+ const struct drm_plane_state *state)
+{
+ memset(iter, 0, sizeof(*iter));
+
+ if (!state || !state->crtc || !state->fb || !state->visible)
+ return;
+
+ iter->clips = drm_helper_get_plane_damage_clips(state);
+ iter->num_clips = drm_plane_get_damage_clips_count(state);
+
+ /* Round down for x1/y1 and round up for x2/y2 to catch all pixels */
+ iter->plane_src.x1 = state->src.x1 >> 16;
+ iter->plane_src.y1 = state->src.y1 >> 16;
+ iter->plane_src.x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
+ iter->plane_src.y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
+
+ if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
+ iter->clips = 0;
+ iter->num_clips = 0;
+ iter->full_update = true;
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_iter_init);
+
+/**
+ * drm_atomic_helper_damage_iter_next - Advance the damage iterator.
+ * @iter: The iterator to advance.
+ * @rect: Returns a rectangle in fb coordinates clipped to the plane src.
+ *
+ * Since the plane src is in 16.16 fixed point and damage clips are whole
+ * numbers, this iterator rounds off clips that intersect with the plane src:
+ * round down for x1/y1 and round up for x2/y2 of the intersected coordinates.
+ * The same rounding applies to the full plane src, in case it is returned as
+ * damage. This iterator will skip damage clips outside of the plane src.
+ *
+ * Return: True if the output is valid, false once the end has been reached.
+ *
+ * If the first call to the iterator returns false, the plane does not need to
+ * be updated.
+ */
+bool
+drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
+ struct drm_rect *rect)
+{
+ bool ret = false;
+
+ if (iter->full_update) {
+ *rect = iter->plane_src;
+ iter->full_update = false;
+ return true;
+ }
+
+ while (iter->curr_clip < iter->num_clips) {
+ *rect = iter->clips[iter->curr_clip];
+ iter->curr_clip++;
+
+ if (drm_rect_intersect(rect, &iter->plane_src)) {
+ ret = true;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_iter_next);
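Putting the two iterator calls together, a driver's plane update hook would typically walk the clipped damage like this; my_upload_rect() is a placeholder for the device-specific transfer, everything else follows the helpers introduced above:

#include <drm/drm_damage_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_rect.h>

void my_upload_rect(struct drm_plane *plane, struct drm_framebuffer *fb,
		    const struct drm_rect *clip);	/* placeholder */

static void my_plane_atomic_update(struct drm_plane *plane,
				   struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, state);

	/* Returns the plane src as a single rect when no damage was supplied. */
	while (drm_atomic_helper_damage_iter_next(&iter, &clip))
		my_upload_rect(plane, state->fb, &clip);
}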
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 6d483487f2b4..2d6c491a0542 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1428,17 +1428,19 @@ u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
}
EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
-u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3])
{
+ int num_bpc = 0;
u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
if (color_depth & DP_DSC_12_BPC)
- return 12;
+ dsc_bpc[num_bpc++] = 12;
if (color_depth & DP_DSC_10_BPC)
- return 10;
+ dsc_bpc[num_bpc++] = 10;
if (color_depth & DP_DSC_8_BPC)
- return 8;
+ dsc_bpc[num_bpc++] = 8;
- return 0;
+ return num_bpc;
}
-EXPORT_SYMBOL(drm_dp_dsc_sink_max_color_depth);
+EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
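The replacement helper reports every DSC input bpc the sink supports rather than only the maximum, so a caller can pick the largest value that also fits its own pipe limit; a small hedged sketch of that selection (pipe_max_bpc is an invented parameter):

#include <drm/drm_dp_helper.h>

/* Pick the largest sink-supported DSC input bpc not exceeding the source limit. */
static u8 pick_dsc_input_bpc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
			     u8 pipe_max_bpc)
{
	u8 dsc_bpc[3];
	u8 best = 0;
	int num_bpc, i;

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_bpc[i] <= pipe_max_bpc && dsc_bpc[i] > best)
			best = dsc_bpc[i];
	}

	return best;	/* 0 means no suitable bpc was found */
}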
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
new file mode 100644
index 000000000000..bc2b23adb072
--- /dev/null
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corp
+ *
+ * Author:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/byteorder/generic.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_dsc.h>
+
+/**
+ * DOC: dsc helpers
+ *
+ * These functions contain some common logic and helpers to deal with VESA
+ * Display Stream Compression standard required for DSC on Display Port/eDP or
+ * MIPI display interfaces.
+ */
+
+/**
+ * drm_dsc_dp_pps_header_init() - Initializes the PPS Header
+ * for DisplayPort as per the DP 1.4 spec.
+ * @pps_sdp: Secondary data packet for DSC Picture Parameter Set
+ */
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp)
+{
+ memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header));
+
+ pps_sdp->pps_header.HB1 = DP_SDP_PPS;
+ pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
+}
+EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
+
+/**
+ * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe
+ * using the DSC configuration parameters in the order expected
+ * by the DSC Display Sink device. For the DSC, the sink device
+ * expects the PPS payload in the big endian format for the fields
+ * that span more than 1 byte.
+ *
+ * @pps_sdp:
+ * Secondary data packet for DSC Picture Parameter Set
+ * @dsc_cfg:
+ * DSC Configuration data filled by driver
+ */
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg)
+{
+ int i;
+
+ /* Protect against someone accidentally changing struct size */
+ BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) !=
+ DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
+
+ memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload));
+
+ /* PPS 0 */
+ pps_sdp->pps_payload.dsc_version =
+ dsc_cfg->dsc_version_minor |
+ dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
+
+ /* PPS 1, 2 is 0 */
+
+ /* PPS 3 */
+ pps_sdp->pps_payload.pps_3 =
+ dsc_cfg->line_buf_depth |
+ dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
+
+ /* PPS 4 */
+ pps_sdp->pps_payload.pps_4 =
+ ((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT) |
+ dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
+ dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT |
+ dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
+ dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
+
+ /* PPS 5 */
+ pps_sdp->pps_payload.bits_per_pixel_low =
+ (dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
+
+ /*
+ * The DSC panel expects the PPS packet to have big endian format
+ * for fields spanning two bytes. Use the cpu_to_be16() macro to convert
+ * them: on a little-endian host it swaps the bytes to big endian, on a
+ * big-endian host it leaves them unchanged.
+ */
+
+ /* PPS 6, 7 */
+ pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height);
+
+ /* PPS 8, 9 */
+ pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width);
+
+ /* PPS 10, 11 */
+ pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height);
+
+ /* PPS 12, 13 */
+ pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width);
+
+ /* PPS 14, 15 */
+ pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
+
+ /* PPS 16 */
+ pps_sdp->pps_payload.initial_xmit_delay_high =
+ ((dsc_cfg->initial_xmit_delay &
+ DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT);
+
+ /* PPS 17 */
+ pps_sdp->pps_payload.initial_xmit_delay_low =
+ (dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
+
+ /* PPS 18, 19 */
+ pps_sdp->pps_payload.initial_dec_delay =
+ cpu_to_be16(dsc_cfg->initial_dec_delay);
+
+ /* PPS 20 is 0 */
+
+ /* PPS 21 */
+ pps_sdp->pps_payload.initial_scale_value =
+ dsc_cfg->initial_scale_value;
+
+ /* PPS 22, 23 */
+ pps_sdp->pps_payload.scale_increment_interval =
+ cpu_to_be16(dsc_cfg->scale_increment_interval);
+
+ /* PPS 24 */
+ pps_sdp->pps_payload.scale_decrement_interval_high =
+ ((dsc_cfg->scale_decrement_interval &
+ DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT);
+
+ /* PPS 25 */
+ pps_sdp->pps_payload.scale_decrement_interval_low =
+ (dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
+
+ /* PPS 26[7:0], PPS 27[7:5] RESERVED */
+
+ /* PPS 27 */
+ pps_sdp->pps_payload.first_line_bpg_offset =
+ dsc_cfg->first_line_bpg_offset;
+
+ /* PPS 28, 29 */
+ pps_sdp->pps_payload.nfl_bpg_offset =
+ cpu_to_be16(dsc_cfg->nfl_bpg_offset);
+
+ /* PPS 30, 31 */
+ pps_sdp->pps_payload.slice_bpg_offset =
+ cpu_to_be16(dsc_cfg->slice_bpg_offset);
+
+ /* PPS 32, 33 */
+ pps_sdp->pps_payload.initial_offset =
+ cpu_to_be16(dsc_cfg->initial_offset);
+
+ /* PPS 34, 35 */
+ pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset);
+
+ /* PPS 36 */
+ pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp;
+
+ /* PPS 37 */
+ pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp;
+
+ /* PPS 38, 39 */
+ pps_sdp->pps_payload.rc_model_size =
+ cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
+
+ /* PPS 40 */
+ pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
+
+ /* PPS 41 */
+ pps_sdp->pps_payload.rc_quant_incr_limit0 =
+ dsc_cfg->rc_quant_incr_limit0;
+
+ /* PPS 42 */
+ pps_sdp->pps_payload.rc_quant_incr_limit1 =
+ dsc_cfg->rc_quant_incr_limit1;
+
+ /* PPS 43 */
+ pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
+ DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
+
+ /* PPS 44 - 57 */
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
+ pps_sdp->pps_payload.rc_buf_thresh[i] =
+ dsc_cfg->rc_buf_thresh[i];
+
+ /* PPS 58 - 87 */
+ /*
+ * For DSC sink programming the RC Range parameter fields
+ * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
+ */
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ pps_sdp->pps_payload.rc_range_parameters[i] =
+ ((dsc_cfg->rc_range_params[i].range_min_qp <<
+ DSC_PPS_RC_RANGE_MINQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_max_qp <<
+ DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_bpg_offset));
+ pps_sdp->pps_payload.rc_range_parameters[i] =
+ cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]);
+ }
+
+ /* PPS 88 */
+ pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 |
+ dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
+
+ /* PPS 89 */
+ pps_sdp->pps_payload.second_line_bpg_offset =
+ dsc_cfg->second_line_bpg_offset;
+
+ /* PPS 90, 91 */
+ pps_sdp->pps_payload.nsl_bpg_offset =
+ cpu_to_be16(dsc_cfg->nsl_bpg_offset);
+
+ /* PPS 92, 93 */
+ pps_sdp->pps_payload.second_line_offset_adj =
+ cpu_to_be16(dsc_cfg->second_line_offset_adj);
+
+ /* PPS 94 - 127 are 0 */
+}
+EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack);
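As a usage sketch, an encoder driver that has already filled out a &drm_dsc_config would build and transmit the SDP in two steps; my_encoder and my_write_sdp() are placeholders for the hardware-specific parts:

#include <drm/drm_dsc.h>

struct my_encoder;						/* hypothetical driver type */
void my_write_sdp(struct my_encoder *enc, const void *buf, size_t len);	/* placeholder */

static void my_send_dsc_pps(struct my_encoder *enc,
			    const struct drm_dsc_config *dsc_cfg)
{
	struct drm_dsc_pps_infoframe pps_sdp;

	drm_dsc_dp_pps_header_init(&pps_sdp);		/* DP 1.4 SDP header */
	drm_dsc_pps_infoframe_pack(&pps_sdp, dsc_cfg);	/* big-endian PPS payload */

	my_write_sdp(enc, &pps_sdp, sizeof(pps_sdp));
}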
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 883a0c44714c..ca706fb1d975 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
static bool drm_leak_fbdev_smem = false;
module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
-MODULE_PARM_DESC(fbdev_emulation,
+MODULE_PARM_DESC(drm_leak_fbdev_smem,
"Allow unsafe leaking fbdev physical smem address [default=false]");
#endif
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index ffa8dc35515f..46f48f245eb5 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -525,7 +525,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
struct drm_device *dev = file_priv->minor->dev;
ssize_t ret;
- if (!access_ok(VERIFY_WRITE, buffer, count))
+ if (!access_ok(buffer, count))
return -EFAULT;
ret = mutex_lock_interruptible(&file_priv->event_read_lock);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 331112b2ae88..251d67e04c2d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -101,6 +101,8 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
int drm_sysfs_connector_add(struct drm_connector *connector);
void drm_sysfs_connector_remove(struct drm_connector *connector);
+void drm_sysfs_lease_event(struct drm_device *dev);
+
/* drm_gem.c */
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 94bd872d56c4..7e6746b2d704 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -37,6 +37,7 @@
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/nospec.h>
/**
* DOC: getunique and setversion story
@@ -800,13 +801,17 @@ long drm_ioctl(struct file *filp,
if (is_driver_ioctl) {
/* driver ioctl */
- if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+ unsigned int index = nr - DRM_COMMAND_BASE;
+
+ if (index >= dev->driver->num_ioctls)
goto err_i1;
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ index = array_index_nospec(index, dev->driver->num_ioctls);
+ ioctl = &dev->driver->ioctls[index];
} else {
/* core ioctl */
if (nr >= DRM_CORE_IOCTL_COUNT)
goto err_i1;
+ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
ioctl = &drm_ioctls[nr];
}
@@ -888,6 +893,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
if (nr >= DRM_CORE_IOCTL_COUNT)
return false;
+ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
*flags = drm_ioctls[nr].flags;
return true;
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d9b4d3ff06f6..b735704653cb 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -292,7 +292,7 @@ void drm_lease_destroy(struct drm_master *master)
if (master->lessor) {
/* Tell the master to check the lessee list */
- drm_sysfs_hotplug_event(dev);
+ drm_sysfs_lease_event(dev);
drm_master_put(&master->lessor);
}
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 037e243ec863..4a1c2023ccf0 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -297,6 +297,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.prop_crtc_id = prop;
+ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "FB_DAMAGE_CLIPS",
+ 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_fb_damage_clips = prop;
+
prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
"ACTIVE");
if (!prop)
@@ -310,6 +316,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.prop_mode_id = prop;
+ prop = drm_property_create_bool(dev, 0,
+ "VRR_ENABLED");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_vrr_enabled = prop;
+
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB,
"DEGAMMA_LUT", 0);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index b3c1daad1169..ecb7b33002bb 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -301,6 +301,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
connector->kdev = NULL;
}
+void drm_sysfs_lease_event(struct drm_device *dev)
+{
+ char *event_string = "LEASE=1";
+ char *envp[] = { event_string, NULL };
+
+ DRM_DEBUG("generating lease event\n");
+
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+
/**
* drm_sysfs_hotplug_event - generate a DRM uevent
* @dev: DRM device
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 7fea74861a87..160ce3c060a5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -439,6 +439,4 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
-
- gpu->lastctx = cmdbuf->ctx;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 52802e6049e0..18c27f795cf6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -72,14 +72,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
- if (gpu) {
- mutex_lock(&gpu->lock);
- if (gpu->lastctx == ctx)
- gpu->lastctx = NULL;
- mutex_unlock(&gpu->lock);
-
+ if (gpu)
drm_sched_entity_destroy(&ctx->sched_entity[i]);
- }
}
kfree(ctx);
@@ -345,7 +339,6 @@ static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_etnaviv_gem_userptr *args = data;
- int access;
if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
args->flags == 0)
@@ -357,12 +350,7 @@ static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
args->user_ptr & ~PAGE_MASK)
return -EINVAL;
- if (args->flags & ETNA_USERPTR_WRITE)
- access = VERIFY_WRITE;
- else
- access = VERIFY_READ;
-
- if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
+ if (!access_ok((void __user *)(unsigned long)args->user_ptr,
args->user_size))
return -EFAULT;
@@ -523,7 +511,7 @@ static int etnaviv_bind(struct device *dev)
if (!priv) {
dev_err(dev, "failed to allocate private data\n");
ret = -ENOMEM;
- goto out_unref;
+ goto out_put;
}
drm->dev_private = priv;
@@ -549,7 +537,7 @@ out_register:
component_unbind_all(dev, drm);
out_bind:
kfree(priv);
-out_unref:
+out_put:
drm_dev_put(drm);
return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 8d02d1b7dcf5..4bf698de5996 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -107,17 +107,6 @@ static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
return base + nelem * elem_size;
}
-/* returns true if fence a comes after fence b */
-static inline bool fence_after(u32 a, u32 b)
-{
- return (s32)(a - b) > 0;
-}
-
-static inline bool fence_after_eq(u32 a, u32 b)
-{
- return (s32)(a - b) >= 0;
-}
-
/*
* Etnaviv timeouts are specified wrt CLOCK_MONOTONIC, not jiffies.
* We need to calculate the timeout in terms of number of jiffies
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 9146e30e24a6..3fbb4855396c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -118,6 +118,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
+ unsigned long flags;
/* Only catch the first event, or when manually re-armed */
if (!etnaviv_dump_core)
@@ -134,13 +135,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
- spin_lock(&gpu->sched.job_list_lock);
+ spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
file_size += submit->cmdbuf.size;
n_obj++;
}
- spin_unlock(&gpu->sched.job_list_lock);
+ spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
/* Add in the active buffer objects */
list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
@@ -182,14 +183,14 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer));
- spin_lock(&gpu->sched.job_list_lock);
+ spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
submit->cmdbuf.vaddr, submit->cmdbuf.size,
etnaviv_cmdbuf_get_va(&submit->cmdbuf));
}
- spin_unlock(&gpu->sched.job_list_lock);
+ spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
/* Reserve space for the bomap */
if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index f225fbc6edd2..6904535475de 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -3,10 +3,12 @@
* Copyright (C) 2015-2018 Etnaviv Project
*/
+#include <linux/clk.h>
#include <linux/component.h>
#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
#include "etnaviv_cmdbuf.h"
@@ -976,7 +978,6 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
{
- unsigned long flags;
unsigned int i = 0;
dev_err(gpu->dev, "recover hung GPU!\n");
@@ -989,15 +990,13 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
etnaviv_hw_reset(gpu);
/* complete all events, the GPU won't do it after the reset */
- spin_lock_irqsave(&gpu->event_spinlock, flags);
+ spin_lock(&gpu->event_spinlock);
for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
complete(&gpu->event_free);
bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
- spin_unlock_irqrestore(&gpu->event_spinlock, flags);
- gpu->completed_fence = gpu->active_fence;
+ spin_unlock(&gpu->event_spinlock);
etnaviv_gpu_hw_init(gpu);
- gpu->lastctx = NULL;
gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
@@ -1032,7 +1031,7 @@ static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
- return fence_completed(f->gpu, f->base.seqno);
+ return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
}
static void etnaviv_fence_release(struct dma_fence *fence)
@@ -1071,6 +1070,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
return &f->base;
}
+/* returns true if fence a comes after fence b */
+static inline bool fence_after(u32 a, u32 b)
+{
+ return (s32)(a - b) > 0;
+}
+
/*
* event management:
*/
@@ -1078,7 +1083,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
unsigned int *events)
{
- unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
+ unsigned long timeout = msecs_to_jiffies(10 * 10000);
unsigned i, acquired = 0;
for (i = 0; i < nr_events; i++) {
@@ -1095,7 +1100,7 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
timeout = ret;
}
- spin_lock_irqsave(&gpu->event_spinlock, flags);
+ spin_lock(&gpu->event_spinlock);
for (i = 0; i < nr_events; i++) {
int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
@@ -1105,7 +1110,7 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
set_bit(event, gpu->event_bitmap);
}
- spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+ spin_unlock(&gpu->event_spinlock);
return 0;
@@ -1118,18 +1123,11 @@ out:
static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
- unsigned long flags;
-
- spin_lock_irqsave(&gpu->event_spinlock, flags);
-
if (!test_bit(event, gpu->event_bitmap)) {
dev_warn(gpu->dev, "event %u is already marked as free",
event);
- spin_unlock_irqrestore(&gpu->event_spinlock, flags);
} else {
clear_bit(event, gpu->event_bitmap);
- spin_unlock_irqrestore(&gpu->event_spinlock, flags);
-
complete(&gpu->event_free);
}
}
@@ -1306,8 +1304,6 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
goto out_unlock;
}
- gpu->active_fence = gpu_fence->seqno;
-
if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
kref_get(&submit->refcount);
@@ -1549,7 +1545,6 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
etnaviv_gpu_update_clock(gpu);
etnaviv_gpu_hw_init(gpu);
- gpu->lastctx = NULL;
gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
@@ -1806,8 +1801,8 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
u32 idle, mask;
- /* If we have outstanding fences, we're not idle */
- if (gpu->completed_fence != gpu->active_fence)
+ /* If there are any jobs in the HW queue, we're not idle */
+ if (atomic_read(&gpu->sched.hw_rq_count))
return -EBUSY;
/* Check whether the hardware (except FE) is idle */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 9a75a6937268..9bcf151f706b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -6,9 +6,6 @@
#ifndef __ETNAVIV_GPU_H__
#define __ETNAVIV_GPU_H__
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
-
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
@@ -88,6 +85,8 @@ struct etnaviv_event {
struct etnaviv_cmdbuf_suballoc;
struct etnaviv_cmdbuf;
+struct regulator;
+struct clk;
#define ETNA_NR_EVENTS 30
@@ -98,7 +97,6 @@ struct etnaviv_gpu {
struct mutex lock;
struct etnaviv_chip_identity identity;
enum etnaviv_sec_mode sec_mode;
- struct etnaviv_file_private *lastctx;
struct workqueue_struct *wq;
struct drm_gpu_scheduler sched;
@@ -121,7 +119,6 @@ struct etnaviv_gpu {
struct mutex fence_lock;
struct idr fence_idr;
u32 next_fence;
- u32 active_fence;
u32 completed_fence;
wait_queue_head_t fence_event;
u64 fence_context;
@@ -161,11 +158,6 @@ static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
return readl(gpu->mmio + reg);
}
-static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
-{
- return fence_after_eq(gpu->completed_fence, fence);
-}
-
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 208bc27be3cc..3691a140c950 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,11 +10,6 @@ config DRM_EXYNOS
if DRM_EXYNOS
-config DRM_EXYNOS_IOMMU
- bool
- depends on EXYNOS_IOMMU
- default y
-
comment "CRTCs"
config DRM_EXYNOS_FIMD
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 2ad146bbf4f5..2fd2f3ee4fcf 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -4,10 +4,9 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
- exynos_drm_gem.o exynos_drm_plane.o
+ exynos_drm_gem.o exynos_drm_plane.o exynos_drm_dma.o
exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON) += exynos7_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index aef487dd8731..5b4e0e8b23bc 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -25,7 +25,6 @@
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
#include "regs-decon5433.h"
#define DSD_CFG_MUX 0x1004
@@ -84,6 +83,14 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
[CURSON_WIN] = DRM_PLANE_TYPE_CURSOR,
};
+static const unsigned int capabilities[WINDOWS_NR] = {
+ 0,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+};
+
static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask,
u32 val)
{
@@ -252,11 +259,76 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
}
+static void decon_win_set_bldeq(struct decon_context *ctx, unsigned int win,
+ unsigned int alpha, unsigned int pixel_alpha)
+{
+ u32 mask = BLENDERQ_A_FUNC_F(0xf) | BLENDERQ_B_FUNC_F(0xf);
+ u32 val = 0;
+
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ case DRM_MODE_BLEND_COVERAGE:
+ val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA_A);
+ val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+ break;
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
+ if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA0);
+ val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+ } else {
+ val |= BLENDERQ_A_FUNC_F(BLENDERQ_ONE);
+ val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+ }
+ break;
+ }
+ decon_set_bits(ctx, DECON_BLENDERQx(win), mask, val);
+}
+
+static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
+ unsigned int alpha, unsigned int pixel_alpha)
+{
+ u32 win_alpha = alpha >> 8;
+ u32 val = 0;
+
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
+ val |= WINCONx_ALPHA_SEL_F;
+ val |= WINCONx_BLD_PIX_F;
+ val |= WINCONx_ALPHA_MUL_F;
+ break;
+ }
+ decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_BLEND_MODE_MASK, val);
+
+ if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ val = VIDOSD_Wx_ALPHA_R_F(win_alpha) |
+ VIDOSD_Wx_ALPHA_G_F(win_alpha) |
+ VIDOSD_Wx_ALPHA_B_F(win_alpha);
+ decon_set_bits(ctx, DECON_VIDOSDxC(win),
+ VIDOSDxC_ALPHA0_RGB_MASK, val);
+ decon_set_bits(ctx, DECON_BLENDCON, BLEND_NEW, BLEND_NEW);
+ }
+}
+
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
struct drm_framebuffer *fb)
{
+ struct exynos_drm_plane plane = ctx->planes[win];
+ struct exynos_drm_plane_state *state =
+ to_exynos_plane_state(plane.base.state);
+ unsigned int alpha = state->base.alpha;
+ unsigned int pixel_alpha;
unsigned long val;
+ if (fb->format->has_alpha)
+ pixel_alpha = state->base.pixel_blend_mode;
+ else
+ pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
+
val = readl(ctx->addr + DECON_WINCONx(win));
val &= WINCONx_ENWIN_F;
@@ -279,7 +351,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
case DRM_FORMAT_ARGB8888:
default:
val |= WINCONx_BPPMODE_32BPP_A8888;
- val |= WINCONx_WSWP_F | WINCONx_BLD_PIX_F | WINCONx_ALPHA_SEL_F;
+ val |= WINCONx_WSWP_F;
val |= WINCONx_BURSTLEN_16WORD;
break;
}
@@ -298,8 +370,12 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
+ decon_set_bits(ctx, DECON_WINCONx(win), ~WINCONx_BLEND_MODE_MASK, val);
- writel(val, ctx->addr + DECON_WINCONx(win));
+ if (win > 0) {
+ decon_win_set_bldmod(ctx, win, alpha, pixel_alpha);
+ decon_win_set_bldeq(ctx, win, alpha, pixel_alpha);
+ }
}
static void decon_shadow_protect(struct decon_context *ctx, bool protect)
@@ -552,6 +628,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
ctx->configs[win].num_pixel_formats = ARRAY_SIZE(decon_formats);
ctx->configs[win].zpos = win - ctx->first_win;
ctx->configs[win].type = decon_win_types[win];
+ ctx->configs[win].capabilities = capabilities[win];
ret = exynos_plane_init(drm_dev, &ctx->planes[win], win,
&ctx->configs[win]);
@@ -569,7 +646,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
decon_clear_channels(ctx->crtc);
- return drm_iommu_attach_device(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev);
}
static void decon_unbind(struct device *dev, struct device *master, void *data)
@@ -579,7 +656,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
decon_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
}
static const struct component_ops decon_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 88cbd000eb09..381aa3d60e37 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -30,7 +30,6 @@
#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_iommu.h"
#include "regs-decon7.h"
/*
@@ -133,13 +132,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
decon_clear_channels(ctx->crtc);
- return drm_iommu_attach_device(drm_dev, ctx->dev);
+ return exynos_drm_register_dma(drm_dev, ctx->dev);
}
static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
}
static u32 decon_calc_clkdiv(struct decon_context *ctx,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
new file mode 100644
index 000000000000..3432c5ee9f0c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2012 Samsung Electronics Co., Ltd.
+// Author: Inki Dae <inki.dae@samsung.com>
+// Author: Andrzej Hajda <a.hajda@samsung.com>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#include "exynos_drm_drv.h"
+
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#else
+#define arm_iommu_create_mapping(...) ({ NULL; })
+#define arm_iommu_attach_device(...) ({ -ENODEV; })
+#define arm_iommu_release_mapping(...) ({ })
+#define arm_iommu_detach_device(...) ({ })
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
+#if !defined(CONFIG_IOMMU_DMA)
+#define iommu_dma_init_domain(...) ({ -EINVAL; })
+#endif
+
+#define EXYNOS_DEV_ADDR_START 0x20000000
+#define EXYNOS_DEV_ADDR_SIZE 0x40000000
+
+static inline int configure_dma_max_seg_size(struct device *dev)
+{
+ if (!dev->dma_parms)
+ dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ return 0;
+}
+
+static inline void clear_dma_max_seg_size(struct device *dev)
+{
+ kfree(dev->dma_parms);
+ dev->dma_parms = NULL;
+}
+
+/*
+ * drm_iommu_attach_device - attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach themselves to the
+ * iommu mapping.
+ */
+static int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ int ret;
+
+ if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
+ DRM_ERROR("Device %s lacks support for IOMMU\n",
+ dev_name(subdrv_dev));
+ return -EINVAL;
+ }
+
+ ret = configure_dma_max_seg_size(subdrv_dev);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
+ if (to_dma_iommu_mapping(subdrv_dev))
+ arm_iommu_detach_device(subdrv_dev);
+
+ ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
+ } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
+ ret = iommu_attach_device(priv->mapping, subdrv_dev);
+ }
+
+ if (ret)
+ clear_dma_max_seg_size(subdrv_dev);
+
+ return ret;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach themselves from the
+ * iommu mapping.
+ */
+static void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ arm_iommu_detach_device(subdrv_dev);
+ else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ iommu_detach_device(priv->mapping, subdrv_dev);
+
+ clear_dma_max_seg_size(subdrv_dev);
+}
+
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
+{
+ struct exynos_drm_private *priv = drm->dev_private;
+
+ if (!priv->dma_dev) {
+ priv->dma_dev = dev;
+ DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
+ dev_name(dev));
+ }
+
+ if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+ return 0;
+
+ if (!priv->mapping) {
+ void *mapping;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
+ else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ mapping = iommu_get_domain_for_dev(priv->dma_dev);
+
+ if (IS_ERR(mapping))
+ return PTR_ERR(mapping);
+ priv->mapping = mapping;
+ }
+
+ return drm_iommu_attach_device(drm, dev);
+}
+
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
+{
+ if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+ drm_iommu_detach_device(drm, dev);
+}
+
+void exynos_drm_cleanup_dma(struct drm_device *drm)
+{
+ struct exynos_drm_private *priv = drm->dev_private;
+
+ if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+ return;
+
+ arm_iommu_release_mapping(priv->mapping);
+ priv->mapping = NULL;
+ priv->dma_dev = NULL;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 6f76baf4550a..2c75e789b2a7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -30,7 +30,6 @@
#include "exynos_drm_ipp.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
-#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -175,8 +174,7 @@ struct exynos_drm_driver_info {
#define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */
#define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */
-#define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */
-#define DRM_FIMC_DEVICE BIT(3) /* devices shared with V4L2 subsystem */
+#define DRM_FIMC_DEVICE BIT(2) /* devices shared with V4L2 subsystem */
#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
@@ -187,16 +185,16 @@ struct exynos_drm_driver_info {
static struct exynos_drm_driver_info exynos_drm_drivers[] = {
{
DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
DRM_COMPONENT_DRIVER
@@ -267,27 +265,6 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
return match ?: ERR_PTR(-ENODEV);
}
-static struct device *exynos_drm_get_dma_device(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
- struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
- struct device *dev;
-
- if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
- continue;
-
- while ((dev = bus_find_device(&platform_bus_type, NULL,
- &info->driver->driver,
- (void *)platform_bus_type.match))) {
- put_device(dev);
- return dev;
- }
- }
- return NULL;
-}
-
static int exynos_drm_bind(struct device *dev)
{
struct exynos_drm_private *private;
@@ -312,23 +289,6 @@ static int exynos_drm_bind(struct device *dev)
dev_set_drvdata(dev, drm);
drm->dev_private = (void *)private;
- /* the first real CRTC device is used for all dma mapping operations */
- private->dma_dev = exynos_drm_get_dma_device();
- if (!private->dma_dev) {
- DRM_ERROR("no device found for DMA mapping operations.\n");
- ret = -ENODEV;
- goto err_free_private;
- }
- DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
- dev_name(private->dma_dev));
-
- /* create common IOMMU mapping for all devices attached to Exynos DRM */
- ret = drm_create_iommu_mapping(drm);
- if (ret < 0) {
- DRM_ERROR("failed to create iommu mapping.\n");
- goto err_free_private;
- }
-
drm_mode_config_init(drm);
exynos_drm_mode_config_init(drm);
@@ -385,8 +345,7 @@ err_unbind_all:
component_unbind_all(drm->dev, drm);
err_mode_config_cleanup:
drm_mode_config_cleanup(drm);
- drm_release_iommu_mapping(drm);
-err_free_private:
+ exynos_drm_cleanup_dma(drm);
kfree(private);
err_free_drm:
drm_dev_put(drm);
@@ -405,7 +364,7 @@ static void exynos_drm_unbind(struct device *dev)
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
- drm_release_iommu_mapping(drm);
+ exynos_drm_cleanup_dma(drm);
kfree(drm->dev_private);
drm->dev_private = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 5e61e707f955..71eb240bc1f4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -214,6 +214,17 @@ static inline struct device *to_dma_dev(struct drm_device *dev)
return priv->dma_dev;
}
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ return priv->mapping ? true : false;
+}
+
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
+void exynos_drm_cleanup_dma(struct drm_device *drm);
+
#ifdef CONFIG_DRM_EXYNOS_DPI
struct drm_encoder *exynos_dpi_probe(struct device *dev);
int exynos_dpi_remove(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 9f52382e19ee..31eb538a44ae 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -24,7 +24,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_crtc.h"
static int check_fb_gem_memory_type(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 01d182289efa..ce9604ca8041 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -23,7 +23,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
-#include "exynos_drm_iommu.h"
#define MAX_CONNECTOR 4
#define PREFERRED_BPP 32
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index e8d0670bb5f8..90dfea0aec4d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -25,7 +25,6 @@
#include <drm/exynos_drm.h>
#include "regs-fimc.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
@@ -1129,7 +1128,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1149,7 +1148,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev);
}
static const struct component_ops fimc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index b7f56935a46b..786a8ee6f10f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -32,7 +32,6 @@
#include "exynos_drm_fb.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
/*
* FIMD stands for Fully Interactive Mobile Display and
@@ -229,6 +228,21 @@ static const uint32_t fimd_formats[] = {
DRM_FORMAT_ARGB8888,
};
+static const unsigned int capabilities[WINDOWS_NR] = {
+ 0,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+};
+
+static inline void fimd_set_bits(struct fimd_context *ctx, u32 reg, u32 mask,
+ u32 val)
+{
+ val = (val & mask) | (readl(ctx->regs + reg) & ~mask);
+ writel(val, ctx->regs + reg);
+}
+
static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
@@ -552,13 +566,88 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
writel(val, ctx->regs + VIDCON0);
}
+static void fimd_win_set_bldeq(struct fimd_context *ctx, unsigned int win,
+ unsigned int alpha, unsigned int pixel_alpha)
+{
+ u32 mask = BLENDEQ_A_FUNC_F(0xf) | BLENDEQ_B_FUNC_F(0xf);
+ u32 val = 0;
+
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ case DRM_MODE_BLEND_COVERAGE:
+ val |= BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA_A);
+ val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
+ break;
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
+ if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ val |= BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA0);
+ val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
+ } else {
+ val |= BLENDEQ_A_FUNC_F(BLENDEQ_ONE);
+ val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
+ }
+ break;
+ }
+ fimd_set_bits(ctx, BLENDEQx(win), mask, val);
+}
-static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
- uint32_t pixel_format, int width)
+static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
+ unsigned int alpha, unsigned int pixel_alpha)
{
- unsigned long val;
+ u32 win_alpha_l = (alpha >> 8) & 0xf;
+ u32 win_alpha_h = alpha >> 12;
+ u32 val = 0;
- val = WINCONx_ENWIN;
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
+ val |= WINCON1_ALPHA_SEL;
+ val |= WINCON1_BLD_PIX;
+ val |= WINCON1_ALPHA_MUL;
+ break;
+ }
+ fimd_set_bits(ctx, WINCON(win), WINCONx_BLEND_MODE_MASK, val);
+
+ /* OSD alpha */
+ val = VIDISD14C_ALPHA0_R(win_alpha_h) |
+ VIDISD14C_ALPHA0_G(win_alpha_h) |
+ VIDISD14C_ALPHA0_B(win_alpha_h) |
+ VIDISD14C_ALPHA1_R(0x0) |
+ VIDISD14C_ALPHA1_G(0x0) |
+ VIDISD14C_ALPHA1_B(0x0);
+ writel(val, ctx->regs + VIDOSD_C(win));
+
+ val = VIDW_ALPHA_R(win_alpha_l) | VIDW_ALPHA_G(win_alpha_l) |
+ VIDW_ALPHA_B(win_alpha_l);
+ writel(val, ctx->regs + VIDWnALPHA0(win));
+
+ val = VIDW_ALPHA_R(0x0) | VIDW_ALPHA_G(0x0) |
+ VIDW_ALPHA_B(0x0);
+ writel(val, ctx->regs + VIDWnALPHA1(win));
+
+ fimd_set_bits(ctx, BLENDCON, BLENDCON_NEW_MASK,
+ BLENDCON_NEW_8BIT_ALPHA_VALUE);
+}
+
+static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
+ struct drm_framebuffer *fb, int width)
+{
+ struct exynos_drm_plane plane = ctx->planes[win];
+ struct exynos_drm_plane_state *state =
+ to_exynos_plane_state(plane.base.state);
+ uint32_t pixel_format = fb->format->format;
+ unsigned int alpha = state->base.alpha;
+ u32 val = WINCONx_ENWIN;
+ unsigned int pixel_alpha;
+
+ if (fb->format->has_alpha)
+ pixel_alpha = state->base.pixel_blend_mode;
+ else
+ pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
/*
* In case of s3c64xx, window 0 doesn't support alpha channel.
@@ -592,8 +681,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
break;
case DRM_FORMAT_ARGB8888:
default:
- val |= WINCON1_BPPMODE_25BPP_A1888
- | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
+ val |= WINCON1_BPPMODE_25BPP_A1888;
val |= WINCONx_WSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
@@ -611,25 +699,12 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_4WORD;
}
-
- writel(val, ctx->regs + WINCON(win));
+ fimd_set_bits(ctx, WINCON(win), ~WINCONx_BLEND_MODE_MASK, val);
/* hardware window 0 doesn't support alpha channel. */
if (win != 0) {
- /* OSD alpha */
- val = VIDISD14C_ALPHA0_R(0xf) |
- VIDISD14C_ALPHA0_G(0xf) |
- VIDISD14C_ALPHA0_B(0xf) |
- VIDISD14C_ALPHA1_R(0xf) |
- VIDISD14C_ALPHA1_G(0xf) |
- VIDISD14C_ALPHA1_B(0xf);
-
- writel(val, ctx->regs + VIDOSD_C(win));
-
- val = VIDW_ALPHA_R(0xf) | VIDW_ALPHA_G(0xf) |
- VIDW_ALPHA_G(0xf);
- writel(val, ctx->regs + VIDWnALPHA0(win));
- writel(val, ctx->regs + VIDWnALPHA1(win));
+ fimd_win_set_bldmod(ctx, win, alpha, pixel_alpha);
+ fimd_win_set_bldeq(ctx, win, alpha, pixel_alpha);
}
}
@@ -786,7 +861,7 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
}
- fimd_win_set_pixfmt(ctx, win, fb->format->format, state->src.w);
+ fimd_win_set_pixfmt(ctx, win, fb, state->src.w);
/* hardware window 0 doesn't support color key. */
if (win != 0)
@@ -988,6 +1063,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
ctx->configs[i].num_pixel_formats = ARRAY_SIZE(fimd_formats);
ctx->configs[i].zpos = i;
ctx->configs[i].type = fimd_win_types[i];
+ ctx->configs[i].capabilities = capabilities[i];
ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
&ctx->configs[i]);
if (ret)
@@ -1011,7 +1087,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
if (is_drm_iommu_supported(drm_dev))
fimd_clear_channels(ctx->crtc);
- return drm_iommu_attach_device(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev);
}
static void fimd_unbind(struct device *dev, struct device *master,
@@ -1021,7 +1097,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
fimd_disable(ctx->crtc);
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f2481a2014bb..24c536d6d9cf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -25,7 +25,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
@@ -1405,7 +1404,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = drm_iommu_attach_device(drm_dev, dev);
+ ret = exynos_drm_register_dma(drm_dev, dev);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
g2d_fini_cmdlist(g2d);
@@ -1430,7 +1429,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
priv->g2d_dev = NULL;
cancel_work_sync(&g2d->runqueue_work);
- drm_iommu_detach_device(g2d->drm_dev, dev);
+ exynos_drm_unregister_dma(g2d->drm_dev, dev);
}
static const struct component_ops g2d_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 34ace85feb68..df66c383a877 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -19,7 +19,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index ce15d46bfce8..f048d97fe9e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -24,7 +24,6 @@
#include <drm/exynos_drm.h>
#include "regs-gsc.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
@@ -1170,7 +1169,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1190,7 +1189,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev);
}
static const struct component_ops gsc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
deleted file mode 100644
index 0f373702414e..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/* exynos_drm_iommu.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
-
-static inline int configure_dma_max_seg_size(struct device *dev)
-{
- if (!dev->dma_parms)
- dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
-
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
- return 0;
-}
-
-static inline void clear_dma_max_seg_size(struct device *dev)
-{
- kfree(dev->dma_parms);
- dev->dma_parms = NULL;
-}
-
-/*
- * drm_create_iommu_mapping - create a mapping structure
- *
- * @drm_dev: DRM device
- */
-int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
- EXYNOS_DEV_ADDR_SIZE);
-}
-
-/*
- * drm_release_iommu_mapping - release iommu mapping structure
- *
- * @drm_dev: DRM device
- */
-void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- __exynos_iommu_release_mapping(priv);
-}
-
-/*
- * drm_iommu_attach_device- attach device to iommu mapping
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be attach
- *
- * This function should be called by sub drivers to attach it to iommu
- * mapping.
- */
-int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
- int ret;
-
- if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
- DRM_ERROR("Device %s lacks support for IOMMU\n",
- dev_name(subdrv_dev));
- return -EINVAL;
- }
-
- ret = configure_dma_max_seg_size(subdrv_dev);
- if (ret)
- return ret;
-
- ret = __exynos_iommu_attach(priv, subdrv_dev);
- if (ret)
- clear_dma_max_seg_size(subdrv_dev);
-
- return 0;
-}
-
-/*
- * drm_iommu_detach_device -detach device address space mapping from device
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be detached
- *
- * This function should be called by sub drivers to detach it from iommu
- * mapping
- */
-void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- __exynos_iommu_detach(priv, subdrv_dev);
- clear_dma_max_seg_size(subdrv_dev);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
deleted file mode 100644
index 797d9ee5f15a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* exynos_drm_iommu.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Authoer: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_IOMMU_H_
-#define _EXYNOS_DRM_IOMMU_H_
-
-#define EXYNOS_DEV_ADDR_START 0x20000000
-#define EXYNOS_DEV_ADDR_SIZE 0x40000000
-
-#ifdef CONFIG_DRM_EXYNOS_IOMMU
-
-#if defined(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-
-static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
- unsigned long start, unsigned long size)
-{
- priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start,
- size);
- return IS_ERR(priv->mapping);
-}
-
-static inline void
-__exynos_iommu_release_mapping(struct exynos_drm_private *priv)
-{
- arm_iommu_release_mapping(priv->mapping);
-}
-
-static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- if (dev->archdata.mapping)
- arm_iommu_detach_device(dev);
-
- return arm_iommu_attach_device(dev, priv->mapping);
-}
-
-static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- arm_iommu_detach_device(dev);
-}
-
-#elif defined(CONFIG_IOMMU_DMA)
-#include <linux/dma-iommu.h>
-
-static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
- unsigned long start, unsigned long size)
-{
- priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
- return 0;
-}
-
-static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
-{
- priv->mapping = NULL;
-}
-
-static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- struct iommu_domain *domain = priv->mapping;
-
- if (dev != priv->dma_dev)
- return iommu_attach_device(domain, dev);
- return 0;
-}
-
-static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- struct iommu_domain *domain = priv->mapping;
-
- if (dev != priv->dma_dev)
- iommu_detach_device(domain, dev);
-}
-#else
-#error Unsupported architecture and IOMMU/DMA-mapping glue code
-#endif
-
-int drm_create_iommu_mapping(struct drm_device *drm_dev);
-
-void drm_release_iommu_mapping(struct drm_device *drm_dev);
-
-int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev);
-
-void drm_iommu_detach_device(struct drm_device *dev_dev,
- struct device *subdrv_dev);
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- return priv->mapping ? true : false;
-}
-
-#else
-
-static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
- return 0;
-}
-
-static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
-}
-
-static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- return 0;
-}
-
-static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
-}
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
- return false;
-}
-
-#endif
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index a820a68429b9..8d67b2a54be3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -23,7 +23,6 @@
#include <drm/exynos_drm.h>
#include "regs-rotator.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
@@ -244,7 +243,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &rot->ipp;
rot->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
@@ -263,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &rot->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(rot->drm_dev, rot->dev);
+ exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
}
static const struct component_ops rotator_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index cd66774e817d..71270efa64f3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -23,7 +23,6 @@
#include "regs-scaler.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
#define scaler_read(offset) readl(scaler->regs + (offset))
@@ -452,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &scaler->ipp;
scaler->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -473,7 +472,7 @@ static void scaler_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &scaler->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(scaler->drm_dev, scaler->dev);
+ exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
}
static const struct component_ops scaler_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e3a4ecbc503b..0573eab0e190 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -40,7 +40,6 @@
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
#define MIXER_WIN_NR 3
#define VP_DEFAULT_WIN 2
@@ -381,19 +380,16 @@ static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_SCAN_MASK);
}
-static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, struct drm_display_mode *mode)
{
+ enum hdmi_quantization_range range = drm_default_rgb_quant_range(mode);
u32 val;
- switch (height) {
- case 480:
- case 576:
- val = MXR_CFG_RGB601_0_255;
- break;
- case 720:
- case 1080:
- default:
- val = MXR_CFG_RGB709_16_235;
+ if (mode->vdisplay < 720) {
+ val = MXR_CFG_RGB601;
+ } else {
+ val = MXR_CFG_RGB709;
+
/* Configure the BT.709 CSC matrix for full range RGB. */
mixer_reg_write(ctx, MXR_CM_COEFF_Y,
MXR_CSC_CT( 0.184, 0.614, 0.063) |
@@ -402,9 +398,13 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
MXR_CSC_CT(-0.102, -0.338, 0.440));
mixer_reg_write(ctx, MXR_CM_COEFF_CR,
MXR_CSC_CT( 0.440, -0.399, -0.040));
- break;
}
+ if (range == HDMI_QUANTIZATION_RANGE_FULL)
+ val |= MXR_CFG_QUANT_RANGE_FULL;
+ else
+ val |= MXR_CFG_QUANT_RANGE_LIMITED;
+
mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
}
@@ -461,7 +461,7 @@ static void mixer_commit(struct mixer_context *ctx)
struct drm_display_mode *mode = &ctx->crtc->base.state->adjusted_mode;
mixer_cfg_scan(ctx, mode->hdisplay, mode->vdisplay);
- mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+ mixer_cfg_rgb_fmt(ctx, mode);
mixer_run(ctx);
}
@@ -878,12 +878,12 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
}
}
- return drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
+ return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
}
static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
- drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+ exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
}
static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
diff --git a/drivers/gpu/drm/exynos/regs-decon5433.h b/drivers/gpu/drm/exynos/regs-decon5433.h
index 19ad9e47945e..63db6974bf14 100644
--- a/drivers/gpu/drm/exynos/regs-decon5433.h
+++ b/drivers/gpu/drm/exynos/regs-decon5433.h
@@ -104,6 +104,7 @@
#define WINCONx_BURSTLEN_16WORD (0x0 << 10)
#define WINCONx_BURSTLEN_8WORD (0x1 << 10)
#define WINCONx_BURSTLEN_4WORD (0x2 << 10)
+#define WINCONx_ALPHA_MUL_F (1 << 7)
#define WINCONx_BLD_PIX_F (1 << 6)
#define WINCONx_BPPMODE_MASK (0xf << 2)
#define WINCONx_BPPMODE_16BPP_565 (0x5 << 2)
@@ -116,11 +117,15 @@
#define WINCONx_BPPMODE_16BPP_A4444 (0xe << 2)
#define WINCONx_ALPHA_SEL_F (1 << 1)
#define WINCONx_ENWIN_F (1 << 0)
+#define WINCONx_BLEND_MODE_MASK (0xc2)
/* SHADOWCON */
#define SHADOWCON_PROTECT_MASK GENMASK(14, 10)
#define SHADOWCON_Wx_PROTECT(n) (1 << (10 + (n)))
+/* VIDOSDxC */
+#define VIDOSDxC_ALPHA0_RGB_MASK (0xffffff)
+
/* VIDOSDxD */
#define VIDOSD_Wx_ALPHA_R_F(n) (((n) & 0xff) << 16)
#define VIDOSD_Wx_ALPHA_G_F(n) (((n) & 0xff) << 8)
@@ -206,4 +211,21 @@
#define CRCCTRL_CRCEN (0x1 << 0)
#define CRCCTRL_MASK (0x7)
+/* BLENDCON */
+#define BLEND_NEW (1 << 0)
+
+/* BLENDERQx */
+#define BLENDERQ_ZERO 0x0
+#define BLENDERQ_ONE 0x1
+#define BLENDERQ_ALPHA_A 0x2
+#define BLENDERQ_ONE_MINUS_ALPHA_A 0x3
+#define BLENDERQ_ALPHA0 0x6
+#define BLENDERQ_Q_FUNC_F(n) (n << 18)
+#define BLENDERQ_P_FUNC_F(n) (n << 12)
+#define BLENDERQ_B_FUNC_F(n) (n << 6)
+#define BLENDERQ_A_FUNC_F(n) (n << 0)
+
+/* BLENDCON */
+#define BLEND_NEW (1 << 0)
+
#endif /* EXYNOS_REGS_DECON5433_H */
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index d2b8194a07bf..5ff095b0c1b3 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -85,10 +85,11 @@
/* bits for MXR_CFG */
#define MXR_CFG_LAYER_UPDATE (1 << 31)
#define MXR_CFG_LAYER_UPDATE_COUNT_MASK (3 << 29)
-#define MXR_CFG_RGB601_0_255 (0 << 9)
-#define MXR_CFG_RGB601_16_235 (1 << 9)
-#define MXR_CFG_RGB709_0_255 (2 << 9)
-#define MXR_CFG_RGB709_16_235 (3 << 9)
+#define MXR_CFG_QUANT_RANGE_FULL (0 << 9)
+#define MXR_CFG_QUANT_RANGE_LIMITED (1 << 9)
+#define MXR_CFG_RGB601 (0 << 10)
+#define MXR_CFG_RGB709 (1 << 10)
+
#define MXR_CFG_RGB_FMT_MASK 0x600
#define MXR_CFG_OUT_YUV444 (0 << 8)
#define MXR_CFG_OUT_RGB888 (1 << 8)
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 33a458b7f1fc..148be8e1a090 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -131,5 +131,5 @@ config DRM_I915_GVT_KVMGT
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
-source drivers/gpu/drm/i915/Kconfig.debug
+source "drivers/gpu/drm/i915/Kconfig.debug"
endmenu
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0ff878c994e2..19b5fe5016bf 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -157,14 +157,17 @@ i915-y += dvo_ch7017.o \
intel_sdvo.o \
intel_tv.o \
vlv_dsi.o \
- vlv_dsi_pll.o
+ vlv_dsi_pll.o \
+ intel_vdsc.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_random.o \
selftests/i915_selftest.o \
- selftests/igt_flush_test.o
+ selftests/igt_flush_test.o \
+ selftests/igt_reset.o \
+ selftests/igt_spinner.o
# virtual gpu code
i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index fe754022e356..359d37d5c958 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,10 +61,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&dev_priv->drm.struct_mutex);
+ mmio_hw_access_pre(dev_priv);
ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
+ mmio_hw_access_post(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 77edbfcb0f75..77ae634eb11c 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1900,11 +1900,11 @@ static struct cmd_info cmd_info[] = {
{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
- {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
+ {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
D_BDW_PLUS, 0, 8, NULL},
- {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
- ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+ {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL,
+ D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 481896fb712a..85e6736f0a32 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->bpp = skl_pixel_formats[fmt].bpp;
plane->drm_format = skl_pixel_formats[fmt].drm_format;
} else {
- plane->tiled = !!(val & DISPPLANE_TILED);
+ plane->tiled = val & DISPPLANE_TILED;
fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
plane->bpp = bdw_pixel_formats[fmt].bpp;
plane->drm_format = bdw_pixel_formats[fmt].drm_format;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 58e166effa45..c7103dd2d8d5 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2447,10 +2447,11 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
- struct intel_gvt_partial_pte *pos;
+ struct intel_gvt_partial_pte *pos, *next;
- list_for_each_entry(pos,
- &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) {
+ list_for_each_entry_safe(pos, next,
+ &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
+ list) {
gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
pos->offset, pos->data);
kfree(pos);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 6ef5a7fc70df..733a2a0d0c30 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -437,7 +437,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
ret = intel_gvt_debugfs_init(gvt);
if (ret)
- gvt_err("debugfs registeration failed, go on.\n");
+ gvt_err("debugfs registration failed, go on.\n");
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 31f6cdbe5c42..b4ab1dad0143 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -159,6 +159,10 @@ struct intel_vgpu_submission {
struct kmem_cache *workloads;
atomic_t running_workload_num;
struct i915_gem_context *shadow_ctx;
+ union {
+ u64 i915_context_pml4;
+ u64 i915_context_pdps[GEN8_3LVL_PDPES];
+ };
DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
void *ring_scan_buffer[I915_NUM_ENGINES];
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index aa280bb07125..b5475c91e2ef 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -475,6 +475,7 @@ static i915_reg_t force_nonpriv_white_list[] = {
_MMIO(0x7704),
_MMIO(0x7708),
_MMIO(0x770c),
+ _MMIO(0x83a8),
_MMIO(0xb110),
GEN8_L3SQCREG4,//_MMIO(0xb118)
_MMIO(0xe100),
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 5daa23ae566b..6b9d1354ff29 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -126,7 +126,7 @@ static const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
[FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
[AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
[AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
- [ERR_AND_DBG] = "South Error and Debug Interupts Combined",
+ [ERR_AND_DBG] = "South Error and Debug Interrupts Combined",
[GMBUS] = "Gmbus",
[SDVO_B_HOTPLUG] = "SDVO B hotplug",
[CRT_HOTPLUG] = "CRT Hotplug",
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 36a5147cd01e..d6e02c15ef97 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -158,6 +158,8 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
int ring_id, i;
for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
+ if (!HAS_ENGINE(dev_priv, ring_id))
+ continue;
offset.reg = regs[ring_id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
gen9_render_mocs.control_table[ring_id][i] =
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b8fbe3fabea3..1ad8c5e1455d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1079,6 +1079,21 @@ err:
return ret;
}
+static void
+i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
+{
+ struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
+ int i;
+
+ if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
+ else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++)
+ px_dma(i915_ppgtt->pdp.page_directory[i]) =
+ s->i915_context_pdps[i];
+ }
+}
+
/**
* intel_vgpu_clean_submission - free submission-related resource for vGPU
* @vgpu: a vGPU
@@ -1091,6 +1106,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
struct intel_vgpu_submission *s = &vgpu->submission;
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
+ i915_context_ppgtt_root_restore(s);
i915_gem_context_put(s->shadow_ctx);
kmem_cache_destroy(s->workloads);
}
@@ -1116,6 +1132,21 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
s->ops->reset(vgpu, engine_mask);
}
+static void
+i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
+{
+ struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
+ int i;
+
+ if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
+ else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++)
+ s->i915_context_pdps[i] =
+ px_dma(i915_ppgtt->pdp.page_directory[i]);
+ }
+}
+
/**
* intel_vgpu_setup_submission - setup submission-related resource for vGPU
* @vgpu: a vGPU
@@ -1138,6 +1169,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
if (IS_ERR(s->shadow_ctx))
return PTR_ERR(s->shadow_ctx);
+ i915_context_ppgtt_root_save(s);
+
bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a63d084c8e96..9bad6a32adae 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -943,30 +943,30 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
size_t count, loff_t *pos)
{
- struct i915_gpu_state *error = file->private_data;
- struct drm_i915_error_state_buf str;
+ struct i915_gpu_state *error;
ssize_t ret;
- loff_t tmp;
+ void *buf;
+ error = file->private_data;
if (!error)
return 0;
- ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
- if (ret)
- return ret;
+ /* Bounce buffer required because of kernfs __user API convenience. */
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- ret = i915_error_state_to_str(&str, error);
- if (ret)
+ ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
+ if (ret <= 0)
goto out;
- tmp = 0;
- ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
- if (ret < 0)
- goto out;
+ if (!copy_to_user(ubuf, buf, ret))
+ *pos += ret;
+ else
+ ret = -EFAULT;
- *pos = str.start + ret;
out:
- i915_error_state_buf_release(&str);
+ kfree(buf);
return ret;
}
@@ -3368,13 +3368,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
- struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
- int i;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
+ struct i915_wa *wa;
+ unsigned int i;
- seq_printf(m, "Workarounds applied: %d\n", wa->count);
- for (i = 0; i < wa->count; ++i)
+ seq_printf(m, "Workarounds applied: %u\n", wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
- wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
+ i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
return 0;
}
@@ -3434,31 +3436,32 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct skl_ddb_allocation *ddb;
struct skl_ddb_entry *entry;
- enum pipe pipe;
- int plane;
+ struct intel_crtc *crtc;
if (INTEL_GEN(dev_priv) < 9)
return -ENODEV;
drm_modeset_lock_all(dev);
- ddb = &dev_priv->wm.skl_hw.ddb;
-
seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
- for_each_pipe(dev_priv, pipe) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_universal_plane(dev_priv, pipe, plane) {
- entry = &ddb->plane[pipe][plane];
- seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
entry->start, entry->end,
skl_ddb_entry_size(entry));
}
- entry = &ddb->plane[pipe][PLANE_CURSOR];
+ entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
entry->end, skl_ddb_entry_size(entry));
}
@@ -4585,6 +4588,13 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ /* Synchronize with everything first in case there's been an HPD
+ * storm, but we haven't finished handling it in the kernel yet
+ */
+ synchronize_irq(dev_priv->drm.irq);
+ flush_work(&dev_priv->hotplug.dig_port_work);
+ flush_work(&dev_priv->hotplug.hotplug_work);
+
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
yesno(delayed_work_pending(&hotplug->reenable_work)));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b1d23c73c147..b310a897a4ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -53,6 +53,7 @@
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
+#include "intel_workarounds.h"
static struct drm_driver driver;
@@ -287,7 +288,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
* Use PCH_NOP (PCH but no South Display) for PCH platforms without
* display.
*/
- if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+ if (pch && !HAS_DISPLAY(dev_priv)) {
DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
dev_priv->pch_type = PCH_NOP;
dev_priv->pch_id = 0;
@@ -645,7 +646,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (i915_inject_load_failure())
return -ENODEV;
- if (INTEL_INFO(dev_priv)->num_pipes) {
+ if (HAS_DISPLAY(dev_priv)) {
ret = drm_vblank_init(&dev_priv->drm,
INTEL_INFO(dev_priv)->num_pipes);
if (ret)
@@ -696,7 +697,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_overlay_setup(dev_priv);
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return 0;
ret = intel_fbdev_init(dev);
@@ -868,6 +869,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_HSW_EARLY_SDV(dev_priv);
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
+ pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
if (pre) {
DRM_ERROR("This is a pre-production stepping. "
@@ -1383,6 +1385,20 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
}
}
+ if (HAS_EXECLISTS(dev_priv)) {
+ /*
+ * Older GVT emulation depends upon intercepting CSB mmio,
+ * which we no longer use, preferring to use the HWSP cache
+ * instead.
+ */
+ if (intel_vgpu_active(dev_priv) &&
+ !intel_vgpu_has_hwsp_emulation(dev_priv)) {
+ i915_report_error(dev_priv,
+ "old vGPU host found, support for HWSP emulation required\n");
+ return -ENXIO;
+ }
+ }
+
intel_sanitize_options(dev_priv);
i915_perf_init(dev_priv);
@@ -1452,6 +1468,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
+ intel_gt_init_workarounds(dev_priv);
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1551,7 +1568,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
- if (INTEL_INFO(dev_priv)->num_pipes) {
+ if (HAS_DISPLAY(dev_priv)) {
/* Must be done after probing outputs */
intel_opregion_register(dev_priv);
acpi_video_register();
@@ -1575,7 +1592,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* We need to coordinate the hotplugs with the asynchronous fbdev
* configuration, for which we use the fbdev->async_cookie.
*/
- if (INTEL_INFO(dev_priv)->num_pipes)
+ if (HAS_DISPLAY(dev_priv))
drm_kms_helper_poll_init(dev);
intel_power_domains_enable(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4064e49dbf70..b1c31967194b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,6 +53,7 @@
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
+#include <drm/drm_dsc.h>
#include "i915_fixed.h"
#include "i915_params.h"
@@ -68,6 +69,7 @@
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
+#include "intel_workarounds.h"
#include "intel_uc.h"
#include "i915_gem.h"
@@ -88,8 +90,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20181122"
-#define DRIVER_TIMESTAMP 1542898187
+#define DRIVER_DATE "20181204"
+#define DRIVER_TIMESTAMP 1543944377
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -494,6 +496,7 @@ struct i915_psr {
bool sink_support;
bool prepared, enabled;
struct intel_dp *dp;
+ enum pipe pipe;
bool active;
struct work_struct work;
unsigned busy_frontbuffer_bits;
@@ -504,6 +507,8 @@ struct i915_psr {
u8 sink_sync_latency;
ktime_t last_entry_attempt;
ktime_t last_exit;
+ bool sink_not_reliable;
+ bool irq_aux_error;
};
enum intel_pch {
@@ -1093,9 +1098,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
- /* packed/y */
- struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
- struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
u8 enabled_slices; /* GEN11 has configurable 2 slices */
};
@@ -1188,20 +1190,6 @@ struct i915_frontbuffer_tracking {
unsigned flip_bits;
};
-struct i915_wa_reg {
- u32 addr;
- u32 value;
- /* bitmask representing WA bits */
- u32 mask;
-};
-
-#define I915_MAX_WA_REGS 16
-
-struct i915_workarounds {
- struct i915_wa_reg reg[I915_MAX_WA_REGS];
- u32 count;
-};
-
struct i915_virtual_gpu {
bool active;
u32 caps;
@@ -1651,7 +1639,7 @@ struct drm_i915_private {
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
- struct i915_workarounds workarounds;
+ struct i915_wa_list gt_wa_list;
struct i915_frontbuffer_tracking fb_tracking;
@@ -1995,6 +1983,8 @@ struct drm_i915_private {
struct delayed_work idle_work;
ktime_t last_init_time;
+
+ struct i915_vma *scratch;
} gt;
/* perform PHY state sanity checks? */
@@ -2448,9 +2438,9 @@ intel_info(const struct drm_i915_private *dev_priv)
((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
})
-#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
+#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
- ((dev_priv)->info.overlay_needs_physical)
+ ((dev_priv)->info.display.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
@@ -2471,31 +2461,31 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
!(IS_I915G(dev_priv) || \
IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
+#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.display.has_hotplug)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
+#define HAS_FBC(dev_priv) ((dev_priv)->info.display.has_fbc)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
-#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst)
+#define HAS_DP_MST(dev_priv) ((dev_priv)->info.display.has_dp_mst)
-#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
+#define HAS_DDI(dev_priv) ((dev_priv)->info.display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
+#define HAS_PSR(dev_priv) ((dev_priv)->info.display.has_psr)
#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
-#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
+#define HAS_CSR(dev_priv) ((dev_priv)->info.display.has_csr)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
-#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc)
+#define HAS_IPC(dev_priv) ((dev_priv)->info.display.has_ipc)
/*
* For now, anything with a GuC requires uCode loading, and then supports
@@ -2556,7 +2546,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
@@ -2568,6 +2558,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+
#include "i915_trace.h"
static inline bool intel_vtd_active(void)
@@ -3340,6 +3332,9 @@ extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
bool interactive);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
+void intel_dsc_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -3720,4 +3715,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
return I915_HWS_CSB_WRITE_INDEX;
}
+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+ return i915_ggtt_offset(i915->gt.scratch);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b70de1a9e899..7399ac7a5629 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1282,8 +1282,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (args->size == 0)
return 0;
- if (!access_ok(VERIFY_WRITE,
- u64_to_user_ptr(args->data_ptr),
+ if (!access_ok(u64_to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
@@ -1609,9 +1608,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (args->size == 0)
return 0;
- if (!access_ok(VERIFY_READ,
- u64_to_user_ptr(args->data_ptr),
- args->size))
+ if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
return -EFAULT;
obj = i915_gem_object_lookup(file, args->handle);
@@ -2559,7 +2556,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* If there's no chance of allocating enough pages for the whole
* object, bail early.
*/
- if (page_count > totalram_pages)
+ if (page_count > totalram_pages())
return -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -3309,16 +3306,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct i915_request *request)
{
- GEM_TRACE("%s fence %llx:%d -> -EIO\n",
- request->engine->name,
- request->fence.context, request->fence.seqno);
- dma_fence_set_error(&request->fence, -EIO);
-
- i915_request_submit(request);
-}
-
-static void nop_complete_submit_request(struct i915_request *request)
-{
unsigned long flags;
GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
@@ -3354,57 +3341,33 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, i915, id)
i915_gem_reset_prepare_engine(engine);
- engine->submit_request = nop_submit_request;
- engine->schedule = NULL;
- }
- i915->caps.scheduler = 0;
-
/* Even if the GPU reset fails, it should still stop the engines */
if (INTEL_GEN(i915) >= 5)
intel_gpu_reset(i915, ALL_ENGINES);
- /*
- * Make sure no one is running the old callback before we proceed with
- * cancelling requests and resetting the completion tracking. Otherwise
- * we might submit a request to the hardware which never completes.
- */
- synchronize_rcu();
-
for_each_engine(engine, i915, id) {
- /* Mark all executing requests as skipped */
- engine->cancel_requests(engine);
-
- /*
- * Only once we've force-cancelled all in-flight requests can we
- * start to complete all requests.
- */
- engine->submit_request = nop_complete_submit_request;
+ engine->submit_request = nop_submit_request;
+ engine->schedule = NULL;
}
+ i915->caps.scheduler = 0;
/*
* Make sure no request can slip through without getting completed by
* either this call here to intel_engine_init_global_seqno, or the one
- * in nop_complete_submit_request.
+ * in nop_submit_request.
*/
synchronize_rcu();
- for_each_engine(engine, i915, id) {
- unsigned long flags;
-
- /*
- * Mark all pending requests as complete so that any concurrent
- * (lockless) lookup doesn't try and wait upon the request as we
- * reset it.
- */
- spin_lock_irqsave(&engine->timeline.lock, flags);
- intel_engine_init_global_seqno(engine,
- intel_engine_last_submit(engine));
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ /* Mark all executing requests as skipped */
+ for_each_engine(engine, i915, id)
+ engine->cancel_requests(engine);
+ for_each_engine(engine, i915, id) {
i915_gem_reset_finish_engine(engine);
+ intel_engine_wakeup(engine);
}
out:
@@ -5334,7 +5297,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
- intel_gt_workarounds_apply(dev_priv);
+ /* Apply the GT workarounds... */
+ intel_gt_apply_workarounds(dev_priv);
+ /* ...and determine whether they are sticking. */
+ intel_gt_verify_workarounds(dev_priv, "init");
i915_gem_init_swizzling(dev_priv);
@@ -5529,6 +5495,44 @@ err_active:
goto out_ctx;
}
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (!obj)
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ i915->gt.scratch = vma;
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+ i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -5575,12 +5579,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
- ret = i915_gem_contexts_init(dev_priv);
+ ret = i915_gem_init_scratch(dev_priv,
+ IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_ggtt;
}
+ ret = i915_gem_contexts_init(dev_priv);
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_scratch;
+ }
+
ret = intel_engines_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
@@ -5653,6 +5664,8 @@ err_pm:
err_context:
if (ret != -EIO)
i915_gem_contexts_fini(dev_priv);
+err_scratch:
+ i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5704,8 +5717,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_uc_fini(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
+ i915_gem_fini_scratch(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_wa_list_free(&dev_priv->gt_wa_list);
+
intel_cleanup_gt_powersave(dev_priv);
intel_uc_fini_misc(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 66117a8281ef..4ec386950f75 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -535,16 +535,12 @@ static bool needs_preempt_context(struct drm_i915_private *i915)
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
- int ret;
/* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
- ret = intel_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
-
+ intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
init_contexts(dev_priv);
/* lowest priority; idle task */
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 10a4afb4f235..485b259127c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -26,7 +26,7 @@
*
*/
-#include <linux/dma_remapping.h>
+#include <linux/intel-iommu.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
else if (gen >= 4)
len = 4;
else
- len = 6;
+ len = 3;
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
@@ -1309,11 +1309,6 @@ relocate_entry(struct i915_vma *vma,
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
-
- /* And again for good measure (blb/pnv) */
- *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *batch++ = addr;
- *batch++ = target_offset;
}
goto out;
@@ -1452,7 +1447,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
* to read. However, if the array is not writable the user loses
* the updated relocation values.
*/
- if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
+ if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
return -EFAULT;
do {
@@ -1559,7 +1554,7 @@ static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
addr = u64_to_user_ptr(entry->relocs_ptr);
size *= sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(VERIFY_READ, addr, size))
+ if (!access_ok(addr, size))
return -EFAULT;
end = addr + size;
@@ -1610,6 +1605,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
(char __user *)urelocs + copied,
len)) {
end_user:
+ user_access_end();
kvfree(relocs);
err = -EFAULT;
goto err;
@@ -1628,7 +1624,9 @@ end_user:
* happened we would make the mistake of assuming that the
* relocations were valid.
*/
- user_access_begin();
+ if (!user_access_begin(urelocs, size))
+ goto end_user;
+
for (copied = 0; copied < nreloc; copied++)
unsafe_put_user(-1,
&urelocs[copied].presumed_offset,
@@ -2095,7 +2093,7 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
return ERR_PTR(-EINVAL);
user = u64_to_user_ptr(args->cliprects_ptr);
- if (!access_ok(VERIFY_READ, user, nfences * sizeof(*user)))
+ if (!access_ok(user, nfences * sizeof(*user)))
return ERR_PTR(-EFAULT);
fences = kvmalloc_array(nfences, sizeof(*fences),
@@ -2610,7 +2608,16 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
unsigned int i;
/* Copy the new buffer offsets back to the user's exec list. */
- user_access_begin();
+ /*
+ * Note: count * sizeof(*user_exec_list) does not overflow,
+ * because we checked 'count' in check_buffer_count().
+ *
+ * And this range already got effectively checked earlier
+ * when we did the "copy_from_user()" above.
+ */
+ if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
+ goto end_user;
+
for (i = 0; i < args->buffer_count; i++) {
if (!(exec2_list[i].offset & UPDATE))
continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 2c9b284036d1..9558582c105e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -113,27 +113,25 @@ static void del_object(struct i915_mmu_object *mo)
}
static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
struct i915_mmu_object *mo;
struct interval_tree_node *it;
LIST_HEAD(cancelled);
+ unsigned long end;
if (RB_EMPTY_ROOT(&mn->objects.rb_root))
return 0;
/* interval ranges are inclusive, but invalidate range is exclusive */
- end--;
+ end = range->end - 1;
spin_lock(&mn->lock);
- it = interval_tree_iter_first(&mn->objects, start, end);
+ it = interval_tree_iter_first(&mn->objects, range->start, end);
while (it) {
- if (!blockable) {
+ if (!range->blockable) {
spin_unlock(&mn->lock);
return -EAGAIN;
}
@@ -151,7 +149,7 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
queue_work(mn->wq, &mo->work);
list_add(&mo->link, &cancelled);
- it = interval_tree_iter_next(it, start, end);
+ it = interval_tree_iter_next(it, range->start, end);
}
list_for_each_entry(mo, &cancelled, link)
del_object(mo);
@@ -791,8 +789,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
if (offset_in_page(args->user_ptr | args->user_size))
return -EINVAL;
- if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
- (char __user *)(unsigned long)args->user_ptr, args->user_size))
+ if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
return -EFAULT;
if (args->flags & I915_USERPTR_READ_ONLY) {
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8123bf0e4807..07465123c166 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -27,11 +27,14 @@
*
*/
-#include <linux/utsname.h>
+#include <linux/ascii85.h>
+#include <linux/nmi.h>
+#include <linux/scatterlist.h>
#include <linux/stop_machine.h>
+#include <linux/utsname.h>
#include <linux/zlib.h>
+
#include <drm/drm_print.h>
-#include <linux/ascii85.h>
#include "i915_gpu_error.h"
#include "i915_drv.h"
@@ -77,112 +80,110 @@ static const char *purgeable_flag(int purgeable)
return purgeable ? " purgeable" : "";
}
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+static void __sg_set_buf(struct scatterlist *sg,
+ void *addr, unsigned int len, loff_t it)
{
-
- if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
- e->err = -ENOSPC;
- return false;
- }
-
- if (e->bytes == e->size - 1 || e->err)
- return false;
-
- return true;
+ sg->page_link = (unsigned long)virt_to_page(addr);
+ sg->offset = offset_in_page(addr);
+ sg->length = len;
+ sg->dma_address = it;
}
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
- unsigned len)
+static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
- if (e->pos + len <= e->start) {
- e->pos += len;
+ if (!len)
return false;
- }
- /* First vsnprintf needs to fit in its entirety for memmove */
- if (len >= e->size) {
- e->err = -EIO;
- return false;
- }
+ if (e->bytes + len + 1 <= e->size)
+ return true;
- return true;
-}
+ if (e->bytes) {
+ __sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
+ e->iter += e->bytes;
+ e->buf = NULL;
+ e->bytes = 0;
+ }
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
- unsigned len)
-{
- /* If this is first printf in this window, adjust it so that
- * start position matches start of the buffer
- */
+ if (e->cur == e->end) {
+ struct scatterlist *sgl;
- if (e->pos < e->start) {
- const size_t off = e->start - e->pos;
+ sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
+ if (!sgl) {
+ e->err = -ENOMEM;
+ return false;
+ }
- /* Should not happen but be paranoid */
- if (off > len || e->bytes) {
- e->err = -EIO;
- return;
+ if (e->cur) {
+ e->cur->offset = 0;
+ e->cur->length = 0;
+ e->cur->page_link =
+ (unsigned long)sgl | SG_CHAIN;
+ } else {
+ e->sgl = sgl;
}
- memmove(e->buf, e->buf + off, len - off);
- e->bytes = len - off;
- e->pos = e->start;
- return;
+ e->cur = sgl;
+ e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
}
- e->bytes += len;
- e->pos += len;
+ e->size = ALIGN(len + 1, SZ_64K);
+ e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!e->buf) {
+ e->size = PAGE_ALIGN(len + 1);
+ e->buf = kmalloc(e->size, GFP_KERNEL);
+ }
+ if (!e->buf) {
+ e->err = -ENOMEM;
+ return false;
+ }
+
+ return true;
}
__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
- const char *f, va_list args)
+ const char *fmt, va_list args)
{
- unsigned len;
+ va_list ap;
+ int len;
- if (!__i915_error_ok(e))
+ if (e->err)
return;
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- va_list tmp;
-
- va_copy(tmp, args);
- len = vsnprintf(NULL, 0, f, tmp);
- va_end(tmp);
-
- if (!__i915_error_seek(e, len))
- return;
+ va_copy(ap, args);
+ len = vsnprintf(NULL, 0, fmt, ap);
+ va_end(ap);
+ if (len <= 0) {
+ e->err = len;
+ return;
}
- len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
+ if (!__i915_error_grow(e, len))
+ return;
- __i915_error_advance(e, len);
+ GEM_BUG_ON(e->bytes >= e->size);
+ len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
+ if (len < 0) {
+ e->err = len;
+ return;
+ }
+ e->bytes += len;
}
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
- const char *str)
+static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
unsigned len;
- if (!__i915_error_ok(e))
+ if (e->err || !str)
return;
len = strlen(str);
+ if (!__i915_error_grow(e, len))
+ return;
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- if (!__i915_error_seek(e, len))
- return;
- }
-
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
+ GEM_BUG_ON(e->bytes + len > e->size);
memcpy(e->buf + e->bytes, str, len);
-
- __i915_error_advance(e, len);
+ e->bytes += len;
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
@@ -268,6 +269,8 @@ static int compress_page(struct compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
+
+ touch_nmi_watchdog();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@@ -635,21 +638,29 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
}
-int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
- const struct i915_gpu_state *error)
+static void err_free_sgl(struct scatterlist *sgl)
{
- struct drm_i915_private *dev_priv = m->i915;
- struct drm_i915_error_object *obj;
- struct timespec64 ts;
- int i, j;
+ while (sgl) {
+ struct scatterlist *sg;
- if (!error) {
- err_printf(m, "No error state collected\n");
- return 0;
+ for (sg = sgl; !sg_is_chain(sg); sg++) {
+ kfree(sg_virt(sg));
+ if (sg_is_last(sg))
+ break;
+ }
+
+ sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
+ free_page((unsigned long)sgl);
+ sgl = sg;
}
+}
- if (IS_ERR(error))
- return PTR_ERR(error);
+static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
+ struct i915_gpu_state *error)
+{
+ struct drm_i915_error_object *obj;
+ struct timespec64 ts;
+ int i, j;
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
@@ -683,12 +694,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
- err_print_pciid(m, error->i915);
+ err_print_pciid(m, m->i915);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_CSR(dev_priv)) {
- struct intel_csr *csr = &dev_priv->csr;
+ if (HAS_CSR(m->i915)) {
+ struct intel_csr *csr = &m->i915->csr;
err_printf(m, "DMC loaded: %s\n",
yesno(csr->dmc_payload != NULL));
@@ -708,22 +719,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
err_printf(m, "CCID: 0x%08x\n", error->ccid);
- err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
+ err_printf(m, "Missed interrupts: 0x%08lx\n",
+ m->i915->gpu_error.missed_irq_rings);
for (i = 0; i < error->nfence; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
- if (INTEL_GEN(dev_priv) >= 8)
+ if (INTEL_GEN(m->i915) >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
error->fault_data1, error->fault_data0);
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (IS_GEN7(dev_priv))
+ if (IS_GEN7(m->i915))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -745,7 +757,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
len += scnprintf(buf + len, sizeof(buf), "%s%s",
first ? "" : ", ",
- dev_priv->engine[j]->name);
+ m->i915->engine[j]->name);
first = 0;
}
scnprintf(buf + len, sizeof(buf), ")");
@@ -763,7 +775,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = ee->batchbuffer;
if (obj) {
- err_puts(m, dev_priv->engine[i]->name);
+ err_puts(m, m->i915->engine[i]->name);
if (ee->context.pid)
err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
ee->context.comm,
@@ -775,16 +787,16 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
- print_error_obj(m, dev_priv->engine[i], NULL, obj);
+ print_error_obj(m, m->i915->engine[i], NULL, obj);
}
for (j = 0; j < ee->user_bo_count; j++)
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"user", ee->user_bo[j]);
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
- dev_priv->engine[i]->name,
+ m->i915->engine[i]->name,
ee->num_requests);
for (j = 0; j < ee->num_requests; j++)
error_print_request(m, " ",
@@ -794,10 +806,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (IS_ERR(ee->waiters)) {
err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
- dev_priv->engine[i]->name);
+ m->i915->engine[i]->name);
} else if (ee->num_waiters) {
err_printf(m, "%s --- %d waiters\n",
- dev_priv->engine[i]->name,
+ m->i915->engine[i]->name,
ee->num_waiters);
for (j = 0; j < ee->num_waiters; j++) {
err_printf(m, " seqno 0x%08x for %s [%d]\n",
@@ -807,22 +819,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"ringbuffer", ee->ringbuffer);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"HW Status", ee->hws_page);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"HW context", ee->ctx);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"WA context", ee->wa_ctx);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"WA batchbuffer", ee->wa_batchbuffer);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"NULL context", ee->default_state);
}
@@ -835,43 +847,107 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_print_capabilities(m, &error->device_info, &error->driver_caps);
err_print_params(m, &error->params);
err_print_uc(m, &error->uc);
+}
+
+static int err_print_to_sgl(struct i915_gpu_state *error)
+{
+ struct drm_i915_error_state_buf m;
+
+ if (IS_ERR(error))
+ return PTR_ERR(error);
+
+ if (READ_ONCE(error->sgl))
+ return 0;
+
+ memset(&m, 0, sizeof(m));
+ m.i915 = error->i915;
+
+ __err_print_to_sgl(&m, error);
+
+ if (m.buf) {
+ __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
+ m.bytes = 0;
+ m.buf = NULL;
+ }
+ if (m.cur) {
+ GEM_BUG_ON(m.end < m.cur);
+ sg_mark_end(m.cur - 1);
+ }
+ GEM_BUG_ON(m.sgl && !m.cur);
+
+ if (m.err) {
+ err_free_sgl(m.sgl);
+ return m.err;
+ }
- if (m->bytes == 0 && m->err)
- return m->err;
+ if (cmpxchg(&error->sgl, NULL, m.sgl))
+ err_free_sgl(m.sgl);
return 0;
}
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
- struct drm_i915_private *i915,
- size_t count, loff_t pos)
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+ char *buf, loff_t off, size_t rem)
{
- memset(ebuf, 0, sizeof(*ebuf));
- ebuf->i915 = i915;
+ struct scatterlist *sg;
+ size_t count;
+ loff_t pos;
+ int err;
- /* We need to have enough room to store any i915_error_state printf
- * so that we can move it to start position.
- */
- ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
- ebuf->buf = kmalloc(ebuf->size,
- GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+ if (!error || !rem)
+ return 0;
- if (ebuf->buf == NULL) {
- ebuf->size = PAGE_SIZE;
- ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
- }
+ err = err_print_to_sgl(error);
+ if (err)
+ return err;
- if (ebuf->buf == NULL) {
- ebuf->size = 128;
- ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
- }
+ sg = READ_ONCE(error->fit);
+ if (!sg || off < sg->dma_address)
+ sg = error->sgl;
+ if (!sg)
+ return 0;
- if (ebuf->buf == NULL)
- return -ENOMEM;
+ pos = sg->dma_address;
+ count = 0;
+ do {
+ size_t len, start;
+
+ if (sg_is_chain(sg)) {
+ sg = sg_chain_ptr(sg);
+ GEM_BUG_ON(sg_is_chain(sg));
+ }
+
+ len = sg->length;
+ if (pos + len <= off) {
+ pos += len;
+ continue;
+ }
- ebuf->start = pos;
+ start = sg->offset;
+ if (pos < off) {
+ GEM_BUG_ON(off - pos > len);
+ len -= off - pos;
+ start += off - pos;
+ pos = off;
+ }
- return 0;
+ len = min(len, rem);
+ GEM_BUG_ON(!len || len > sg->length);
+
+ memcpy(buf, page_address(sg_page(sg)) + start, len);
+
+ count += len;
+ pos += len;
+
+ buf += len;
+ rem -= len;
+ if (!rem) {
+ WRITE_ONCE(error->fit, sg);
+ break;
+ }
+ } while (!sg_is_last(sg++));
+
+ return count;
}
static void i915_error_object_free(struct drm_i915_error_object *obj)
@@ -944,6 +1020,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
cleanup_params(error);
cleanup_uc_state(error);
+ err_free_sgl(error->sgl);
kfree(error);
}
@@ -1494,7 +1571,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
if (HAS_BROKEN_CS_TLB(i915))
ee->wa_batchbuffer =
i915_error_object_create(i915,
- engine->scratch);
+ i915->gt.scratch);
request_record_user_bo(request, ee);
ee->ctx =
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 3ec89a504de5..ff2652bbb0b0 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -192,6 +192,8 @@ struct i915_gpu_state {
} *active_bo[I915_NUM_ENGINES], *pinned_bo;
u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
struct i915_address_space *active_vm[I915_NUM_ENGINES];
+
+ struct scatterlist *sgl, *fit;
};
struct i915_gpu_error {
@@ -298,29 +300,20 @@ struct i915_gpu_error {
struct drm_i915_error_state_buf {
struct drm_i915_private *i915;
- unsigned int bytes;
- unsigned int size;
+ struct scatterlist *sgl, *cur, *end;
+
+ char *buf;
+ size_t bytes;
+ size_t size;
+ loff_t iter;
+
int err;
- u8 *buf;
- loff_t start;
- loff_t pos;
};
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
- const struct i915_gpu_state *gpu);
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
- struct drm_i915_private *i915,
- size_t count, loff_t pos);
-
-static inline void
-i915_error_state_buf_release(struct drm_i915_error_state_buf *eb)
-{
- kfree(eb->buf);
-}
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
@@ -334,6 +327,9 @@ i915_gpu_state_get(struct i915_gpu_state *gpu)
return gpu;
}
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+ char *buf, loff_t offset, size_t count);
+
void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 0e5c580d117c..e869daf9c8a9 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -52,7 +52,7 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+ if (!access_ok(request, sizeof(*request)) ||
__put_user(req32.param, &request->param) ||
__put_user((void __user *)(unsigned long)req32.value,
&request->value))
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1b81d7cb209e..6350db5503cd 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -79,8 +79,9 @@
#define GEN2_FEATURES \
GEN(2), \
.num_pipes = 1, \
- .has_overlay = 1, .overlay_needs_physical = 1, \
- .has_gmch_display = 1, \
+ .display.has_overlay = 1, \
+ .display.overlay_needs_physical = 1, \
+ .display.has_gmch_display = 1, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
@@ -93,7 +94,8 @@
static const struct intel_device_info intel_i830_info = {
GEN2_FEATURES,
PLATFORM(INTEL_I830),
- .is_mobile = 1, .cursor_needs_physical = 1,
+ .is_mobile = 1,
+ .display.cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
@@ -107,8 +109,8 @@ static const struct intel_device_info intel_i85x_info = {
PLATFORM(INTEL_I85X),
.is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
- .cursor_needs_physical = 1,
- .has_fbc = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_fbc = 1,
};
static const struct intel_device_info intel_i865g_info = {
@@ -119,7 +121,7 @@ static const struct intel_device_info intel_i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
.num_pipes = 2, \
- .has_gmch_display = 1, \
+ .display.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -131,8 +133,9 @@ static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915G),
.has_coherent_ggtt = false,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -141,10 +144,11 @@ static const struct intel_device_info intel_i915gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915GM),
.is_mobile = 1,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
+ .display.supports_tv = 1,
+ .display.has_fbc = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -152,8 +156,10 @@ static const struct intel_device_info intel_i915gm_info = {
static const struct intel_device_info intel_i945g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945G),
- .has_hotplug = 1, .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
+ .display.has_hotplug = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -162,10 +168,12 @@ static const struct intel_device_info intel_i945gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945GM),
.is_mobile = 1,
- .has_hotplug = 1, .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
+ .display.has_hotplug = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
+ .display.supports_tv = 1,
+ .display.has_fbc = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -173,23 +181,23 @@ static const struct intel_device_info intel_i945gm_info = {
static const struct intel_device_info intel_g33_info = {
GEN3_FEATURES,
PLATFORM(INTEL_G33),
- .has_hotplug = 1,
- .has_overlay = 1,
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
};
static const struct intel_device_info intel_pineview_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.is_mobile = 1,
- .has_hotplug = 1,
- .has_overlay = 1,
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
};
#define GEN4_FEATURES \
GEN(4), \
.num_pipes = 2, \
- .has_hotplug = 1, \
- .has_gmch_display = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -200,7 +208,7 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965G),
- .has_overlay = 1,
+ .display.has_overlay = 1,
.hws_needs_physical = 1,
.has_snoop = false,
};
@@ -208,9 +216,10 @@ static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965gm_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965GM),
- .is_mobile = 1, .has_fbc = 1,
- .has_overlay = 1,
- .supports_tv = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
+ .display.has_overlay = 1,
+ .display.supports_tv = 1,
.hws_needs_physical = 1,
.has_snoop = false,
};
@@ -224,15 +233,16 @@ static const struct intel_device_info intel_g45_info = {
static const struct intel_device_info intel_gm45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_GM45),
- .is_mobile = 1, .has_fbc = 1,
- .supports_tv = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
+ .display.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
#define GEN5_FEATURES \
GEN(5), \
.num_pipes = 2, \
- .has_hotplug = 1, \
+ .display.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -250,14 +260,15 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
- .is_mobile = 1, .has_fbc = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
};
#define GEN6_FEATURES \
GEN(6), \
.num_pipes = 2, \
- .has_hotplug = 1, \
- .has_fbc = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
@@ -301,8 +312,8 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
.num_pipes = 3, \
- .has_hotplug = 1, \
- .has_fbc = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
@@ -359,8 +370,8 @@ static const struct intel_device_info intel_valleyview_info = {
.num_pipes = 2,
.has_runtime_pm = 1,
.has_rc6 = 1,
- .has_gmch_display = 1,
- .has_hotplug = 1,
+ .display.has_gmch_display = 1,
+ .display.has_hotplug = 1,
.ppgtt = INTEL_PPGTT_FULL,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -374,10 +385,10 @@ static const struct intel_device_info intel_valleyview_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
- .has_ddi = 1, \
+ .display.has_ddi = 1, \
.has_fpga_dbg = 1, \
- .has_psr = 1, \
- .has_dp_mst = 1, \
+ .display.has_psr = 1, \
+ .display.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed by HSW */, \
.has_runtime_pm = 1
@@ -444,14 +455,14 @@ static const struct intel_device_info intel_cherryview_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
.num_pipes = 3,
- .has_hotplug = 1,
+ .display.has_hotplug = 1,
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_logical_ring_contexts = 1,
- .has_gmch_display = 1,
+ .display.has_gmch_display = 1,
.ppgtt = INTEL_PPGTT_FULL,
.has_reset_engine = 1,
.has_snoop = true,
@@ -473,15 +484,15 @@ static const struct intel_device_info intel_cherryview_info = {
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
.has_logical_ring_preemption = 1, \
- .has_csr = 1, \
+ .display.has_csr = 1, \
.has_guc = 1, \
- .has_ipc = 1, \
+ .display.has_ipc = 1, \
.ddb_size = 896
#define SKL_PLATFORM \
GEN9_FEATURES, \
/* Display WA #0477 WaDisableIPC: skl */ \
- .has_ipc = 0, \
+ .display.has_ipc = 0, \
PLATFORM(INTEL_SKYLAKE)
static const struct intel_device_info intel_skylake_gt1_info = {
@@ -512,19 +523,19 @@ static const struct intel_device_info intel_skylake_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
- .has_hotplug = 1, \
+ .display.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.num_pipes = 3, \
.has_64bit_reloc = 1, \
- .has_ddi = 1, \
+ .display.has_ddi = 1, \
.has_fpga_dbg = 1, \
- .has_fbc = 1, \
- .has_psr = 1, \
+ .display.has_fbc = 1, \
+ .display.has_psr = 1, \
.has_runtime_pm = 1, \
.has_pooled_eu = 0, \
- .has_csr = 1, \
+ .display.has_csr = 1, \
.has_rc6 = 1, \
- .has_dp_mst = 1, \
+ .display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_guc = 1, \
@@ -532,7 +543,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
- .has_ipc = 1, \
+ .display.has_ipc = 1, \
GEN9_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 4529edfdcfc8..2b2eb57ca71f 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3052,7 +3052,7 @@ static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
if (!n_regs)
return NULL;
- if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2))
+ if (!access_ok(regs, n_regs * sizeof(u32) * 2))
return ERR_PTR(-EFAULT);
/* No is_valid function means we're not allowing any register to be programmed. */
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 6fc4b8eeab42..fe56465cdfd6 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -46,7 +46,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
if (topo.flags != 0)
return -EINVAL;
- if (!access_ok(VERIFY_WRITE, u64_to_user_ptr(query_item->data_ptr),
+ if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
total_length))
return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 47baf2fe8f71..0a7d60509ca7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4570,6 +4570,7 @@ enum {
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_VSC_DATA_SIZE 36
+#define VIDEO_DIP_PPS_DATA_SIZE 132
#define VIDEO_DIP_CTL _MMIO(0x61170)
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
@@ -4617,6 +4618,17 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
#define PP_ON (1 << 31)
+
+#define _PP_CONTROL_1 0xc7204
+#define _PP_CONTROL_2 0xc7304
+#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
+ _PP_CONTROL_2)
+#define POWER_CYCLE_DELAY_MASK (0x1f << 4)
+#define POWER_CYCLE_DELAY_SHIFT 4
+#define VDD_OVERRIDE_FORCE (1 << 3)
+#define BACKLIGHT_ENABLE (1 << 2)
+#define PWR_DOWN_ON_RESET (1 << 1)
+#define PWR_STATE_TARGET (1 << 0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -7750,6 +7762,7 @@ enum {
#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
#define ICP_DDIA_HPD_ENABLE (1 << 3)
+#define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2)
#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
@@ -9197,6 +9210,7 @@ enum skl_power_gate {
#define _DP_TP_CTL_B 0x64140
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
#define DP_TP_CTL_ENABLE (1 << 31)
+#define DP_TP_CTL_FEC_ENABLE (1 << 30)
#define DP_TP_CTL_MODE_SST (0 << 27)
#define DP_TP_CTL_MODE_MST (1 << 27)
#define DP_TP_CTL_FORCE_ACT (1 << 25)
@@ -9215,6 +9229,7 @@ enum skl_power_gate {
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
#define DP_TP_STATUS_IDLE_DONE (1 << 25)
#define DP_TP_STATUS_ACT_SENT (1 << 24)
#define DP_TP_STATUS_MODE_STATUS_MST (1 << 23)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 191703986c7b..cefefc11d922 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -136,6 +136,9 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
intel_engine_get_seqno(engine),
seqno);
+ if (seqno == engine->timeline.seqno)
+ continue;
+
kthread_park(engine->breadcrumbs.signaler);
if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 11bcdabd5177..7c58b049ecb5 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -1,10 +1,7 @@
/*
- * (C) Copyright 2016 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
+ * (C) Copyright 2016 Intel Corporation
*/
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index fe2ef4dadfc6..0e055ea0179f 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -1,10 +1,9 @@
/*
+ * SPDX-License-Identifier: MIT
+ *
* i915_sw_fence.h - library routines for N:M synchronisation points
*
* Copyright (C) 2016 Intel Corporation
- *
- * This file is released under the GPLv2.
- *
*/
#ifndef _I915_SW_FENCE_H_
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e5e6f6bb2b05..535caebd9813 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -483,7 +483,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static const struct attribute *gen6_attrs[] = {
+static const struct attribute * const gen6_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_boost_freq_mhz.attr,
@@ -495,7 +495,7 @@ static const struct attribute *gen6_attrs[] = {
NULL,
};
-static const struct attribute *vlv_attrs[] = {
+static const struct attribute * const vlv_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_boost_freq_mhz.attr,
@@ -516,26 +516,21 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_i915_error_state_buf error_str;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gpu_state *gpu;
ssize_t ret;
- ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
- if (ret)
- return ret;
-
- gpu = i915_first_error_state(dev_priv);
- ret = i915_error_state_to_str(&error_str, gpu);
- if (ret)
- goto out;
-
- ret = count < error_str.bytes ? count : error_str.bytes;
- memcpy(buf, error_str.buf, ret);
+ gpu = i915_first_error_state(i915);
+ if (gpu) {
+ ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
+ i915_gpu_state_put(gpu);
+ } else {
+ const char *str = "No error state collected\n";
+ size_t len = strlen(str);
-out:
- i915_gpu_state_put(gpu);
- i915_error_state_buf_release(&error_str);
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 5858a43e19da..9726df37c4c4 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -44,16 +44,19 @@
__stringify(x), (long)(x))
#if defined(GCC_VERSION) && GCC_VERSION >= 70000
-#define add_overflows(A, B) \
- __builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
+#define add_overflows_t(T, A, B) \
+ __builtin_add_overflow_p((A), (B), (T)0)
#else
-#define add_overflows(A, B) ({ \
+#define add_overflows_t(T, A, B) ({ \
typeof(A) a = (A); \
typeof(B) b = (B); \
- a + b < a; \
+ (T)(a + b) < a; \
})
#endif
+#define add_overflows(A, B) \
+ add_overflows_t(typeof((A) + (B)), (A), (B))
+
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
typeof(size) size__ = (size); \
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 01f422df8c23..4dd793b78996 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -26,6 +26,7 @@
*/
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_atomic_helper.h>
#include "intel_dsi.h"
static inline int header_credits_available(struct drm_i915_private *dev_priv,
@@ -107,6 +108,90 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
}
}
+static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
+ u32 len)
+{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ int free_credits;
+ int i, j;
+
+ for (i = 0; i < len; i += 4) {
+ u32 tmp = 0;
+
+ free_credits = payload_credits_available(dev_priv, dsi_trans);
+ if (free_credits < 1) {
+ DRM_ERROR("Payload credit not available\n");
+ return false;
+ }
+
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ tmp |= *data++ << 8 * j;
+
+ I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp);
+ }
+
+ return true;
+}
+
+static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
+ struct mipi_dsi_packet pkt, bool enable_lpdt)
+{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ u32 tmp;
+ int free_credits;
+
+ /* check if header credit available */
+ free_credits = header_credits_available(dev_priv, dsi_trans);
+ if (free_credits < 1) {
+ DRM_ERROR("send pkt header failed, not enough hdr credits\n");
+ return -1;
+ }
+
+ tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans));
+
+ if (pkt.payload)
+ tmp |= PAYLOAD_PRESENT;
+ else
+ tmp &= ~PAYLOAD_PRESENT;
+
+ tmp &= ~VBLANK_FENCE;
+
+ if (enable_lpdt)
+ tmp |= LP_DATA_TRANSFER;
+
+ tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
+ tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT);
+ tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
+ tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
+ tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
+ I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp);
+
+ return 0;
+}
+
+static int dsi_send_pkt_payld(struct intel_dsi_host *host,
+ struct mipi_dsi_packet pkt)
+{
+ /* payload queue can accept *256 bytes*, check limit */
+ if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
+ DRM_ERROR("payload size exceeds max queue limit\n");
+ return -1;
+ }
+
+ /* load data into command payload queue */
+ if (!add_payld_to_queue(host, pkt.payload,
+ pkt.payload_length)) {
+ DRM_ERROR("adding payload to queue failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -172,6 +257,45 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
}
}
+static void configure_dual_link_mode(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 dss_ctl1;
+
+ dss_ctl1 = I915_READ(DSS_CTL1);
+ dss_ctl1 |= SPLITTER_ENABLE;
+ dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
+ dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
+
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ u32 dss_ctl2;
+ u16 hactive = adjusted_mode->crtc_hdisplay;
+ u16 dl_buffer_depth;
+
+ dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE;
+ dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
+
+ if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
+ DRM_ERROR("DL buffer depth exceed max value\n");
+
+ dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
+ dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+ dss_ctl2 = I915_READ(DSS_CTL2);
+ dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
+ dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+ I915_WRITE(DSS_CTL2, dss_ctl2);
+ } else {
+ /* Interleave */
+ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
+ }
+
+ I915_WRITE(DSS_CTL1, dss_ctl1);
+}
+
static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -412,6 +536,62 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
}
}
+static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ }
+
+ I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ }
+
+ I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_map_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpll_lock);
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+ }
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ POSTING_READ(DPCLKA_CFGCR0_ICL);
+
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
static void
gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
@@ -506,7 +686,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
}
- //TODO: configure DSS_CTL1
+ /* configure stream splitting */
+ configure_dual_link_mode(encoder, pipe_config);
}
for_each_dsi_port(port, intel_dsi->ports) {
@@ -758,6 +939,9 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
/* Step (4h, 4i, 4j, 4k): Configure transcoder */
gen11_dsi_configure_transcoder(encoder, pipe_config);
+
+ /* Step 4l: Gate DDI clocks */
+ gen11_dsi_gate_clocks(encoder);
}
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
@@ -799,18 +983,25 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
wait_for_cmds_dispatched_to_panel(encoder);
}
-static void __attribute__((unused))
-gen11_dsi_pre_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-
/* step2: enable IO power */
gen11_dsi_enable_io_power(encoder);
/* step3: enable DSI PLL */
gen11_dsi_program_esc_clk_div(encoder);
+}
+
+static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ /* step3b */
+ gen11_dsi_map_pll(encoder, pipe_config);
/* step4: enable DSI port and DPHY */
gen11_dsi_enable_port_and_phy(encoder, pipe_config);
@@ -912,6 +1103,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
u32 tmp;
enum port port;
+ gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(DDI_BUF_CTL(port));
tmp &= ~DDI_BUF_CTL_ENABLE;
@@ -923,6 +1115,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
DRM_ERROR("DDI port:%c buffer not idle\n",
port_name(port));
}
+ gen11_dsi_ungate_clocks(encoder);
}
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
@@ -945,10 +1138,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
}
}
-static void __attribute__((unused)) gen11_dsi_disable(
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void gen11_dsi_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -972,10 +1164,289 @@ static void __attribute__((unused)) gen11_dsi_disable(
gen11_dsi_disable_io_power(encoder);
}
+static void gen11_dsi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 pll_id;
+
+ /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+}
+
+static bool gen11_dsi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+ base);
+ struct intel_connector *intel_connector = intel_dsi->attached_connector;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ const struct drm_display_mode *fixed_mode =
+ intel_connector->panel.fixed_mode;
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
+
+ adjusted_mode->flags = 0;
+
+ /* Dual link goes to transcoder DSI '0' */
+ if (intel_dsi->ports == BIT(PORT_B))
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_1;
+ else
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_0;
+
+ pipe_config->clock_set = true;
+ pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
+
+ return true;
+}
+
+static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u64 domains = 0;
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports)
+ if (port == PORT_A)
+ domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
+ else
+ domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
+
+ return domains;
+}
+
+static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+ enum transcoder dsi_trans;
+ bool ret = false;
+
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain))
+ return false;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ default:
+ DRM_ERROR("Invalid PIPE input\n");
+ goto out;
+ }
+
+ tmp = I915_READ(PIPECONF(dsi_trans));
+ ret = tmp & PIPECONF_ENABLE;
+ }
+out:
+ intel_display_power_put(dev_priv, encoder->power_domain);
+ return ret;
+}
+
+static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = {
+ .destroy = gen11_dsi_encoder_destroy,
+};
+
+static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = {
+ .get_modes = intel_dsi_get_modes,
+ .mode_valid = intel_dsi_mode_valid,
+ .atomic_check = intel_digital_connector_atomic_check,
+};
+
+static int gen11_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static int gen11_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+ struct mipi_dsi_packet dsi_pkt;
+ ssize_t ret;
+ bool enable_lpdt = false;
+
+ ret = mipi_dsi_create_packet(&dsi_pkt, msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+ enable_lpdt = true;
+
+ /* send packet header */
+ ret = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt);
+ if (ret < 0)
+ return ret;
+
+ /* only long packet contains payload */
+ if (mipi_dsi_packet_format_is_long(msg->type)) {
+ ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ //TODO: add payload receive code if needed
+
+ ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length;
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
+ .attach = gen11_dsi_host_attach,
+ .detach = gen11_dsi_host_detach,
+ .transfer = gen11_dsi_host_transfer,
+};
+
void icl_dsi_init(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_dsi *intel_dsi;
+ struct intel_encoder *encoder;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_display_mode *scan, *fixed_mode = NULL;
enum port port;
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
+
+ intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+ if (!intel_dsi)
+ return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(intel_dsi);
+ return;
+ }
+
+ encoder = &intel_dsi->base;
+ intel_dsi->attached_connector = intel_connector;
+ connector = &intel_connector->base;
+
+ /* register DSI encoder with DRM subsystem */
+ drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
+
+ encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
+ encoder->pre_enable = gen11_dsi_pre_enable;
+ encoder->disable = gen11_dsi_disable;
+ encoder->port = port;
+ encoder->get_config = gen11_dsi_get_config;
+ encoder->compute_config = gen11_dsi_compute_config;
+ encoder->get_hw_state = gen11_dsi_get_hw_state;
+ encoder->type = INTEL_OUTPUT_DSI;
+ encoder->cloneable = 0;
+ encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+ encoder->get_power_domains = gen11_dsi_get_power_domains;
+
+ /* register DSI connector with DRM subsystem */
+ drm_connector_init(dev, connector, &gen11_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ /* attach connector to encoder */
+ intel_connector_attach_encoder(intel_connector, encoder);
+
+ /* fill mode info from VBT */
+ mutex_lock(&dev->mode_config.mutex);
+ intel_dsi_vbt_get_modes(intel_dsi);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (!fixed_mode) {
+ DRM_ERROR("DSI fixed mode info missing\n");
+ goto err;
+ }
+
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+ intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
+
+ if (dev_priv->vbt.dsi.config->dual_link)
+ intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
+ else
+ intel_dsi->ports = BIT(port);
+
+ intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+ intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ struct intel_dsi_host *host;
+
+ host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port);
+ if (!host)
+ goto err;
+
+ intel_dsi->dsi_hosts[port] = host;
+ }
+
+ if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
+ DRM_DEBUG_KMS("no device found\n");
+ goto err;
+ }
+
+ return;
+
+err:
+ drm_encoder_cleanup(&encoder->base);
+ kfree(intel_dsi);
+ kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index a5a2c8fe58a7..8cb02f28d30c 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -184,6 +184,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->fifo_changed = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
+ crtc_state->update_planes = 0;
return &crtc_state->base;
}
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 905f8ef3ba4f..0a73e6e65c20 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -139,6 +139,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
crtc_state->nv12_planes |= BIT(intel_plane->id);
+ if (state->visible || old_plane_state->base.visible)
+ crtc_state->update_planes |= BIT(intel_plane->id);
+
return intel_plane_atomic_calc_changes(old_crtc_state,
&crtc_state->base,
old_plane_state,
@@ -168,27 +171,75 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
to_intel_plane_state(new_plane_state));
}
-void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
- struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
+static struct intel_plane *
+skl_next_plane_to_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct skl_ddb_entry entries_y[I915_MAX_PLANES],
+ struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
+ unsigned int *update_mask)
{
- struct intel_plane_state *new_plane_state;
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *plane_state;
struct intel_plane *plane;
- u32 update_mask;
int i;
- update_mask = old_crtc_state->active_planes;
- update_mask |= new_crtc_state->active_planes;
+ if (*update_mask == 0)
+ return NULL;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ enum plane_id plane_id = plane->id;
- for_each_new_intel_plane_in_state(old_state, plane, new_plane_state, i) {
if (crtc->pipe != plane->pipe ||
- !(update_mask & BIT(plane->id)))
+ !(*update_mask & BIT(plane_id)))
+ continue;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
+ entries_y,
+ I915_MAX_PLANES, plane_id) ||
+ skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
+ entries_uv,
+ I915_MAX_PLANES, plane_id))
continue;
+ *update_mask &= ~BIT(plane_id);
+ entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
+ entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+ return plane;
+ }
+
+ /* should never happen */
+ WARN_ON(1);
+
+ return NULL;
+}
+
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct skl_ddb_entry entries_y[I915_MAX_PLANES];
+ struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane *plane;
+
+ memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
+ sizeof(old_crtc_state->wm.skl.plane_ddb_y));
+ memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
+ sizeof(old_crtc_state->wm.skl.plane_ddb_uv));
+
+ while ((plane = skl_next_plane_to_commit(state, crtc,
+ entries_y, entries_uv,
+ &update_mask))) {
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+
if (new_plane_state->base.visible) {
trace_intel_update_plane(&plane->base, crtc);
-
plane->update_plane(plane, new_crtc_state, new_plane_state);
} else if (new_plane_state->slave) {
struct intel_plane *master =
@@ -204,15 +255,38 @@ void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
* plane_state.
*/
new_plane_state =
- intel_atomic_get_new_plane_state(old_state, master);
+ intel_atomic_get_new_plane_state(state, master);
trace_intel_update_plane(&plane->base, crtc);
-
plane->update_slave(plane, new_crtc_state, new_plane_state);
} else {
trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, new_crtc_state);
+ }
+ }
+}
+
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
- plane->disable_plane(plane, crtc);
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
+
+ if (new_plane_state->base.visible) {
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_plane(plane, new_crtc_state, new_plane_state);
+ } else {
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, new_crtc_state);
}
}
}
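
The skl_update_planes_on_crtc()/skl_next_plane_to_commit() pair above commits
planes one at a time, on each iteration picking a plane whose new DDB
allocation no longer overlaps any other plane's still-live allocation, so the
hardware never sees two planes fetching from the same DDB blocks. A minimal
standalone sketch of that ordering idea (plain C with illustrative names, not
the kernel code):

#include <stdbool.h>
#include <stdio.h>

struct ddb_entry { unsigned short start, end; };	/* [start, end) */

static bool overlaps(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

static int next_plane_to_commit(const struct ddb_entry *new_ddb,
				struct ddb_entry *live, int n,
				unsigned int *update_mask)
{
	for (int i = 0; i < n; i++) {
		bool blocked = false;

		if (!(*update_mask & (1u << i)))
			continue;
		for (int j = 0; j < n; j++)
			if (j != i && overlaps(&new_ddb[i], &live[j]))
				blocked = true;
		if (blocked)
			continue;
		*update_mask &= ~(1u << i);
		live[i] = new_ddb[i];	/* the new allocation is now live */
		return i;
	}
	return -1;	/* should never happen for valid allocations */
}

int main(void)
{
	/* plane 1 must shrink out of [4,8) before plane 0 may grow into it */
	struct ddb_entry live[2] = { { 0, 4 }, { 4, 8 } };
	struct ddb_entry new_ddb[2] = { { 0, 6 }, { 6, 8 } };
	unsigned int mask = 0x3;
	int p;

	while ((p = next_plane_to_commit(new_ddb, live, 2, &mask)) >= 0)
		printf("commit plane %d\n", p);	/* prints 1, then 0 */
	return 0;
}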
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 0694aa8bb9bc..6d3e0260d49c 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1752,7 +1752,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
- if (INTEL_INFO(dev_priv)->num_pipes == 0) {
+ if (!HAS_DISPLAY(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 84bf8d827136..447c5256f63a 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -27,11 +27,7 @@
#include "i915_drv.h"
-#ifdef CONFIG_SMP
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
-#else
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
-#endif
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ad11540ac436..f3e1d6a0b7dd 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -28,6 +28,7 @@
#include <drm/drm_scdc_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_dsi.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -1363,8 +1364,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
return dco_freq / (p0 * p1 * p2 * 5);
}
-static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id pll_id)
{
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
@@ -2154,6 +2155,12 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
intel_port_is_tc(dev_priv, encoder->port))
domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
+ /*
+ * VDSC power is needed when DSC is enabled
+ */
+ if (crtc_state->dsc_params.compression_enable)
+ domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
+
return domains;
}
@@ -2785,77 +2792,54 @@ uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
return 0;
}
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+static void icl_map_plls_to_ports(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_connector_state *conn_state;
- struct drm_connector *conn;
- int i;
-
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
- struct intel_encoder *encoder =
- to_intel_encoder(conn_state->best_encoder);
- enum port port;
- uint32_t val;
-
- if (conn_state->crtc != crtc)
- continue;
-
- port = encoder->port;
- mutex_lock(&dev_priv->dpll_lock);
+ enum port port = encoder->port;
+ u32 val;
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
+ mutex_lock(&dev_priv->dpll_lock);
- if (intel_port_is_combophy(dev_priv, port)) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- POSTING_READ(DPCLKA_CFGCR0_ICL);
- }
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
- val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ if (intel_port_is_combophy(dev_priv, port)) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
I915_WRITE(DPCLKA_CFGCR0_ICL, val);
-
- mutex_unlock(&dev_priv->dpll_lock);
+ POSTING_READ(DPCLKA_CFGCR0_ICL);
}
+
+ val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+ mutex_unlock(&dev_priv->dpll_lock);
}
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_connector_state *old_conn_state;
- struct drm_connector *conn;
- int i;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
- struct intel_encoder *encoder =
- to_intel_encoder(old_conn_state->best_encoder);
- enum port port;
+ mutex_lock(&dev_priv->dpll_lock);
- if (old_conn_state->crtc != crtc)
- continue;
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- port = encoder->port;
- mutex_lock(&dev_priv->dpll_lock);
- I915_WRITE(DPCLKA_CFGCR0_ICL,
- I915_READ(DPCLKA_CFGCR0_ICL) |
- icl_dpclka_cfgcr0_clk_off(dev_priv, port));
- mutex_unlock(&dev_priv->dpll_lock);
- }
+ mutex_unlock(&dev_priv->dpll_lock);
}
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
- enum port port = encoder->port;
- bool clk_enabled;
+ enum port port;
+ u32 port_mask;
+ bool ddi_clk_needed;
/*
* In case of DP MST, we sanitize the primary encoder only, not the
@@ -2864,9 +2848,6 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
if (encoder->type == INTEL_OUTPUT_DP_MST)
return;
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- clk_enabled = !(val & icl_dpclka_cfgcr0_clk_off(dev_priv, port));
-
if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) {
u8 pipe_mask;
bool is_mst;
@@ -2880,20 +2861,52 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
return;
}
- if (clk_enabled == !!encoder->base.crtc)
- return;
+ port_mask = BIT(encoder->port);
+ ddi_clk_needed = encoder->base.crtc;
- /*
- * Punt on the case now where clock is disabled, but the encoder is
- * enabled, something else is really broken then.
- */
- if (WARN_ON(!clk_enabled))
- return;
+ if (encoder->type == INTEL_OUTPUT_DSI) {
+ struct intel_encoder *other_encoder;
- DRM_NOTE("Port %c is disabled but it has a mapped PLL, unmap it\n",
- port_name(port));
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ port_mask = intel_dsi_encoder_ports(encoder);
+ /*
+ * Sanity check that we haven't incorrectly registered another
+ * encoder using any of the ports of this DSI encoder.
+ */
+ for_each_intel_encoder(&dev_priv->drm, other_encoder) {
+ if (other_encoder == encoder)
+ continue;
+
+ if (WARN_ON(port_mask & BIT(other_encoder->port)))
+ return;
+ }
+ /*
+ * DSI ports should have their DDI clock ungated when disabled
+ * and gated when enabled.
+ */
+ ddi_clk_needed = !encoder->base.crtc;
+ }
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_port_masked(port, port_mask) {
+ bool ddi_clk_ungated = !(val &
+ icl_dpclka_cfgcr0_clk_off(dev_priv,
+ port));
+
+ if (ddi_clk_needed == ddi_clk_ungated)
+ continue;
+
+ /*
+		 * Punt for now on the case where the clock is gated but would
+		 * be needed by the port; something else is really broken then.
+ */
+ if (WARN_ON(ddi_clk_needed))
+ continue;
+
+ DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ port_name(port));
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ }
}
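
The DSI special case above is easy to read backwards: for every other encoder
type the DDI clock should be ungated exactly while the encoder drives a CRTC,
while DSI wants the opposite. A one-line restatement of the rule (an
illustrative sketch, not kernel code):

#include <stdbool.h>

/* DSI inverts the usual "clock follows the CRTC" rule */
static bool ddi_clk_should_be_ungated(bool is_dsi, bool has_crtc)
{
	return is_dsi ? !has_crtc : has_crtc;
}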
static void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -3096,6 +3109,53 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
I915_WRITE(MG_DP_MODE(port, 1), ln1);
}
+static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->fec_enable)
+ return;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
+ DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n");
+}
+
+static void intel_ddi_enable_fec(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val |= DP_TP_CTL_FEC_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_FEC_ENABLE_LIVE,
+ DP_TP_STATUS_FEC_ENABLE_LIVE,
+ 1))
+ DRM_ERROR("Timed out waiting for FEC Enable Status\n");
+}
+
+static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~DP_TP_CTL_FEC_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+}
+
static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -3134,14 +3194,21 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
+ true);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
+ intel_ddi_enable_fec(encoder, crtc_state);
+
icl_enable_phy_clock_gating(dig_port);
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
+
+ intel_dsc_enable(encoder, crtc_state);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -3208,6 +3275,9 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
WARN_ON(crtc_state->has_pch_encoder);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_map_plls_to_ports(encoder, crtc_state);
+
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -3228,7 +3298,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
}
}
-static void intel_disable_ddi_buf(struct intel_encoder *encoder)
+static void intel_disable_ddi_buf(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -3247,6 +3318,9 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder)
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
I915_WRITE(DP_TP_CTL(port), val);
+ /* Disable FEC in DP Sink */
+ intel_ddi_disable_fec_state(encoder, crtc_state);
+
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
}
@@ -3270,7 +3344,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
}
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
@@ -3293,7 +3367,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
intel_ddi_disable_pipe_clock(old_crtc_state);
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
@@ -3306,6 +3380,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
/*
* When called from DP MST code:
* - old_conn_state will be NULL
@@ -3325,6 +3401,9 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
else
intel_ddi_post_disable_dp(encoder,
old_crtc_state, old_conn_state);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_unmap_plls_to_ports(encoder);
}
void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
@@ -3344,7 +3423,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_ddi_clk_disable(encoder);
val = I915_READ(FDI_RX_MISC(PIPE_A));
@@ -3491,6 +3570,9 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
intel_edp_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
+ /* Disable the decompression in DP Sink */
+ intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
+ false);
}
static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index ceecb5bd5226..1e56319334f3 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -77,6 +77,10 @@ void intel_device_info_dump_flags(const struct intel_device_info *info,
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
+
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
+ DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
+#undef PRINT_FLAG
}
static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
@@ -782,7 +786,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
if (i915_modparams.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0;
- } else if (info->num_pipes > 0 &&
+ } else if (HAS_DISPLAY(dev_priv) &&
(IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
@@ -807,7 +811,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
DRM_INFO("PipeC fused off\n");
info->num_pipes -= 1;
}
- } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
+ } else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) {
u32 dfsm = I915_READ(SKL_DFSM);
u8 disabled_mask = 0;
bool invalid;
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 88f97210dc49..1caf24e2cf0b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -89,35 +89,38 @@ enum intel_ppgtt {
func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
- func(has_csr); \
- func(has_ddi); \
- func(has_dp_mst); \
func(has_reset_engine); \
- func(has_fbc); \
func(has_fpga_dbg); \
- func(has_gmch_display); \
func(has_guc); \
func(has_guc_ct); \
- func(has_hotplug); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
func(has_logical_ring_preemption); \
- func(has_overlay); \
func(has_pooled_eu); \
- func(has_psr); \
func(has_rc6); \
func(has_rc6p); \
func(has_runtime_pm); \
func(has_snoop); \
func(has_coherent_ggtt); \
func(unfenced_needs_alignment); \
+ func(hws_needs_physical);
+
+#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
+ /* Keep in alphabetical order */ \
func(cursor_needs_physical); \
- func(hws_needs_physical); \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_fbc); \
+ func(has_gmch_display); \
+ func(has_hotplug); \
+ func(has_ipc); \
+ func(has_overlay); \
+ func(has_psr); \
func(overlay_needs_physical); \
- func(supports_tv); \
- func(has_ipc);
+ func(supports_tv);
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
@@ -172,6 +175,13 @@ struct intel_device_info {
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
+
+ struct {
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
+ } display;
+
u16 ddb_size; /* in blocks */
/* Register offsets for the various display pipes and transcoders */
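
For readers unfamiliar with the x-macro pattern used by these flag lists, the
new DEV_INFO_DISPLAY_FOR_EACH_FLAG() table expands to roughly the following
(hand-expanded and abbreviated sketch, not generated code):

struct intel_device_info_sketch {
	struct {
		u8 cursor_needs_physical:1;
		u8 has_csr:1;
		u8 has_ddi:1;
		u8 has_dp_mst:1;
		/* ...one 1-bit field per flag in the list above... */
	} display;
};

which is why callers elsewhere in this patch move from
info->cursor_needs_physical style accesses to
info->display.cursor_needs_physical.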
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5a679af03a04..20beb1977a27 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -46,7 +46,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic_uapi.h>
-#include <linux/dma_remapping.h>
+#include <linux/intel-iommu.h>
#include <linux/reservation.h>
/* Primary plane formats for gen <= 3 */
@@ -2341,10 +2341,26 @@ static int intel_fb_offset_to_xy(int *x, int *y,
int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ unsigned int height;
if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
- fb->offsets[color_plane] % intel_tile_size(dev_priv))
+ fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+ DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
+ fb->offsets[color_plane], color_plane);
return -EINVAL;
+ }
+
+ height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
+ height = ALIGN(height, intel_tile_height(fb, color_plane));
+
+ /* Catch potential overflows early */
+ if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
+ fb->offsets[color_plane])) {
+ DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
+ fb->offsets[color_plane], fb->pitches[color_plane],
+ color_plane);
+ return -ERANGE;
+ }
*x = 0;
*y = 0;
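
The new guard rejects framebuffers whose last byte would not fit in 32 bits
before any offset/coordinate math is done on them. A standalone analogue of
the check, with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* does height * pitch + offset still fit in a u32? */
static int fb_end_fits_u32(uint32_t height, uint32_t pitch, uint32_t offset)
{
	uint64_t end = (uint64_t)height * pitch + offset;

	return end <= UINT32_MAX;
}

int main(void)
{
	/* 0x10000 rows at a 0x10000-byte pitch already overflows 32 bits */
	printf("%d\n", fb_end_fits_u32(0x10000, 0x10000, 0));	/* 0 */
	printf("%d\n", fb_end_fits_u32(2160, 16384, 4096));	/* 1 */
	return 0;
}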
@@ -2767,7 +2783,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_pre_disable_primary_noatomic(&crtc->base);
trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc);
+ plane->disable_plane(plane, crtc_state);
}
static void
@@ -3315,7 +3331,6 @@ static void i9xx_update_plane(struct intel_plane *plane,
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 linear_offset;
u32 dspcntr = plane_state->ctl;
- i915_reg_t reg = DSPCNTR(i9xx_plane);
int x = plane_state->color_plane[0].x;
int y = plane_state->color_plane[0].y;
unsigned long irqflags;
@@ -3330,47 +3345,51 @@ static void i9xx_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
+
if (INTEL_GEN(dev_priv) < 4) {
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
+ I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
I915_WRITE_FW(DSPSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
I915_WRITE_FW(PRIMSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
}
- I915_WRITE_FW(reg, dspcntr);
-
- I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPSURF(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) +
- dspaddr_offset);
I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
+ I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
+ I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+ }
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
I915_WRITE_FW(DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
- I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
- } else {
+ else
I915_WRITE_FW(DSPADDR(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- }
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i9xx_disable_plane(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -3456,6 +3475,21 @@ static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
}
}
+static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int rotation)
+{
+ /*
+	 * The stride is expressed either in 64-byte chunks for linear
+	 * buffers or in numbers of tiles for tiled buffers.
+ */
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ return 64;
+ else if (drm_rotation_90_or_270(rotation))
+ return intel_tile_height(fb, color_plane);
+ else
+ return intel_tile_width_bytes(fb, color_plane);
+}
+
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int color_plane)
{
@@ -3466,16 +3500,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state,
if (color_plane >= fb->format->num_planes)
return 0;
- /*
- * The stride is either expressed as a multiple of 64 bytes chunks for
- * linear buffers or in number of tiles for tiled buffers.
- */
- if (drm_rotation_90_or_270(rotation))
- stride /= intel_tile_height(fb, color_plane);
- else
- stride /= intel_fb_stride_alignment(fb, color_plane);
-
- return stride;
+ return stride / skl_plane_stride_mult(fb, color_plane, rotation);
}
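
As a worked example of the factored-out helper: a linear framebuffer divides
its byte pitch by 64, while an X-tiled one divides by the tile width in bytes
(512 bytes on gen9, a value quoted from memory rather than from this patch);
for 90/270 rotation the divisor is the tile height, so the register is then
in rows of tiles. In numbers:

#include <assert.h>

/* illustrative units: 64-byte chunks (linear) vs. 512-byte X-tiles */
int main(void)
{
	assert(4096 / 64 == 64);	/* linear: PLANE_STRIDE in 64B units */
	assert(4096 / 512 == 8);	/* X-tiled: PLANE_STRIDE in tiles */
	return 0;
}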
static u32 skl_plane_ctl_format(uint32_t pixel_format)
@@ -5403,23 +5428,32 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
intel_update_watermarks(crtc);
}
-static void intel_crtc_disable_planes(struct intel_crtc *crtc, unsigned plane_mask)
+static void intel_crtc_disable_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int update_mask = new_crtc_state->update_planes;
+ const struct intel_plane_state *old_plane_state;
struct intel_plane *plane;
unsigned fb_bits = 0;
+ int i;
intel_crtc_dpms_overlay_disable(crtc);
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
- if (plane_mask & BIT(plane->id)) {
- plane->disable_plane(plane, crtc);
+ for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
+ plane->disable_plane(plane, new_crtc_state);
+
+ if (old_plane_state->base.visible)
fb_bits |= plane->frontbuffer_bit;
- }
}
- intel_frontbuffer_flip(to_i915(dev), fb_bits);
+ intel_frontbuffer_flip(dev_priv, fb_bits);
}
static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
@@ -5692,9 +5726,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (pipe_config->shared_dpll)
intel_enable_shared_dpll(pipe_config);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_map_plls_to_ports(crtc, pipe_config, old_state);
-
intel_encoders_pre_enable(crtc, pipe_config, old_state);
if (intel_crtc_has_dp_encoder(pipe_config))
@@ -5889,6 +5920,8 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(old_crtc_state);
+ intel_dsc_disable(old_crtc_state);
+
if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
else
@@ -5896,9 +5929,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
-
intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
}
@@ -6724,7 +6754,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
}
void
-intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n)
@@ -8939,7 +8969,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, plane_id));
- stride_mult = intel_fb_stride_alignment(fb, 0);
+ stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
@@ -9303,10 +9333,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->base.state);
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+ IS_ICELAKE(dev_priv)) {
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
@@ -9444,11 +9476,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
+ unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
+ unsigned long enabled_panel_transcoders = 0;
+ enum transcoder panel_transcoder;
u32 tmp;
+ if (IS_ICELAKE(dev_priv))
+ panel_transcoder_mask |=
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+
/*
* The pipe->transcoder mapping is fixed with the exception of the eDP
- * transcoder handled below.
+ * and DSI transcoders handled below.
*/
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -9456,29 +9495,49 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
* XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's on an always-on power well).
*/
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
- if (tmp & TRANS_DDI_FUNC_ENABLE) {
- enum pipe trans_edp_pipe;
+ for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
+ enum pipe trans_pipe;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
+ if (!(tmp & TRANS_DDI_FUNC_ENABLE))
+ continue;
+
+ /*
+ * Log all enabled ones, only use the first one.
+ *
+ * FIXME: This won't work for two separate DSI displays.
+ */
+ enabled_panel_transcoders |= BIT(panel_transcoder);
+ if (enabled_panel_transcoders != BIT(panel_transcoder))
+ continue;
+
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
- WARN(1, "unknown pipe linked to edp transcoder\n");
+ WARN(1, "unknown pipe linked to transcoder %s\n",
+ transcoder_name(panel_transcoder));
/* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
case TRANS_DDI_EDP_INPUT_A_ON:
- trans_edp_pipe = PIPE_A;
+ trans_pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
- trans_edp_pipe = PIPE_B;
+ trans_pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
- trans_edp_pipe = PIPE_C;
+ trans_pipe = PIPE_C;
break;
}
- if (trans_edp_pipe == crtc->pipe)
- pipe_config->cpu_transcoder = TRANSCODER_EDP;
+ if (trans_pipe == crtc->pipe)
+ pipe_config->cpu_transcoder = panel_transcoder;
}
+ /*
+ * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
+ */
+ WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+ enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+
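
The WARN_ON encodes the valid-combination table from the comment: eDP may
never be paired with a DSI transcoder, while DSI0 and DSI1 may appear alone
or together. A compact restatement (a sketch, not kernel code):

#include <stdbool.h>

/* eDP must be exclusive; any DSI-only combination is fine */
static bool panel_combo_valid(unsigned long enabled, unsigned long edp_bit)
{
	return !(enabled & edp_bit) || enabled == edp_bit;
}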
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
@@ -9611,7 +9670,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!active)
goto out;
- if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
+ IS_ICELAKE(dev_priv)) {
haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
@@ -9667,7 +9727,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
- if (INTEL_INFO(dev_priv)->cursor_needs_physical)
+ if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
base = obj->phys_handle->busaddr;
else
base = intel_plane_ggtt_offset(plane_state);
@@ -9894,9 +9954,9 @@ static void i845_update_cursor(struct intel_plane *plane,
}
static void i845_disable_cursor(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
- i845_update_cursor(plane, NULL, NULL);
+ i845_update_cursor(plane, crtc_state, NULL);
}
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@@ -10087,8 +10147,8 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* On some platforms writing CURCNTR first will also
* cause CURPOS to be armed by the CURBASE write.
* Without the CURCNTR write the CURPOS write would
- * arm itself. Thus we always start the full update
- * with a CURCNTR write.
+ * arm itself. Thus we always update CURCNTR before
+ * CURPOS.
*
* On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
@@ -10098,15 +10158,20 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* cursor that doesn't appear to move, or even change
* shape. Thus we always write CURBASE.
*
- * CURCNTR and CUR_FBC_CTL are always
- * armed by the CURBASE write only.
+	 * The other registers are armed by the CURBASE write,
+	 * except when the plane is getting enabled, at which time
+ * the CURCNTR write arms the update.
*/
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_write_cursor_wm(plane, crtc_state);
+
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
- I915_WRITE_FW(CURCNTR(pipe), cntl);
if (HAS_CUR_FBC(dev_priv))
I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
+ I915_WRITE_FW(CURCNTR(pipe), cntl);
I915_WRITE_FW(CURPOS(pipe), pos);
I915_WRITE_FW(CURBASE(pipe), base);
@@ -10122,9 +10187,9 @@ static void i9xx_update_cursor(struct intel_plane *plane,
}
static void i9xx_disable_cursor(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
- i9xx_update_cursor(plane, NULL, NULL);
+ i9xx_update_cursor(plane, crtc_state, NULL);
}
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@@ -10835,8 +10900,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
continue;
plane_state->linked_plane = NULL;
- if (plane_state->slave && !plane_state->base.visible)
+ if (plane_state->slave && !plane_state->base.visible) {
crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->update_planes |= BIT(plane->id);
+ }
plane_state->slave = false;
}
@@ -10877,6 +10944,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
linked_state->slave = true;
linked_state->linked_plane = plane;
crtc_state->active_planes |= BIT(linked->id);
+ crtc_state->update_planes |= BIT(linked->id);
DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
}
@@ -11887,6 +11955,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct skl_pipe_wm hw_wm, *sw_wm;
struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -11897,6 +11967,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
skl_pipe_wm_get_hw_state(crtc, &hw_wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+ skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
+
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
@@ -11939,8 +12011,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb.plane[pipe][plane];
- sw_ddb_entry = &sw_ddb->plane[pipe][plane];
+ hw_ddb_entry = &hw_ddb_y[plane];
+ sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
@@ -11989,8 +12061,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+ hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
+ sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
@@ -12672,7 +12744,6 @@ static void intel_update_crtc(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state);
struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
bool modeset = needs_modeset(new_crtc_state);
struct intel_plane_state *new_plane_state =
@@ -12695,8 +12766,10 @@ static void intel_update_crtc(struct drm_crtc *crtc,
intel_begin_crtc_commit(crtc, old_crtc_state);
- intel_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc,
- old_intel_cstate, pipe_config);
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+ else
+ i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
intel_finish_crtc_commit(crtc, old_crtc_state);
}
@@ -12889,7 +12962,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
if (old_crtc_state->active) {
- intel_crtc_disable_planes(intel_crtc, old_intel_crtc_state->active_planes);
+ intel_crtc_disable_planes(intel_state, intel_crtc);
/*
* We need to disable pipe CRC before disabling the pipe,
@@ -13244,7 +13317,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
struct i915_vma *vma;
if (plane->id == PLANE_CURSOR &&
- INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int align = intel_cursor_alignment(dev_priv);
int err;
@@ -13739,7 +13812,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
to_intel_plane_state(plane->state));
} else {
trace_intel_disable_plane(plane, to_intel_crtc(crtc));
- intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
+ intel_plane->disable_plane(intel_plane, crtc_state);
}
intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
@@ -14190,7 +14263,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_pps_init(dev_priv);
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return;
/*
@@ -14455,7 +14528,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_framebuffer *fb = &intel_fb->base;
- struct drm_format_name_buf format_name;
u32 pitch_limit;
unsigned int tiling, stride;
int ret = -EINVAL;
@@ -14486,39 +14558,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
}
- /* Passed in modifier sanity checking. */
- switch (mode_cmd->modifier[0]) {
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- break;
- default:
- DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
- goto err;
- }
- /* fall through */
- case I915_FORMAT_MOD_Yf_TILED:
- if (mode_cmd->pixel_format == DRM_FORMAT_C8) {
- DRM_DEBUG_KMS("Indexed format does not support Yf tiling\n");
- goto err;
- }
- /* fall through */
- case I915_FORMAT_MOD_Y_TILED:
- if (INTEL_GEN(dev_priv) < 9) {
- DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
- mode_cmd->modifier[0]);
- goto err;
- }
- break;
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
+ if (!drm_any_plane_has_format(&dev_priv->drm,
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
mode_cmd->modifier[0]);
goto err;
}
@@ -14553,69 +14600,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- /* Reject formats not supported by any plane early. */
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- break;
- case DRM_FORMAT_XRGB1555:
- if (INTEL_GEN(dev_priv) > 3) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_ABGR8888:
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- INTEL_GEN(dev_priv) < 9) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- if (INTEL_GEN(dev_priv) < 4) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_ABGR2101010:
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_VYUY:
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_NV12:
- if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
- IS_BROXTON(dev_priv)) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name));
- goto err;
- }
- break;
- default:
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
-
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
if (mode_cmd->offsets[0] != 0)
goto err;
@@ -16070,7 +16054,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
};
int i;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 5f2955b944da..4262452963b3 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -242,6 +242,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_EDP_VDSC,
POWER_DOMAIN_TRANSCODER_DSI_A,
POWER_DOMAIN_TRANSCODER_DSI_C,
POWER_DOMAIN_PORT_DDI_A_LANES,
@@ -398,6 +399,14 @@ struct intel_link_m_n {
for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
+#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -423,10 +432,18 @@ struct intel_link_m_n {
(__i)++) \
for_each_if(plane)
-void intel_link_compute_m_n(int bpp, int nlanes,
+#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
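
Typical usage of the new old/new CRTC iterator, for reference (a sketch
against i915 internals with an illustrative body; example_walk_crtcs() is not
part of this patch):

/* sketch: walk every CRTC touched by a commit with both states at hand */
static void example_walk_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (old_crtc_state->base.active && !new_crtc_state->base.active)
			DRM_DEBUG_KMS("[CRTC:%d] being disabled\n",
				      crtc->base.base.id);
	}
}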
+void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n);
-
bool is_ccs_modifier(u64 modifier);
#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 7699f9b7b2d2..fdd2cbc56fa3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -47,6 +47,8 @@
/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
+#define DP_DSC_MIN_SUPPORTED_BPC 8
+#define DP_DSC_MAX_SUPPORTED_BPC 10
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
@@ -543,7 +545,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
dsc_slice_count =
drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
true);
- } else {
+ } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
dsc_max_output_bpp =
intel_dp_dsc_get_output_bpp(max_link_clock,
max_lanes,
@@ -1708,6 +1710,41 @@ struct link_config_limits {
int min_bpp, max_bpp;
};
+static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return INTEL_GEN(dev_priv) >= 11 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
+ drm_dp_sink_supports_fec(intel_dp->fec_capable);
+}
+
+static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return INTEL_GEN(dev_priv) >= 10 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
+ return false;
+
+ return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
+ drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
+}
+
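
The four helpers above gate the new features in two layers: the source checks
are pure platform/transcoder constraints, and the combined checks add the
sink capability plus the DP 1.4 rule that DSC on a non-eDP link requires FEC.
Restated as a standalone predicate (illustrative only):

#include <stdbool.h>

/* DSC on external DP is only allowed once FEC is committed to */
static bool dsc_allowed(bool is_edp, bool fec_enable,
			bool source_dsc, bool sink_dsc)
{
	if (!is_edp && !fec_enable)
		return false;
	return source_dsc && sink_dsc;
}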
static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
@@ -1842,14 +1879,122 @@ intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
return false;
}
+static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
+{
+ int i, num_bpc;
+ u8 dsc_bpc[3] = {0};
+
+ num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
+ dsc_bpc);
+ for (i = 0; i < num_bpc; i++) {
+ if (dsc_max_bpc >= dsc_bpc[i])
+ return dsc_bpc[i] * 3;
+ }
+
+ return 0;
+}
+
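
intel_dp_dsc_compute_bpp() returns the pipe bpp (3 x bpc) for the largest
sink-supported bpc that does not exceed the caller's cap, relying on
drm_dp_dsc_sink_supported_input_bpcs() listing the bpcs from high to low. A
standalone restatement with illustrative values:

#include <assert.h>

/* bpc[] is assumed sorted high to low, as in the DRM helper */
static int dsc_compute_bpp(const unsigned char *bpc, int n, int max_bpc)
{
	for (int i = 0; i < n; i++)
		if (max_bpc >= bpc[i])
			return bpc[i] * 3;
	return 0;
}

int main(void)
{
	const unsigned char sink_bpc[] = { 12, 10, 8 };

	assert(dsc_compute_bpp(sink_bpc, 3, 10) == 30);	/* picks 10 bpc */
	assert(dsc_compute_bpp(sink_bpc, 3, 6) == 0);	/* below 8 bpc */
	return 0;
}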
+static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ u8 dsc_max_bpc;
+ int pipe_bpp;
+
+ if (!intel_dp_supports_dsc(intel_dp, pipe_config))
+ return false;
+
+ dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
+ conn_state->max_requested_bpc);
+
+ pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
+ if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
+ DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
+ return false;
+ }
+
+ /*
+ * For now enable DSC for max bpp, max link rate, max lane count.
+ * Optimize this later for the minimum possible link rate/lane count
+ * with DSC enabled for the requested mode.
+ */
+ pipe_config->pipe_bpp = pipe_bpp;
+ pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
+ pipe_config->lane_count = limits->max_lane_count;
+
+ if (intel_dp_is_edp(intel_dp)) {
+ pipe_config->dsc_params.compressed_bpp =
+ min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
+ pipe_config->pipe_bpp);
+ pipe_config->dsc_params.slice_count =
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ true);
+ } else {
+ u16 dsc_max_output_bpp;
+ u8 dsc_dp_slice_count;
+
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay);
+ dsc_dp_slice_count =
+ intel_dp_dsc_get_slice_count(intel_dp,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay);
+ if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
+ DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
+ return false;
+ }
+ pipe_config->dsc_params.compressed_bpp = min_t(u16,
+ dsc_max_output_bpp >> 4,
+ pipe_config->pipe_bpp);
+ pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
+ }
+ /*
+	 * The VDSC engine operates at 1 pixel per clock, so if the peak pixel
+	 * rate is greater than the maximum CD clock and the slice count is
+	 * greater than 1, we need to use 2 VDSC instances.
+ */
+ if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
+ if (pipe_config->dsc_params.slice_count > 1) {
+ pipe_config->dsc_params.dsc_split = true;
+ } else {
+ DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
+ return false;
+ }
+ }
+ if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) {
+ DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
+ "Compressed BPP = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp);
+ return false;
+ }
+ pipe_config->dsc_params.compression_enable = true;
+ DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
+ "Compressed Bpp = %d Slice Count = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp,
+ pipe_config->dsc_params.slice_count);
+
+ return true;
+}
+
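
The 2-instance decision is a plain rate comparison: one VDSC instance
processes 1 pixel per CD clock, so any mode whose pixel rate exceeds the
maximum CD clock must be split, which in turn needs at least two slices.
With illustrative numbers (not taken from this patch):

#include <assert.h>
#include <stdbool.h>

int main(void)
{
	int crtc_clock = 1008000;	/* kHz, hypothetical high-res mode */
	int max_cdclk = 652800;		/* kHz, hypothetical platform cap */
	int slice_count = 2;

	bool dsc_split = crtc_clock > max_cdclk && slice_count > 1;
	assert(dsc_split);	/* must split across 2 VDSC instances */
	return 0;
}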
static bool
intel_dp_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct link_config_limits limits;
int common_len;
+ bool ret;
common_len = intel_dp_common_len_rate_limit(intel_dp,
intel_dp->max_link_rate);
@@ -1888,7 +2033,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp->common_rates[limits.max_clock],
limits.max_bpp, adjusted_mode->crtc_clock);
- if (intel_dp_is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp))
/*
* Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
* section A.1: "It is recommended that the minimum number of
@@ -1898,26 +2043,42 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
* Note that we use the max clock and lane count for eDP 1.3 and
* earlier, and fast vs. wide is irrelevant.
*/
- if (!intel_dp_compute_link_config_fast(intel_dp, pipe_config,
- &limits))
- return false;
- } else {
+ ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
+ &limits);
+ else
/* Optimize for slow and wide. */
- if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config,
- &limits))
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
+ &limits);
+
+ /* enable compression if the mode doesn't fit available BW */
+ if (!ret) {
+ if (!intel_dp_dsc_compute_config(intel_dp, pipe_config,
+ conn_state, &limits))
return false;
}
- DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp);
+ if (pipe_config->dsc_params.compression_enable) {
+ DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp);
- DRM_DEBUG_KMS("DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->pipe_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
+ DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->dsc_params.compressed_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+ } else {
+ DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp);
+ DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->pipe_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+ }
return true;
}
@@ -1983,7 +2144,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
- if (!intel_dp_compute_link_config(encoder, pipe_config))
+ pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+ intel_dp_supports_fec(intel_dp, pipe_config);
+
+ if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state))
return false;
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
@@ -2001,11 +2165,20 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
}
- intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ if (!pipe_config->dsc_params.compression_enable)
+ intel_link_compute_m_n(pipe_config->pipe_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
+ else
+ intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2702,6 +2875,22 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ int ret;
+
+ if (!crtc_state->dsc_params.compression_enable)
+ return;
+
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
+ enable ? DP_DECOMPRESSION_EN : 0);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
+ enable ? "enable" : "disable");
+}
+
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
@@ -3837,15 +4026,14 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
(int)sizeof(intel_dp->dsc_dpcd),
intel_dp->dsc_dpcd);
+
/* FEC is supported only on DP 1.4 */
- if (!intel_dp_is_edp(intel_dp)) {
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
- &intel_dp->fec_capable) < 0)
- DRM_ERROR("Failed to read FEC DPCD register\n");
+ if (!intel_dp_is_edp(intel_dp) &&
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
+ &intel_dp->fec_capable) < 0)
+ DRM_ERROR("Failed to read FEC DPCD register\n");
- DRM_DEBUG_KMS("FEC CAPABILITY: %x\n",
- intel_dp->fec_capable);
- }
+ DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
}
}
@@ -3936,8 +4124,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
- u8 sink_count;
-
if (!intel_dp_read_dpcd(intel_dp))
return false;
@@ -3947,25 +4133,35 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
intel_dp_set_common_rates(intel_dp);
}
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0)
- return false;
-
/*
- * Sink count can change between short pulse hpd hence
- * a member variable in intel_dp will track any changes
- * between short pulse interrupts.
+	 * Some eDP panels do not set a valid value for sink count, which is
+	 * why we don't bother reading it here or in intel_edp_init_dpcd().
*/
- intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count);
+ if (!intel_dp_is_edp(intel_dp)) {
+ u8 count;
+ ssize_t r;
- /*
- * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
- * a dongle is present but no display. Unless we require to know
- * if a dongle is present or not, we don't need to update
- * downstream port information. So, an early return here saves
- * time from performing other operations which are not required.
- */
- if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
- return false;
+ r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
+ if (r < 1)
+ return false;
+
+ /*
+ * Sink count can change between short pulse hpd hence
+ * a member variable in intel_dp will track any changes
+ * between short pulse interrupts.
+ */
+ intel_dp->sink_count = DP_GET_SINK_COUNT(count);
+
+ /*
+ * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+		 * a dongle is present but no display. Unless we need to know
+		 * whether a dongle is present, we don't have to update the
+		 * downstream port information, so an early return here saves
+		 * time by skipping operations that are not required.
+ */
+ if (!intel_dp->sink_count)
+ return false;
+ }
if (!drm_dp_is_branch(intel_dp->dpcd))
return true; /* native DP sink */
@@ -4375,6 +4571,17 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
if (!intel_dp->link_trained)
return false;
+ /*
+	 * While the PSR source HW is enabled it controls the main link on
+	 * its own, sending frames and enabling/disabling the link, so a
+	 * retrain attempted now would fail: the link may or may not be up,
+	 * or training patterns could get mixed with frame data. Besides,
+	 * on PSR exit the HW retrains the link anyway, fixing any link
+	 * status error.
+ */
+ if (intel_psr_enabled(intel_dp))
+ return false;
+
if (!intel_dp_get_link_status(intel_dp, link_status))
return false;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 901e15063b24..d513ca875c67 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2523,7 +2523,8 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
if (intel_port_is_tc(dev_priv, encoder->port))
ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
else
ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 153ee47d9c03..a3252064b8d2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -706,6 +706,8 @@ struct intel_crtc_wm_state {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
+ struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES];
} skl;
struct {
@@ -926,6 +928,9 @@ struct intel_crtc_state {
u8 active_planes;
u8 nv12_planes;
+ /* bitmask of planes that will be updated during the commit */
+ u8 update_planes;
+
/* HDMI scrambling status */
bool hdmi_scrambling;
@@ -937,6 +942,18 @@ struct intel_crtc_state {
/* Output down scaling is done in LSPCON device */
bool lspcon_downsampling;
+
+ /* Display Stream compression state */
+ struct {
+ bool compression_enable;
+ bool dsc_split;
+ u16 compressed_bpp;
+ u8 slice_count;
+ } dsc_params;
+ struct drm_dsc_config dp_dsc_cfg;
+
+ /* Forward Error correction State */
+ bool fec_enable;
};
struct intel_crtc {
@@ -1013,7 +1030,7 @@ struct intel_plane {
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
- struct intel_crtc *crtc);
+ const struct intel_crtc_state *crtc_state);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
@@ -1516,13 +1533,9 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state);
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id pll_id);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height);
@@ -1787,6 +1800,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
@@ -1842,6 +1858,12 @@ uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
int mode_hdisplay);
+/* intel_vdsc.c */
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config);
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
+
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
return ~((1 << lane_count) - 1) & 0xf;
@@ -2046,6 +2068,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
u32 *out_value);
+bool intel_psr_enabled(struct intel_dp *intel_dp);
/* intel_quirks.c */
void intel_init_quirks(struct drm_i915_private *dev_priv);
@@ -2180,6 +2203,9 @@ void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
@@ -2194,6 +2220,10 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry entries[],
int num_entries, int ignore_idx);
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_device *dev);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
@@ -2286,10 +2316,10 @@ struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
-void intel_update_planes_on_crtc(struct intel_atomic_state *old_state,
- struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state);
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index ee93137f4433..d968f1f13e09 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -146,6 +146,11 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
}
+static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
+{
+ return enc_to_intel_dsi(&encoder->base)->ports;
+}
+
/* intel_dsi.c */
int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index a72de81f4832..a1a8b3790e61 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -103,6 +103,18 @@ static struct gpio_map vlv_gpio_table[] = {
#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
#define CHV_GPIO_CFGLOCK (1 << 31)
+/* ICL DSI Display GPIO Pins */
+#define ICL_GPIO_DDSP_HPD_A 0
+#define ICL_GPIO_L_VDDEN_1 1
+#define ICL_GPIO_L_BKLTEN_1 2
+#define ICL_GPIO_DDPA_CTRLCLK_1 3
+#define ICL_GPIO_DDPA_CTRLDATA_1 4
+#define ICL_GPIO_DDSP_HPD_B 5
+#define ICL_GPIO_L_VDDEN_2 6
+#define ICL_GPIO_L_BKLTEN_2 7
+#define ICL_GPIO_DDPA_CTRLCLK_2 8
+#define ICL_GPIO_DDPA_CTRLDATA_2 9
+
static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
@@ -324,6 +336,12 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
gpiod_set_value(gpio_desc, value);
}
+static void icl_exec_gpio(struct drm_i915_private *dev_priv,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n");
+}
+
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@@ -347,7 +365,9 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
/* pull up/down */
value = *data++ & 1;
- if (IS_VALLEYVIEW(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
else if (IS_CHERRYVIEW(dev_priv))
chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index dfafa79171df..ff5b7bc692ce 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -493,46 +493,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_cmd_parser(engine);
}
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
-
- WARN_ON(engine->scratch);
-
- obj = i915_gem_object_create_stolen(engine->i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(engine->i915, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- return PTR_ERR(obj);
- }
-
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unref;
- }
-
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_unref;
-
- engine->scratch = vma;
- return 0;
-
-err_unref:
- i915_gem_object_put(obj);
- return ret;
-}
-
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
-{
- i915_vma_unpin_and_release(&engine->scratch, 0);
-}
-
static void cleanup_status_page(struct intel_engine_cs *engine)
{
if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@@ -707,8 +667,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- intel_engine_cleanup_scratch(engine);
-
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -723,6 +681,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
__intel_context_unpin(i915->kernel_context, engine);
i915_timeline_fini(&engine->timeline);
+
+ intel_wa_list_free(&engine->ctx_wa_list);
+ intel_wa_list_free(&engine->wa_list);
+ intel_wa_list_free(&engine->whitelist);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 14cbaf4a0e93..f23570c44323 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1309,7 +1309,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->active = false;
if (need_fbc_vtd_wa(dev_priv))
- mkwrite_device_info(dev_priv)->has_fbc = false;
+ mkwrite_device_info(dev_priv)->display.has_fbc = false;
i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 2480c7d6edee..fb5bb5b32a60 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -672,7 +672,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0))
+ if (WARN_ON(!HAS_DISPLAY(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4d264e577e3b..55aeb97dd66d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -115,6 +115,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_ENABLE_VSC_HSW;
+ case DP_SDP_PPS:
+ return VDIP_ENABLE_PPS;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -136,6 +138,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
switch (type) {
case DP_SDP_VSC:
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
+ case DP_SDP_PPS:
+ return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD:
@@ -148,6 +152,18 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
}
}
+static int hsw_dip_data_size(unsigned int type)
+{
+ switch (type) {
+ case DP_SDP_VSC:
+ return VIDEO_DIP_VSC_DATA_SIZE;
+ case DP_SDP_PPS:
+ return VIDEO_DIP_PPS_DATA_SIZE;
+ default:
+ return VIDEO_DIP_DATA_SIZE;
+ }
+}
+
static void g4x_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
@@ -382,11 +398,12 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
- int data_size = type == DP_SDP_VSC ?
- VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
+ int data_size;
int i;
u32 val = I915_READ(ctl_reg);
+ data_size = hsw_dip_data_size(type);
+
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 33d87ab93fdd..802d0394ccc4 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -817,7 +817,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9399db3260ad..d84c7815ee0c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -398,8 +398,13 @@ static u64 execlists_update_context(struct i915_request *rq)
* may not be visible to the HW prior to the completion of the UC
* register write and that we may begin execution from the context
* before its image is complete leading to invalid PD chasing.
+ *
+ * Furthermore, Braswell, at least, wants a full mb to be sure that
+ * the writes are coherent in memory (visible to the GPU) prior to
+ * execution, and not just visible to other CPUs (as is the result of
+ * wmb).
*/
- wmb();
+ mb();
return ce->lrc_desc;
}
@@ -767,6 +772,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
+ const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
+
/*
* After a reset, the HW starts writing into CSB entry [0]. We
* therefore have to set our HEAD pointer back one entry so that
@@ -776,8 +783,8 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
* inline comparison of our cached head position against the last HW
* write works even before the first interrupt.
*/
- execlists->csb_head = execlists->csb_write_reset;
- WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
}
static void nop_submission_tasklet(unsigned long data)
@@ -818,8 +825,11 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline.requests, link) {
GEM_BUG_ON(!rq->global_seqno);
- if (!i915_request_completed(rq))
- dma_fence_set_error(&rq->fence, -EIO);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ continue;
+
+ dma_fence_set_error(&rq->fence, -EIO);
}
/* Flush the queued requests to the timeline list (for retiring). */
@@ -839,6 +849,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
kmem_cache_free(engine->i915->priorities, p);
}
+ intel_write_status_page(engine,
+ I915_GEM_HWS_INDEX,
+ intel_engine_last_submit(engine));
+
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority = INT_MIN;
@@ -1279,9 +1293,10 @@ static int execlists_request_alloc(struct i915_request *request)
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
+ /* NB no one else is allowed to scribble over scratch + 256! */
*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1295,7 +1310,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
return batch;
@@ -1332,7 +1347,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch) +
+ i915_scratch_offset(engine->i915) +
2 * CACHELINE_BYTES);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1401,18 +1416,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
- /* WaClearSlmSpaceAtContextSwitch:kbl */
- /* Actual scratch location is at 128 bytes offset */
- if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
- batch = gen8_emit_pipe_control(batch,
- PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch)
- + 2 * CACHELINE_BYTES);
- }
-
/* WaMediaPoolStateCmdInWABB:bxt,glk */
if (HAS_POOLED_EU(engine->i915)) {
/*
@@ -1629,6 +1632,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
+ intel_engine_apply_workarounds(engine);
+
intel_mocs_init_engine(engine);
intel_engine_reset_breadcrumbs(engine);
@@ -1653,7 +1658,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
+ intel_engine_apply_whitelist(engine);
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
@@ -1676,7 +1681,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
+ intel_engine_apply_whitelist(engine);
return 0;
}
@@ -1974,7 +1979,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
{
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr =
- i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2088,7 +2093,7 @@ static int gen8_init_rcs_context(struct i915_request *rq)
{
int ret;
- ret = intel_ctx_workarounds_emit(rq);
+ ret = intel_engine_emit_ctx_wa(rq);
if (ret)
return ret;
@@ -2229,12 +2234,6 @@ logical_ring_setup(struct intel_engine_cs *engine)
logical_ring_default_irqs(engine);
}
-static bool csb_force_mmio(struct drm_i915_private *i915)
-{
- /* Older GVT emulation depends upon intercepting CSB mmio */
- return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915);
-}
-
static int logical_ring_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -2264,24 +2263,12 @@ static int logical_ring_init(struct intel_engine_cs *engine)
upper_32_bits(ce->lrc_desc);
}
- execlists->csb_read =
- i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
- if (csb_force_mmio(i915)) {
- execlists->csb_status = (u32 __force *)
- (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+ execlists->csb_status =
+ &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
- execlists->csb_write = (u32 __force *)execlists->csb_read;
- execlists->csb_write_reset =
- _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK,
- GEN8_CSB_ENTRIES - 1);
- } else {
- execlists->csb_status =
- &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+ execlists->csb_write =
+ &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
- execlists->csb_write =
- &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
- execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1;
- }
reset_csb_pointers(execlists);
return 0;
@@ -2311,10 +2298,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
if (ret)
return ret;
- ret = intel_engine_create_scratch(engine, PAGE_SIZE);
- if (ret)
- goto err_cleanup_common;
-
ret = intel_init_workaround_bb(engine);
if (ret) {
/*
@@ -2326,11 +2309,10 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
- return 0;
+ intel_engine_init_whitelist(engine);
+ intel_engine_init_workarounds(engine);
-err_cleanup_common:
- intel_engine_cleanup_common(engine);
- return ret;
+ return 0;
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index d84b6d2d2fae..4aa68ffbd30e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -87,12 +87,12 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
{
}
-void intel_opregion_resume(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
{
}
-void intel_opregion_suspend(struct drm_i915_private *dev_priv,
- pci_power_t state)
+static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+ pci_power_t state)
{
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 897a791662c5..a26b4eddda25 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3951,68 +3951,68 @@ static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
const enum pipe pipe,
const enum plane_id plane_id,
- struct skl_ddb_allocation *ddb /* out */)
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv)
{
- u32 val, val2 = 0;
- int fourcc, pixel_format;
+ u32 val, val2;
+ u32 fourcc = 0;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
val = I915_READ(CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val);
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
return;
}
val = I915_READ(PLANE_CTL(pipe, plane_id));
/* No DDB allocated for disabled planes */
- if (!(val & PLANE_CTL_ENABLE))
- return;
-
- pixel_format = val & PLANE_CTL_FORMAT_MASK;
- fourcc = skl_format_to_fourcc(pixel_format,
- val & PLANE_CTL_ORDER_RGBX,
- val & PLANE_CTL_ALPHA_MASK);
+ if (val & PLANE_CTL_ENABLE)
+ fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK);
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
- if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ } else {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val2);
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->uv_plane[pipe][plane_id], val);
- } else {
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val);
+ if (fourcc == DRM_FORMAT_NV12)
+ swap(val, val2);
+
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
}
}
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */)
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv)
{
- struct intel_crtc *crtc;
-
- memset(ddb, 0, sizeof(*ddb));
-
- ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- enum intel_display_power_domain power_domain;
- enum plane_id plane_id;
- enum pipe pipe = crtc->pipe;
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return;
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
- continue;
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ skl_ddb_get_hw_plane_state(dev_priv, pipe,
+ plane_id,
+ &ddb_y[plane_id],
+ &ddb_uv[plane_id]);
- for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(dev_priv, pipe,
- plane_id, ddb);
+ intel_display_power_put(dev_priv, power_domain);
+}
- intel_display_power_put(dev_priv, power_domain);
- }
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
}
/*
@@ -4410,7 +4410,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
uint16_t alloc_size, start;
uint16_t minimum[I915_MAX_PLANES] = {};
@@ -4423,8 +4422,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uint16_t total_min_blocks = 0;
/* Clear the partitioning for disabled planes. */
- memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
+ memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
+ memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
if (WARN_ON(!state))
return 0;
@@ -4471,8 +4470,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
}
alloc_size -= total_min_blocks;
- ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
- ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
/*
* 2. Distribute the remaining space in proportion to the amount of
@@ -4503,8 +4502,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/* Leave disabled planes at (0,0) */
if (data_rate) {
- ddb->plane[pipe][plane_id].start = start;
- ddb->plane[pipe][plane_id].end = start + plane_blocks;
+ cstate->wm.skl.plane_ddb_y[plane_id].start = start;
+ cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
}
start += plane_blocks;
@@ -4519,8 +4518,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
if (uv_data_rate) {
- ddb->uv_plane[pipe][plane_id].start = start;
- ddb->uv_plane[pipe][plane_id].end =
+ cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
+ cstate->wm.skl.plane_ddb_uv[plane_id].end =
start + uv_plane_blocks;
}
@@ -4617,12 +4616,12 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
}
static int
-skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *cstate,
+skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
- struct skl_wm_params *wp, int plane_id)
+ struct skl_wm_params *wp, int color_plane)
{
struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
const struct drm_framebuffer *fb = pstate->fb;
uint32_t interm_pbpl;
@@ -4630,11 +4629,8 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
- if (!intel_wm_plane_visible(cstate, intel_pstate))
- return 0;
-
/* only NV12 format has two planes */
- if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) {
+ if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
DRM_DEBUG_KMS("Non NV12 format have single plane\n");
return -EINVAL;
}
@@ -4659,10 +4655,10 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
}
- if (plane_id == 1 && wp->is_planar)
+ if (color_plane == 1 && wp->is_planar)
wp->width /= 2;
- wp->cpp = fb->format->cpp[plane_id];
+ wp->cpp = fb->format->cpp[color_plane];
wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
intel_pstate);
@@ -4724,8 +4720,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
return 0;
}
-static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *cstate,
+static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
uint16_t ddb_allocation,
int level,
@@ -4733,6 +4728,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_pstate->base.plane->dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
uint32_t latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
@@ -4743,11 +4740,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
uint32_t min_disp_buf_needed;
- if (latency == 0 ||
- !intel_wm_plane_visible(cstate, intel_pstate)) {
- result->plane_en = false;
- return 0;
- }
+ if (latency == 0)
+ return level == 0 ? -EINVAL : 0;
/* Display WA #1141: kbl,cfl */
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4844,8 +4838,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if ((level > 0 && res_lines > 31) ||
res_blocks >= ddb_allocation ||
min_disp_buf_needed >= ddb_allocation) {
- result->plane_en = false;
-
/*
* If there are no valid level 0 watermarks, then we can't
* support this display configuration.
@@ -4872,27 +4864,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
static int
-skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb,
- const struct intel_crtc_state *cstate,
+skl_compute_wm_levels(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
uint16_t ddb_blocks,
const struct skl_wm_params *wm_params,
- struct skl_plane_wm *wm,
struct skl_wm_level *levels)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_pstate->base.plane->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
int ret;
- if (WARN_ON(!intel_pstate->base.fb))
- return -EINVAL;
-
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
- ret = skl_compute_plane_wm(dev_priv,
- cstate,
+ ret = skl_compute_plane_wm(cstate,
intel_pstate,
ddb_blocks,
level,
@@ -4905,9 +4892,6 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
result_prev = result;
}
- if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
- wm->is_planar = true;
-
return 0;
}
@@ -4935,10 +4919,9 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
}
static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
- struct skl_wm_params *wp,
- struct skl_wm_level *wm_l0,
- uint16_t ddb_allocation,
- struct skl_wm_level *trans_wm /* out */)
+ const struct skl_wm_params *wp,
+ struct skl_plane_wm *wm,
+ uint16_t ddb_allocation)
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4946,16 +4929,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
const uint16_t trans_amount = 10; /* This is configurable amount */
uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
- if (!cstate->base.active)
- goto exit;
-
/* Transition WM are not recommended by HW team for GEN9 */
if (INTEL_GEN(dev_priv) <= 9)
- goto exit;
+ return;
/* Transition WM don't make any sense if ipc is disabled */
if (!dev_priv->ipc_enabled)
- goto exit;
+ return;
trans_min = 14;
if (INTEL_GEN(dev_priv) >= 11)
@@ -4973,7 +4953,7 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
* Result Blocks is Result Blocks minus 1 and it should work for the
* current platforms.
*/
- wm0_sel_res_b = wm_l0->plane_res_b - 1;
+ wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
if (wp->y_tiled) {
trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
@@ -4992,107 +4972,129 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
res_blocks += 1;
if (res_blocks < ddb_allocation) {
- trans_wm->plane_res_b = res_blocks;
- trans_wm->plane_en = true;
- return;
+ wm->trans_wm.plane_res_b = res_blocks;
+ wm->trans_wm.plane_en = true;
}
-
-exit:
- trans_wm->plane_en = false;
}
-static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id,
- const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- int color_plane)
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ enum plane_id plane_id, int color_plane)
{
- struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev);
- struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
- enum pipe pipe = to_intel_plane(pstate->base.plane)->pipe;
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+ u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
struct skl_wm_params wm_params;
- uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
int ret;
- ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate,
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
&wm_params, color_plane);
if (ret)
return ret;
- ret = skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
- ddb_blocks, &wm_params, wm, wm->wm);
-
+ ret = skl_compute_wm_levels(crtc_state, plane_state,
+ ddb_blocks, &wm_params, wm->wm);
if (ret)
return ret;
- skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
- ddb_blocks, &wm->trans_wm);
+ skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
return 0;
}
-static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm,
- const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
-{
- enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id;
-
- return __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
-}
-
-static int skl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm,
- const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ enum plane_id plane_id)
{
- struct intel_plane *plane = to_intel_plane(pstate->base.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum plane_id plane_id = plane->id;
- struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+ u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
struct skl_wm_params wm_params;
- enum pipe pipe = plane->pipe;
- uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
int ret;
- ret = __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
- if (ret)
- return ret;
+ wm->is_planar = true;
/* uv plane watermarks must also be validated for NV12/Planar */
- ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]);
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, 1);
+ if (ret)
+ return ret;
- ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, &wm_params, 1);
+ ret = skl_compute_wm_levels(crtc_state, plane_state,
+ ddb_blocks, &wm_params, wm->uv_wm);
if (ret)
return ret;
- return skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
- ddb_blocks, &wm_params, wm, wm->uv_wm);
+ return 0;
}
-static int icl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm,
- const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane_id plane_id = plane->id;
int ret;
- enum plane_id y_plane_id = pstate->linked_plane->id;
- enum plane_id uv_plane_id = to_intel_plane(pstate->base.plane)->id;
- ret = __skl_build_plane_wm_single(ddb, pipe_wm, y_plane_id,
- cstate, pstate, 0);
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 0);
if (ret)
return ret;
- return __skl_build_plane_wm_single(ddb, pipe_wm, uv_plane_id,
- cstate, pstate, 1);
+ if (fb->format->is_yuv && fb->format->num_planes > 1) {
+ ret = skl_build_plane_wm_uv(crtc_state, plane_state,
+ plane_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
+ int ret;
+
+ /* Watermarks calculated in master */
+ if (plane_state->slave)
+ return 0;
+
+ if (plane_state->linked_plane) {
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane_id y_plane_id = plane_state->linked_plane->id;
+
+ WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
+ WARN_ON(!fb->format->is_yuv ||
+ fb->format->num_planes == 1);
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ y_plane_id, 0);
+ if (ret)
+ return ret;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 1);
+ if (ret)
+ return ret;
+ } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
- struct skl_ddb_allocation *ddb,
struct skl_pipe_wm *pipe_wm)
{
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct drm_crtc_state *crtc_state = &cstate->base;
struct drm_plane *plane;
const struct drm_plane_state *pstate;
@@ -5108,18 +5110,12 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate =
to_intel_plane_state(pstate);
- /* Watermarks calculated in master */
- if (intel_pstate->slave)
- continue;
-
- if (intel_pstate->linked_plane)
- ret = icl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
- else if (intel_pstate->base.fb &&
- intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
- ret = skl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
+ if (INTEL_GEN(dev_priv) >= 11)
+ ret = icl_build_plane_wm(pipe_wm,
+ cstate, intel_pstate);
else
- ret = skl_build_plane_wm_single(ddb, pipe_wm, cstate, intel_pstate);
-
+ ret = skl_build_plane_wm(pipe_wm,
+ cstate, intel_pstate);
if (ret)
return ret;
}
@@ -5134,9 +5130,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
if (entry->end)
- I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+ I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
else
- I915_WRITE(reg, 0);
+ I915_WRITE_FW(reg, 0);
}
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -5151,19 +5147,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
}
- I915_WRITE(reg, val);
+ I915_WRITE_FW(reg, val);
}
-static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb,
- enum plane_id plane_id)
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum pipe pipe = intel_crtc->pipe;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_ddb_entry *ddb_uv =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
@@ -5172,29 +5171,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
&wm->trans_wm);
- if (wm->is_planar && INTEL_GEN(dev_priv) < 11) {
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->uv_plane[pipe][plane_id]);
+ if (INTEL_GEN(dev_priv) >= 11) {
skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- } else {
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- if (INTEL_GEN(dev_priv) < 11)
- I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+ PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+ return;
}
+
+ if (wm->is_planar)
+ swap(ddb_y, ddb_uv);
+
+ skl_ddb_entry_write(dev_priv,
+ PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+ skl_ddb_entry_write(dev_priv,
+ PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
}
-static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb)
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum pipe pipe = intel_crtc->pipe;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
@@ -5202,22 +5204,30 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
}
skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
- &ddb->plane[pipe][PLANE_CURSOR]);
+ skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2)
{
- if (l1->plane_en != l2->plane_en)
- return false;
+ return l1->plane_en == l2->plane_en &&
+ l1->plane_res_l == l2->plane_res_l &&
+ l1->plane_res_b == l2->plane_res_b;
+}
- /* If both planes aren't enabled, the rest shouldn't matter */
- if (!l1->plane_en)
- return true;
+static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
+ const struct skl_plane_wm *wm1,
+ const struct skl_plane_wm *wm2)
+{
+ int level, max_level = ilk_wm_max_level(dev_priv);
- return (l1->plane_res_l == l2->plane_res_l &&
- l1->plane_res_b == l2->plane_res_b);
+ for (level = 0; level <= max_level; level++) {
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
+ !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+ return false;
+ }
+
+ return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -5244,13 +5254,12 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
- struct skl_ddb_allocation *ddb, /* out */
bool *changed /* out */)
{
struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
int ret;
- ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+ ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
if (ret)
return ret;
@@ -5276,42 +5285,29 @@ pipes_modified(struct drm_atomic_state *state)
}
static int
-skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_atomic_state *state = cstate->base.state;
- struct drm_device *dev = state->dev;
- struct drm_crtc *crtc = cstate->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
- struct drm_plane *plane;
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
- drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
- struct drm_plane_state *plane_state;
- struct intel_plane *linked;
- enum plane_id plane_id = to_intel_plane(plane)->id;
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
- if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
- &new_ddb->plane[pipe][plane_id]) &&
- skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
- &new_ddb->uv_plane[pipe][plane_id]))
+ if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
+ skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
continue;
- plane_state = drm_atomic_get_plane_state(state, plane);
+ plane_state = intel_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
- /* Make sure linked plane is updated too */
- linked = to_intel_plane_state(plane_state)->linked_plane;
- if (!linked)
- continue;
-
- plane_state = drm_atomic_get_plane_state(state, &linked->base);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
+ new_crtc_state->update_planes |= BIT(plane_id);
}
return 0;
@@ -5323,18 +5319,21 @@ skl_compute_ddb(struct drm_atomic_state *state)
const struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+ struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
int ret, i;
memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
- for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
- ret = skl_allocate_pipe_ddb(cstate, ddb);
+ for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
if (ret)
return ret;
- ret = skl_ddb_add_affected_planes(cstate);
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
if (ret)
return ret;
}
@@ -5343,36 +5342,29 @@ skl_compute_ddb(struct drm_atomic_state *state)
}
static void
-skl_print_wm_changes(const struct drm_atomic_state *state)
+skl_print_wm_changes(struct intel_atomic_state *state)
{
- const struct drm_device *dev = state->dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- const struct drm_crtc *crtc;
- const struct drm_crtc_state *cstate;
- const struct intel_plane *intel_plane;
- const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
- const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
int i;
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
-
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- enum plane_id plane_id = intel_plane->id;
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
- old = &old_ddb->plane[pipe][plane_id];
- new = &new_ddb->plane[pipe][plane_id];
+ old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
+ new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
if (skl_ddb_entry_equal(old, new))
continue;
DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
- intel_plane->base.base.id,
- intel_plane->base.name,
+ plane->base.base.id, plane->base.name,
old->start, old->end,
new->start, new->end);
}
@@ -5469,6 +5461,66 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
return 0;
}
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issue as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout as the
+ * default value of the watermarks registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ /*
+ * Force a full wm update for every plane on modeset.
+ * Required because the reset value of the wm registers
+ * is non-zero, whereas we want all disabled planes to
+ * have zero watermarks. So if we turn off the relevant
+ * power well the hardware state will go out of sync
+ * with the software state.
+ */
+ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+ skl_plane_wm_equals(dev_priv,
+ &old_crtc_state->wm.skl.optimal.planes[plane_id],
+ &new_crtc_state->wm.skl.optimal.planes[plane_id]))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
static int
skl_compute_wm(struct drm_atomic_state *state)
{
@@ -5508,8 +5560,12 @@ skl_compute_wm(struct drm_atomic_state *state)
&to_intel_crtc_state(crtc->state)->wm.skl.optimal;
pipe_wm = &intel_cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
- &results->ddb, &changed);
+ ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
+ if (ret)
+ return ret;
+
+ ret = skl_wm_add_affected_planes(intel_state,
+ to_intel_crtc(crtc));
if (ret)
return ret;
@@ -5523,7 +5579,7 @@ skl_compute_wm(struct drm_atomic_state *state)
intel_cstate->update_wm_pre = true;
}
- skl_print_wm_changes(state);
+ skl_print_wm_changes(intel_state);
return 0;
}
@@ -5534,23 +5590,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id != PLANE_CURSOR)
- skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
- ddb, plane_id);
- else
- skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
- ddb);
- }
}
static void skl_initial_wm(struct intel_atomic_state *state,
@@ -5560,8 +5605,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_values *results = &state->wm_results;
- struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
- enum pipe pipe = intel_crtc->pipe;
if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
return;
@@ -5571,11 +5614,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
if (cstate->base.active_changed)
skl_atomic_update_crtc_wm(state, cstate);
- memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
- sizeof(hw_vals->ddb.uv_plane[pipe]));
- memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
- sizeof(hw_vals->ddb.plane[pipe]));
-
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5726,13 +5764,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
if (dev_priv->active_crtcs) {
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
- } else {
- /*
- * Easy/common case; just sanitize DDB now if everything off
- * Keep dbuf slice info intact
- */
- memset(ddb->plane, 0, sizeof(ddb->plane));
- memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
}
}
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 54fa17a5596a..419e56342523 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -75,6 +75,10 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
if (i915_modparams.enable_psr == -1)
return false;
+ /* Cannot enable DSC and PSR2 simultaneously */
+ WARN_ON(crtc_state->dsc_params.compression_enable &&
+ crtc_state->has_psr2);
+
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
@@ -169,6 +173,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
u32 transcoders = BIT(TRANSCODER_EDP);
enum transcoder cpu_transcoder;
ktime_t time_ns = ktime_get();
+ u32 mask = 0;
if (INTEL_GEN(dev_priv) >= 8)
transcoders |= BIT(TRANSCODER_A) |
@@ -178,10 +183,22 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
int shift = edp_psr_shift(cpu_transcoder);
- /* FIXME: Exit PSR and link train manually when this happens. */
- if (psr_iir & EDP_PSR_ERROR(shift))
- DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
- transcoder_name(cpu_transcoder));
+ if (psr_iir & EDP_PSR_ERROR(shift)) {
+ DRM_WARN("[transcoder %s] PSR aux error\n",
+ transcoder_name(cpu_transcoder));
+
+ dev_priv->psr.irq_aux_error = true;
+
+			/*
+			 * If this interrupt is not masked it will keep
+			 * firing so fast that it prevents the scheduled
+			 * work from running.
+			 * Also, after a PSR error we don't want to arm PSR
+			 * again, so there is no need to unmask the interrupt
+			 * or clear irq_aux_error.
+			 */
+ mask |= EDP_PSR_ERROR(shift);
+ }
if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
dev_priv->psr.last_entry_attempt = time_ns;
@@ -203,6 +220,13 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
}
}
}
+
+ if (mask) {
+ mask |= I915_READ(EDP_PSR_IMR);
+ I915_WRITE(EDP_PSR_IMR, mask);
+
+ schedule_work(&dev_priv->psr.work);
+ }
}
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
@@ -482,6 +506,16 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!dev_priv->psr.sink_psr2_support)
return false;
+ /*
+ * DSC and PSR2 cannot be enabled simultaneously. If a requested
+ * resolution requires DSC to be enabled, priority is given to DSC
+ * over PSR2.
+ */
+ if (crtc_state->dsc_params.compression_enable) {
+ DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
+ return false;
+ }
+
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
@@ -527,10 +561,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- if (IS_HASWELL(dev_priv) &&
- I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
- S3D_ENABLE) {
- DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+ if (dev_priv->psr.sink_not_reliable) {
+ DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
return;
}
@@ -687,6 +719,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.prepared = true;
+ dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
if (psr_global_enabled(dev_priv->psr.debug))
intel_psr_enable_locked(dev_priv, crtc_state);
@@ -933,6 +966,16 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
return ret;
}
+static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+{
+ struct i915_psr *psr = &dev_priv->psr;
+
+ intel_psr_disable_locked(psr->dp);
+ psr->sink_not_reliable = true;
+	/* let's make sure that the sink is awake */
+ drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+}
+
static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -943,6 +986,9 @@ static void intel_psr_work(struct work_struct *work)
if (!dev_priv->psr.enabled)
goto unlock;
+ if (READ_ONCE(dev_priv->psr.irq_aux_error))
+ intel_psr_handle_irq(dev_priv);
+
/*
* We have to make sure PSR is ready for re-enable
* otherwise it keeps disabled until next full enable/disable cycle.
@@ -981,9 +1027,6 @@ unlock:
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_crtc *crtc;
- enum pipe pipe;
-
if (!CAN_PSR(dev_priv))
return;
@@ -996,10 +1039,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
return;
}
- crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
if (frontbuffer_bits)
@@ -1024,9 +1064,6 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_crtc *crtc;
- enum pipe pipe;
-
if (!CAN_PSR(dev_priv))
return;
@@ -1039,10 +1076,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
return;
}
- crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
@@ -1056,7 +1090,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
* but it makes more sense to write to the currently active
* pipe.
*/
- I915_WRITE(CURSURFLIVE(pipe), 0);
+ I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
}
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
@@ -1073,6 +1107,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
+ u32 val;
+
if (!HAS_PSR(dev_priv))
return;
@@ -1086,6 +1122,22 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
i915_modparams.enable_psr = 0;
+ /*
+ * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+ * will still have the error bit set even after the reset done in the
+ * irq_preinstall and irq_uninstall hooks.
+ * Enabling PSR in that state causes the screen to freeze the first
+ * time the PSR HW tries to activate, so keep PSR disabled to avoid
+ * any rendering problems.
+ */
+ val = I915_READ(EDP_PSR_IIR);
+ val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
+ if (val) {
+ DRM_DEBUG_KMS("PSR interruption error set\n");
+ dev_priv->psr.sink_not_reliable = true;
+ return;
+ }
+
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
@@ -1123,6 +1175,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
}
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
@@ -1140,10 +1193,27 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (val & ~errors)
DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
val & ~errors);
- if (val & errors)
+ if (val & errors) {
intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ }
/* clear status register */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
exit:
mutex_unlock(&psr->lock);
}
+
+bool intel_psr_enabled(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ bool ret;
+
+ if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ return false;
+
+ mutex_lock(&dev_priv->psr.lock);
+ ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
+ mutex_unlock(&dev_priv->psr.lock);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 87eebc13c0d8..fbeaec3994e7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -69,19 +69,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
+ unsigned int num_store_dw;
u32 cmd, *cs;
cmd = MI_FLUSH;
-
+ num_store_dw = 0;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
+ if (mode & EMIT_FLUSH)
+ num_store_dw = 4;
- cs = intel_ring_begin(rq, 2);
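+ /*
+ * Ring space accounting (illustrative): 1 dword for cmd, 3 dwords per
+ * MI_STORE_DWORD_IMM triplet and 1 dword for the trailing MI_FLUSH,
+ * hence 2 + 3 * num_store_dw below.
+ */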
+ cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
- *cs++ = MI_NOOP;
+ while (num_store_dw--) {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = 0;
+ }
+ *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+
intel_ring_advance(rq, cs);
return 0;
@@ -150,8 +159,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_ggtt_offset(rq->engine->scratch) |
- PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
@@ -159,8 +167,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_ggtt_offset(rq->engine->scratch) |
- PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
@@ -212,8 +219,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
static int
intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs;
cs = intel_ring_begin(rq, 6);
@@ -246,8 +252,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
int ret;
@@ -316,8 +321,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
/*
@@ -529,6 +533,13 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
+ if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
+ I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+ if (HAS_VEBOX(dev_priv))
+ I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
+ }
+
/* Enforce ordering by reading HEAD register back */
I915_READ_HEAD(engine);
@@ -546,10 +557,11 @@ static int init_ring_common(struct intel_engine_cs *engine)
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
-
intel_ring_update_space(ring);
+
+ /* First wake the ring up to an empty/idle ring */
I915_WRITE_HEAD(engine, ring->head);
- I915_WRITE_TAIL(engine, ring->tail);
+ I915_WRITE_TAIL(engine, ring->head);
(void)I915_READ_TAIL(engine);
I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
@@ -574,6 +586,12 @@ static int init_ring_common(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) > 2)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+ /* Now awake, let it get started */
+ if (ring->tail != ring->head) {
+ I915_WRITE_TAIL(engine, ring->tail);
+ (void)I915_READ_TAIL(engine);
+ }
+
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_wakeup(engine);
out:
@@ -642,7 +660,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
- ret = intel_ctx_workarounds_emit(rq);
+ ret = intel_engine_emit_ctx_wa(rq);
if (ret != 0)
return ret;
@@ -660,8 +678,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
-
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (IS_GEN(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
@@ -743,9 +759,18 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline.requests, link) {
GEM_BUG_ON(!request->global_seqno);
- if (!i915_request_completed(request))
- dma_fence_set_error(&request->fence, -EIO);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &request->fence.flags))
+ continue;
+
+ dma_fence_set_error(&request->fence, -EIO);
}
+
+ intel_write_status_page(engine,
+ I915_GEM_HWS_INDEX,
+ intel_engine_last_submit(engine));
+
/* Remaining _unready_ requests will be nop'ed when submitted */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -973,7 +998,7 @@ i965_emit_bb_start(struct i915_request *rq,
}
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
-#define I830_BATCH_LIMIT (256*1024)
+#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
@@ -981,7 +1006,9 @@ i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
+ u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+
+ GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1438,7 +1465,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
struct i915_timeline *timeline;
struct intel_ring *ring;
- unsigned int size;
int err;
intel_engine_setup_common(engine);
@@ -1463,21 +1489,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
- size = PAGE_SIZE;
- if (HAS_BROKEN_CS_TLB(engine->i915))
- size = I830_WA_SIZE;
- err = intel_engine_create_scratch(engine, size);
- if (err)
- goto err_unpin;
-
err = intel_engine_init_common(engine);
if (err)
- goto err_scratch;
+ goto err_unpin;
return 0;
-err_scratch:
- intel_engine_cleanup_scratch(engine);
err_unpin:
intel_ring_unpin(ring);
err_ring:
@@ -1551,7 +1568,7 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@@ -1660,7 +1677,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8a2270b209b0..72edaa7ff411 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -15,6 +15,7 @@
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_gpu_commands.h"
+#include "intel_workarounds.h"
struct drm_printer;
struct i915_sched_attr;
@@ -313,13 +314,6 @@ struct intel_engine_execlists {
struct rb_root_cached queue;
/**
- * @csb_read: control register for Context Switch buffer
- *
- * Note this register is always in mmio.
- */
- u32 __iomem *csb_read;
-
- /**
* @csb_write: control register for Context Switch buffer
*
* Note this register may be either mmio or HWSP shadow.
@@ -339,15 +333,6 @@ struct intel_engine_execlists {
u32 preempt_complete_status;
/**
- * @csb_write_reset: reset value for CSB write pointer
- *
- * As the CSB write pointer maybe either in HWSP or as a field
- * inside an mmio register, we want to reprogram it slightly
- * differently to avoid later confusion.
- */
- u32 csb_write_reset;
-
- /**
* @csb_head: context status buffer head
*/
u8 csb_head;
@@ -451,7 +436,9 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
- struct i915_vma *scratch;
+ struct i915_wa_list ctx_wa_list;
+ struct i915_wa_list wa_list;
+ struct i915_wa_list whitelist;
u32 irq_keep_mask; /* always keep these interrupts */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -908,10 +895,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size);
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
-
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1c2de9b69a19..4350a5270423 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -76,6 +76,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
+ case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
+ return "TRANSCODER_EDP_VDSC";
case POWER_DOMAIN_TRANSCODER_DSI_A:
return "TRANSCODER_DSI_A";
case POWER_DOMAIN_TRANSCODER_DSI_C:
@@ -2028,9 +2030,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
*/
#define ICL_PW_2_POWER_DOMAINS ( \
ICL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
BIT_ULL(POWER_DOMAIN_INIT))
/*
- * - eDP/DSI VDSC
* - KVMR (HW control)
*/
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index abe193815ccc..d2e003d8f3db 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -373,14 +373,12 @@ skl_program_scaler(struct intel_plane *plane,
#define BOFF(x) (((x) & 0xffff) << 16)
static void
-icl_program_input_csc_coeff(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+icl_program_input_csc(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- enum pipe pipe = crtc->pipe;
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
static const u16 input_csc_matrix[][9] = {
@@ -508,28 +506,12 @@ skl_program_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
- plane_state->color_ctl);
-
- if (fb->format->is_yuv && icl_is_hdr_plane(plane))
- icl_program_input_csc_coeff(crtc_state, plane_state);
-
- I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
- I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
-
- I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
+ I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
(plane_state->color_plane[1].offset - surf_addr) | aux_stride);
- if (INTEL_GEN(dev_priv) < 11)
- I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->color_plane[1].y << 16) |
- plane_state->color_plane[1].x);
-
if (icl_is_hdr_plane(plane)) {
u32 cus_ctl = 0;
@@ -551,15 +533,38 @@ skl_program_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
}
- if (!slave && plane_state->scaler_id >= 0)
- skl_program_scaler(plane, crtc_state, plane_state);
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
+ plane_state->color_ctl);
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+ if (fb->format->is_yuv && icl_is_hdr_plane(plane))
+ icl_program_input_csc(plane, crtc_state, plane_state);
+
+ skl_write_plane_wm(plane, crtc_state);
+ I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+ I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
+ I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+
+ I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+
+ if (INTEL_GEN(dev_priv) < 11)
+ I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->color_plane[1].y << 16) |
+ plane_state->color_plane[1].x);
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
+ if (!slave && plane_state->scaler_id >= 0)
+ skl_program_scaler(plane, crtc_state, plane_state);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -589,7 +594,8 @@ icl_update_slave(struct intel_plane *plane,
}
static void
-skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+skl_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
@@ -598,6 +604,8 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ skl_write_plane_wm(plane, crtc_state);
+
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
@@ -819,35 +827,41 @@ vlv_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- vlv_update_clrc(plane_state);
+ I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
+ plane_state->color_plane[0].stride);
+ I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(plane_state);
if (key->flags) {
I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
+ I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
}
- I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
- plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+ I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
- I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
-
- I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
I915_WRITE_FW(SPSURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ vlv_update_clrc(plane_state);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+vlv_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -980,27 +994,32 @@ ivb_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
+ I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ if (IS_IVYBRIDGE(dev_priv))
+ I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+
if (key->flags) {
I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
+ I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
} else {
- I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
}
- I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(SPRCTL(pipe), sprctl);
I915_WRITE_FW(SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
@@ -1009,7 +1028,8 @@ ivb_update_plane(struct intel_plane *plane,
}
static void
-ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+ivb_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1018,7 +1038,7 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
I915_WRITE_FW(SPRCTL(pipe), 0);
- /* Can't leave the scaler enabled... */
+ /* Disable the scaler */
if (IS_IVYBRIDGE(dev_priv))
I915_WRITE_FW(SPRSCALE(pipe), 0);
I915_WRITE_FW(SPRSURF(pipe), 0);
@@ -1148,20 +1168,25 @@ g4x_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
+ I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+
if (key->flags) {
I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
+ I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-
- I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
- I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
I915_WRITE_FW(DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
@@ -1170,7 +1195,8 @@ g4x_update_plane(struct intel_plane *plane,
}
static void
-g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+g4x_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
new file mode 100644
index 000000000000..c56ba0e04044
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Author: Gaurav K Singh <gaurav.k.singh@intel.com>
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+enum ROW_INDEX_BPP {
+ ROW_INDEX_6BPP = 0,
+ ROW_INDEX_8BPP,
+ ROW_INDEX_10BPP,
+ ROW_INDEX_12BPP,
+ ROW_INDEX_15BPP,
+ MAX_ROW_INDEX
+};
+
+enum COLUMN_INDEX_BPC {
+ COLUMN_INDEX_8BPC = 0,
+ COLUMN_INDEX_10BPC,
+ COLUMN_INDEX_12BPC,
+ COLUMN_INDEX_14BPC,
+ COLUMN_INDEX_16BPC,
+ MAX_COLUMN_INDEX
+};
+
+#define DSC_SUPPORTED_VERSION_MIN 1
+
+/* From the DSC v1.11 spec: rc_parameter_set syntax elements that are typically constant */
+static u16 rc_buf_thresh[] = {
+ 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
+ 7744, 7872, 8000, 8064
+};
+
+struct rc_parameters {
+ u16 initial_xmit_delay;
+ u8 first_line_bpg_offset;
+ u16 initial_offset;
+ u8 flatness_min_qp;
+ u8 flatness_max_qp;
+ u8 rc_quant_incr_limit0;
+ u8 rc_quant_incr_limit1;
+ struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+};
+
+/*
+ * Selected Rate Control Related Parameter Recommended Values
+ * from DSC_v1.11 spec & C Model release: DSC_model_20161212
+ */
+static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = {
+{
+ /* 6BPP/8BPC */
+ { 768, 15, 6144, 3, 13, 11, 11, {
+ { 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 },
+ { 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 },
+ { 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 },
+ { 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 }
+ }
+ },
+ /* 6BPP/10BPC */
+ { 768, 15, 6144, 7, 17, 15, 15, {
+ { 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 },
+ { 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 },
+ { 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 },
+ { 14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 },
+ { 17, 18, -12 }
+ }
+ },
+ /* 6BPP/12BPC */
+ { 768, 15, 6144, 11, 21, 19, 19, {
+ { 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 },
+ { 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 },
+ { 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 },
+ { 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 },
+ { 21, 22, -12 }
+ }
+ },
+ /* 6BPP/14BPC */
+ { 768, 15, 6144, 15, 25, 23, 27, {
+ { 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 },
+ { 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 },
+ { 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 },
+ { 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 },
+ { 25, 26, -12 }
+ }
+ },
+ /* 6BPP/16BPC */
+ { 768, 15, 6144, 19, 29, 27, 27, {
+ { 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 },
+ { 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 },
+ { 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 },
+ { 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 },
+ { 29, 30, -12 }
+ }
+ },
+},
+{
+ /* 8BPP/8BPC */
+ { 512, 12, 6144, 3, 12, 11, 11, {
+ { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 },
+ { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+ }
+ },
+ /* 8BPP/10BPC */
+ { 512, 12, 6144, 7, 16, 15, 15, {
+ { 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 },
+ { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+ { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+ }
+ },
+ /* 8BPP/12BPC */
+ { 512, 12, 6144, 11, 20, 19, 19, {
+ { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 },
+ { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+ { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+ { 21, 23, -12 }
+ }
+ },
+ /* 8BPP/14BPC */
+ { 512, 12, 6144, 15, 24, 23, 23, {
+ { 0, 12, 0 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 },
+ { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 },
+ { 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 },
+ { 24, 25, -12 }
+ }
+ },
+ /* 8BPP/16BPC */
+ { 512, 12, 6144, 19, 28, 27, 27, {
+ { 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 },
+ { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 },
+ { 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 },
+ { 28, 29, -12 }
+ }
+ },
+},
+{
+ /* 10BPP/8BPC */
+ { 410, 15, 5632, 3, 12, 11, 11, {
+ { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 },
+ { 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 }
+ }
+ },
+ /* 10BPP/10BPC */
+ { 410, 15, 5632, 7, 16, 15, 15, {
+ { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 },
+ { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 },
+ { 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 }
+ }
+ },
+ /* 10BPP/12BPC */
+ { 410, 15, 5632, 11, 20, 19, 19, {
+ { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 },
+ { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 },
+ { 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 },
+ { 19, 20, -12 }
+ }
+ },
+ /* 10BPP/14BPC */
+ { 410, 15, 5632, 15, 24, 23, 23, {
+ { 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 },
+ { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 },
+ { 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 },
+ { 23, 24, -12 }
+ }
+ },
+ /* 10BPP/16BPC */
+ { 410, 15, 5632, 19, 28, 27, 27, {
+ { 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 },
+ { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 },
+ { 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 },
+ { 27, 28, -12 }
+ }
+ },
+},
+{
+ /* 12BPP/8BPC */
+ { 341, 15, 2048, 3, 12, 11, 11, {
+ { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 },
+ { 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+ }
+ },
+ /* 12BPP/10BPC */
+ { 341, 15, 2048, 7, 16, 15, 15, {
+ { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 },
+ { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+ { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+ }
+ },
+ /* 12BPP/12BPC */
+ { 341, 15, 2048, 11, 20, 19, 19, {
+ { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 },
+ { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+ { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+ { 21, 23, -12 }
+ }
+ },
+ /* 12BPP/14BPC */
+ { 341, 15, 2048, 15, 24, 23, 23, {
+ { 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 },
+ { 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 },
+ { 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 },
+ { 22, 23, -12 }
+ }
+ },
+ /* 12BPP/16BPC */
+ { 341, 15, 2048, 19, 28, 27, 27, {
+ { 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 },
+ { 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 },
+ { 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 },
+ { 26, 27, -12 }
+ }
+ },
+},
+{
+ /* 15BPP/8BPC */
+ { 273, 15, 2048, 3, 12, 11, 11, {
+ { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 },
+ { 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 },
+ { 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 },
+ { 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 }
+ }
+ },
+ /* 15BPP/10BPC */
+ { 273, 15, 2048, 7, 16, 15, 15, {
+ { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 },
+ { 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 },
+ { 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 },
+ { 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 }
+ }
+ },
+ /* 15BPP/12BPC */
+ { 273, 15, 2048, 11, 20, 19, 19, {
+ { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 },
+ { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 },
+ { 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 },
+ { 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 },
+ { 16, 17, -12 }
+ }
+ },
+ /* 15BPP/14BPC */
+ { 273, 15, 2048, 15, 24, 23, 23, {
+ { 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 },
+ { 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 },
+ { 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 },
+ { 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 },
+ { 20, 21, -12 }
+ }
+ },
+ /* 15BPP/16BPC */
+ { 273, 15, 2048, 19, 28, 27, 27, {
+ { 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 },
+ { 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 },
+ { 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 },
+ { 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 },
+ { 24, 25, -12 }
+ }
+ }
+}
+
+};
+
+static int get_row_index_for_rc_params(u16 compressed_bpp)
+{
+ switch (compressed_bpp) {
+ case 6:
+ return ROW_INDEX_6BPP;
+ case 8:
+ return ROW_INDEX_8BPP;
+ case 10:
+ return ROW_INDEX_10BPP;
+ case 12:
+ return ROW_INDEX_12BPP;
+ case 15:
+ return ROW_INDEX_15BPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int get_column_index_for_rc_params(u8 bits_per_component)
+{
+ switch (bits_per_component) {
+ case 8:
+ return COLUMN_INDEX_8BPC;
+ case 10:
+ return COLUMN_INDEX_10BPC;
+ case 12:
+ return COLUMN_INDEX_12BPC;
+ case 14:
+ return COLUMN_INDEX_14BPC;
+ case 16:
+ return COLUMN_INDEX_16BPC;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
+{
+ unsigned long groups_per_line = 0;
+ unsigned long groups_total = 0;
+ unsigned long num_extra_mux_bits = 0;
+ unsigned long slice_bits = 0;
+ unsigned long hrd_delay = 0;
+ unsigned long final_scale = 0;
+ unsigned long rbs_min = 0;
+
+ /* Number of groups used to code each line of a slice */
+ groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
+ DSC_RC_PIXELS_PER_GROUP);
+
+ /* chunksize in Bytes */
+ vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
+ vdsc_cfg->bits_per_pixel,
+ (8 * 16));
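+
+ /*
+ * Illustrative example (not from the spec tables): a 1280 pixel wide
+ * slice at 8 bpp (bits_per_pixel == 128 in 1/16th bpp units) gives
+ * DIV_ROUND_UP(1280 * 128, 128) == 1280 bytes per slice line.
+ */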
+
+ if (vdsc_cfg->convert_rgb)
+ num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4)
+ - 2);
+ else
+ num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4) +
+ 2 * (4 * vdsc_cfg->bits_per_component) - 2;
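+ /*
+ * For example (illustrative only): RGB, 8 bpc, 48-bit mux words gives
+ * num_extra_mux_bits = 3 * (48 + (4 * 8 + 4) - 2) = 246 before the
+ * adjustment loop below.
+ */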
+ /* Number of bits in one Slice */
+ slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
+
+ while ((num_extra_mux_bits > 0) &&
+ ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
+ num_extra_mux_bits--;
+
+ if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
+ vdsc_cfg->initial_scale_value = groups_per_line + 8;
+
+ /* scale_decrement_interval calculation according to DSC spec 1.11 */
+ if (vdsc_cfg->initial_scale_value > 8)
+ vdsc_cfg->scale_decrement_interval = groups_per_line /
+ (vdsc_cfg->initial_scale_value - 8);
+ else
+ vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
+
+ vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
+ (vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
+
+ if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
+ DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
+ return -ERANGE;
+ }
+
+ final_scale = (vdsc_cfg->rc_model_size * 8) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
+ if (vdsc_cfg->slice_height > 1)
+ /*
+ * NflBpgOffset is a 16 bit value with 11 fractional bits,
+ * hence we multiply by 2^11 to preserve the
+ * fractional part
+ */
+ vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
+ (vdsc_cfg->slice_height - 1));
+ else
+ vdsc_cfg->nfl_bpg_offset = 0;
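+
+ /*
+ * E.g. (illustrative): first_line_bpg_offset = 12 and slice_height = 8
+ * take the branch above and give DIV_ROUND_UP(12 << 11, 7) = 3511,
+ * i.e. roughly 1.71 bits per group in U5.11 fixed point.
+ */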
+
+ /* 2^16 - 1 */
+ if (vdsc_cfg->nfl_bpg_offset > 65535) {
+ DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
+ return -ERANGE;
+ }
+
+ /* Number of groups used to code the entire slice */
+ groups_total = groups_per_line * vdsc_cfg->slice_height;
+
+ /* slice_bpg_offset is 16 bit value with 11 fractional bits */
+ vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
+ vdsc_cfg->initial_offset +
+ num_extra_mux_bits) << 11),
+ groups_total);
+
+ if (final_scale > 9) {
+ /*
+ * ScaleIncrementInterval =
+ * finaloffset / ((NflBpgOffset + SliceBpgOffset) * 8 * (finalscale - 1.125));
+ * since (NflBpgOffset + SliceBpgOffset) carries an 11 bit fractional value,
+ * we need to divide by 2^11 when using the pstDscCfg values
+ */
+ vdsc_cfg->scale_increment_interval =
+ (vdsc_cfg->final_offset * (1 << 11)) /
+ ((vdsc_cfg->nfl_bpg_offset +
+ vdsc_cfg->slice_bpg_offset) *
+ (final_scale - 9));
+ } else {
+ /*
+ * If finalScaleValue is less than or equal to 9, a value of 0 should
+ * be used to disable the scale increment at the end of the slice
+ */
+ vdsc_cfg->scale_increment_interval = 0;
+ }
+
+ if (vdsc_cfg->scale_increment_interval > 65535) {
+ DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
+ return -ERANGE;
+ }
+
+ /*
+ * DSC spec mentions that bits_per_pixel specifies the target
+ * bits/pixel (bpp) rate that is used by the encoder,
+ * in steps of 1/16 of a bit per pixel
+ */
+ rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
+ DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel, 16) +
+ groups_per_line * vdsc_cfg->first_line_bpg_offset;
+
+ hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
+ vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
+ vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
+
+ return 0;
+}
+
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg;
+ u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp;
+ u8 i = 0;
+ int row_index = 0;
+ int column_index = 0;
+ u8 line_buf_depth = 0;
+
+ vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
+ vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
+ vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
+ pipe_config->dsc_params.slice_count);
+ /*
+ * Slice Height of 8 works for all currently available panels. So start
+ * with that if pic_height is an integral multiple of 8.
+ * Eventually add logic to try multiple slice heights.
+ */
+ if (vdsc_cfg->pic_height % 8 == 0)
+ vdsc_cfg->slice_height = 8;
+ else if (vdsc_cfg->pic_height % 4 == 0)
+ vdsc_cfg->slice_height = 4;
+ else
+ vdsc_cfg->slice_height = 2;
+
+ /* Values filled from DSC Sink DPCD */
+ vdsc_cfg->dsc_version_major =
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
+ vdsc_cfg->dsc_version_minor =
+ min(DSC_SUPPORTED_VERSION_MIN,
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+
+ vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+ DP_DSC_RGB;
+
+ line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
+ if (!line_buf_depth) {
+ DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+ return -EINVAL;
+ }
+ if (vdsc_cfg->dsc_version_minor == 2)
+ vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
+ else
+ vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+
+ /* Gen 11 does not support YCbCr */
+ vdsc_cfg->enable422 = false;
+ /* Gen 11 does not support VBR */
+ vdsc_cfg->vbr_enable = false;
+ vdsc_cfg->block_pred_enable =
+ intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
+
+ /* Gen 11 only supports integral values of bpp */
+ vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
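+ /* e.g. 8 bpp -> 8 << 4 == 128, i.e. 8.0 in 1/16th bpp units (illustrative) */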
+ vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ /*
+ * six 0s are appended to the lsb of each threshold value
+ * internally in h/w.
+ * Only 8 bits are allowed for programming RcBufThreshold
+ */
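+ /* e.g. the first threshold: 896 >> 6 == 14 (illustrative) */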
+ vdsc_cfg->rc_buf_thresh[i] = rc_buf_thresh[i] >> 6;
+ }
+
+ /*
+ * For 6bpp, RC buffer thresholds 12 and 13 need different values
+ * as per the C model
+ */
+ if (compressed_bpp == 6) {
+ vdsc_cfg->rc_buf_thresh[12] = 0x7C;
+ vdsc_cfg->rc_buf_thresh[13] = 0x7D;
+ }
+
+ row_index = get_row_index_for_rc_params(compressed_bpp);
+ column_index =
+ get_column_index_for_rc_params(vdsc_cfg->bits_per_component);
+
+ if (row_index < 0 || column_index < 0)
+ return -EINVAL;
+
+ vdsc_cfg->first_line_bpg_offset =
+ rc_params[row_index][column_index].first_line_bpg_offset;
+ vdsc_cfg->initial_xmit_delay =
+ rc_params[row_index][column_index].initial_xmit_delay;
+ vdsc_cfg->initial_offset =
+ rc_params[row_index][column_index].initial_offset;
+ vdsc_cfg->flatness_min_qp =
+ rc_params[row_index][column_index].flatness_min_qp;
+ vdsc_cfg->flatness_max_qp =
+ rc_params[row_index][column_index].flatness_max_qp;
+ vdsc_cfg->rc_quant_incr_limit0 =
+ rc_params[row_index][column_index].rc_quant_incr_limit0;
+ vdsc_cfg->rc_quant_incr_limit1 =
+ rc_params[row_index][column_index].rc_quant_incr_limit1;
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ vdsc_cfg->rc_range_params[i].range_min_qp =
+ rc_params[row_index][column_index].rc_range_params[i].range_min_qp;
+ vdsc_cfg->rc_range_params[i].range_max_qp =
+ rc_params[row_index][column_index].rc_range_params[i].range_max_qp;
+ /*
+ * Range BPG Offset uses 2's complement and is only 6 bits wide,
+ * so mask it to keep only 6 bits.
+ */
+ vdsc_cfg->rc_range_params[i].range_bpg_offset =
+ rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset &
+ DSC_RANGE_BPG_OFFSET_MASK;
+ }
+
+ /*
+ * BitsPerComponent value determines mux_word_size:
+ * when BitsPerComponent is 12bpc, muxWordSize will be 64 bits;
+ * when BitsPerComponent is 8 or 10bpc, muxWordSize will be
+ * 48 bits
+ */
+ if (vdsc_cfg->bits_per_component == 8 ||
+ vdsc_cfg->bits_per_component == 10)
+ vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+ else if (vdsc_cfg->bits_per_component == 12)
+ vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
+
+ /* RC_MODEL_SIZE is a constant across all configurations */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ /* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
+ vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
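+ /*
+ * Illustrative, using the 8 bpp table values above: rc_model_size = 8192
+ * and initial_offset = 6144 give (8192 << 3) / 2048 = 32, i.e. an initial
+ * scale of 4.0 in U3.3.
+ */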
+
+ return intel_compute_rc_parameters(vdsc_cfg);
+}
+
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
+{
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ /*
+ * On ICL, VDSC/joining for the eDP transcoder uses a separate power well,
+ * PW2. This requires the POWER_DOMAIN_TRANSCODER_EDP_VDSC power domain.
+ * For any other transcoder, VDSC/joining uses the power well associated
+ * with the pipe/transcoder in use. Hence another reference on the
+ * transcoder power domain will suffice.
+ */
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return POWER_DOMAIN_TRANSCODER_EDP_VDSC;
+ else
+ return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+}
+
+static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 pps_val = 0;
+ u32 rc_buf_thresh_dword[4];
+ u32 rc_range_params_dword[8];
+ u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1;
+ int i = 0;
+
+ /* Populate PICTURE_PARAMETER_SET_0 registers */
+ pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor <<
+ DSC_VER_MIN_SHIFT |
+ vdsc_cfg->bits_per_component << DSC_BPC_SHIFT |
+ vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT;
+ if (vdsc_cfg->block_pred_enable)
+ pps_val |= DSC_BLOCK_PREDICTION;
+ if (vdsc_cfg->convert_rgb)
+ pps_val |= DSC_COLOR_SPACE_CONVERSION;
+ if (vdsc_cfg->enable422)
+ pps_val |= DSC_422_ENABLE;
+ if (vdsc_cfg->vbr_enable)
+ pps_val |= DSC_VBR_ENABLE;
+ DRM_INFO("PPS0 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_1 registers */
+ pps_val = 0;
+ pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
+ DRM_INFO("PPS1 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_2 registers */
+ pps_val = 0;
+ pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
+ DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
+ DRM_INFO("PPS2 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_3 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
+ DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
+ DRM_INFO("PPS3 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_4 registers */
+ pps_val = 0;
+ pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
+ DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
+ DRM_INFO("PPS4 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_5 registers */
+ pps_val = 0;
+ pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
+ DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
+ DRM_INFO("PPS5 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_6 registers */
+ pps_val = 0;
+ pps_val |= DSC_INITIAL_SCALE_VALUE(vdsc_cfg->initial_scale_value) |
+ DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
+ DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
+ DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
+ DRM_INFO("PPS6 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_7 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
+ DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
+ DRM_INFO("PPS7 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_8 registers */
+ pps_val = 0;
+ pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
+ DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
+ DRM_INFO("PPS8 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_9 registers */
+ pps_val = 0;
+ pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
+ DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
+ DRM_INFO("PPS9 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_10 registers */
+ pps_val = 0;
+ pps_val |= DSC_RC_QUANT_INC_LIMIT0(vdsc_cfg->rc_quant_incr_limit0) |
+ DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
+ DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
+ DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
+ DRM_INFO("PPS10 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
+ }
+
+ /* Populate Picture parameter set 16 */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_CHUNK_SIZE(vdsc_cfg->slice_chunk_size) |
+ DSC_SLICE_PER_LINE((vdsc_cfg->pic_width / num_vdsc_instances) /
+ vdsc_cfg->slice_width) |
+ DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
+ vdsc_cfg->slice_height);
+ DRM_INFO("PPS16 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
+ }
+
+ /* Populate the RC_BUF_THRESH registers */
+ memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword));
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ rc_buf_thresh_dword[i / 4] |=
+ (u32)(vdsc_cfg->rc_buf_thresh[i] <<
+ BITS_PER_BYTE * (i % 4));
+ DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
+ rc_buf_thresh_dword[i / 4]);
+ }
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(DSCC_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
+ }
+ } else {
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
+ }
+ }
+
+ /* Populate the RC_RANGE_PARAMETERS registers */
+ memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword));
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ rc_range_params_dword[i / 2] |=
+ (u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset <<
+ RC_BPG_OFFSET_SHIFT) |
+ (vdsc_cfg->rc_range_params[i].range_max_qp <<
+ RC_MAX_QP_SHIFT) |
+ (vdsc_cfg->rc_range_params[i].range_min_qp <<
+ RC_MIN_QP_SHIFT)) << 16 * (i % 2));
+ DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
+ rc_range_params_dword[i / 2]);
+ }
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
+ }
+ } else {
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
+ }
+ }
+}
+
+static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
+
+ /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
+ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp);
+
+ /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
+ drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg);
+
+ intel_dig_port->write_infoframe(encoder, crtc_state,
+ DP_SDP_PPS, &dp_dsc_pps_sdp,
+ sizeof(dp_dsc_pps_sdp));
+}
+
+void intel_dsc_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ u32 dss_ctl1_val = 0;
+ u32 dss_ctl2_val = 0;
+
+ if (!crtc_state->dsc_params.compression_enable)
+ return;
+
+ /* Enable Power wells for VDSC/joining */
+ intel_display_power_get(dev_priv,
+ intel_dsc_power_domain(crtc_state));
+
+ intel_configure_pps_for_dsc_encoder(encoder, crtc_state);
+
+ intel_dp_write_dsc_pps_sdp(encoder, crtc_state);
+
+ if (crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ } else {
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+ }
+ dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
+ if (crtc_state->dsc_params.dsc_split) {
+ dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
+ dss_ctl1_val |= JOINER_ENABLE;
+ }
+ I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+ I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+}
+
+void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ u32 dss_ctl1_val = 0, dss_ctl2_val = 0;
+
+ if (!old_crtc_state->dsc_params.compression_enable)
+ return;
+
+ if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ } else {
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+ }
+ dss_ctl1_val = I915_READ(dss_ctl1_reg);
+ if (dss_ctl1_val & JOINER_ENABLE)
+ dss_ctl1_val &= ~JOINER_ENABLE;
+ I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+
+ dss_ctl2_val = I915_READ(dss_ctl2_reg);
+ if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE ||
+ dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE)
+ dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE |
+ RIGHT_BRANCH_VDSC_ENABLE);
+ I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+
+ /* Disable Power wells for VDSC/joining */
+ intel_display_power_put(dev_priv,
+ intel_dsc_power_domain(old_crtc_state));
+}
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index ca1f78a42b17..4f41e326f3f3 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,58 +48,112 @@
* - Public functions to init or apply the given workaround type.
*/
-static void wa_add(struct drm_i915_private *i915,
- i915_reg_t reg, const u32 mask, const u32 val)
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
{
- struct i915_workarounds *wa = &i915->workarounds;
- unsigned int start = 0, end = wa->count;
- unsigned int addr = i915_mmio_reg_offset(reg);
- struct i915_wa_reg *r;
+ wal->name = name;
+}
+
+#define WA_LIST_CHUNK (1 << 4)
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+ /* Trim unused entries. */
+ if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
+ struct i915_wa *list = kmemdup(wal->list,
+ wal->count * sizeof(*list),
+ GFP_KERNEL);
+
+ if (list) {
+ kfree(wal->list);
+ wal->list = list;
+ }
+ }
+
+ if (!wal->count)
+ return;
+
+ DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+ wal->wa_count, wal->name);
+}
+
+static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+ unsigned int addr = i915_mmio_reg_offset(wa->reg);
+ unsigned int start = 0, end = wal->count;
+ const unsigned int grow = WA_LIST_CHUNK;
+ struct i915_wa *wa_;
+
+ GEM_BUG_ON(!is_power_of_2(grow));
+
+ if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+ struct i915_wa *list;
+
+ list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+ GFP_KERNEL);
+ if (!list) {
+ DRM_ERROR("No space for workaround init!\n");
+ return;
+ }
+
+ if (wal->list)
+ memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+ wal->list = list;
+ }
while (start < end) {
unsigned int mid = start + (end - start) / 2;
- if (wa->reg[mid].addr < addr) {
+ if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
start = mid + 1;
- } else if (wa->reg[mid].addr > addr) {
+ } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
end = mid;
} else {
- r = &wa->reg[mid];
+ wa_ = &wal->list[mid];
- if ((mask & ~r->mask) == 0) {
+ if ((wa->mask & ~wa_->mask) == 0) {
DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
- addr, r->mask, r->value);
+ i915_mmio_reg_offset(wa_->reg),
+ wa_->mask, wa_->val);
- r->value &= ~mask;
+ wa_->val &= ~wa->mask;
}
- r->value |= val;
- r->mask |= mask;
+ wal->wa_count++;
+ wa_->val |= wa->val;
+ wa_->mask |= wa->mask;
return;
}
}
- if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
- DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
- addr, mask, val);
- return;
- }
-
- r = &wa->reg[wa->count++];
- r->addr = addr;
- r->value = val;
- r->mask = mask;
+ wal->wa_count++;
+ wa_ = &wal->list[wal->count++];
+ *wa_ = *wa;
- while (r-- > wa->reg) {
- GEM_BUG_ON(r[0].addr == r[1].addr);
- if (r[1].addr > r[0].addr)
+ while (wa_-- > wal->list) {
+ GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
+ i915_mmio_reg_offset(wa_[1].reg));
+ if (i915_mmio_reg_offset(wa_[1].reg) >
+ i915_mmio_reg_offset(wa_[0].reg))
break;
- swap(r[1], r[0]);
+ swap(wa_[1], wa_[0]);
}
}
-#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))
+static void
+__wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val
+ };
+
+ _wa_add(wal, &wa);
+}
+
+#define WA_REG(addr, mask, val) __wa_add(wal, (addr), (mask), (val))
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
@@ -110,8 +164,10 @@ static void wa_add(struct drm_i915_private *i915,
#define WA_SET_FIELD_MASKED(addr, mask, value) \
WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
-static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
@@ -155,17 +211,14 @@ static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
-
- return 0;
}
-static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen8_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen8_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -185,31 +238,28 @@ static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
-
- return 0;
+ (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
-static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen8_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen8_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* Improve HiZ throughput on CHV. */
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
-
- return 0;
}
-static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- if (HAS_LLC(dev_priv)) {
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+ if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
@@ -228,7 +278,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
- if (!IS_COFFEELAKE(dev_priv))
+ if (!IS_COFFEELAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
@@ -271,9 +321,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
- if (IS_SKYLAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv))
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
@@ -300,14 +348,14 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEN9_LP(i915))
WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
-
- return 0;
}
-static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
+static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -318,7 +366,7 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
- if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
+ if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
continue;
/*
@@ -327,12 +375,12 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
*
* -> 0 <= ss <= 3;
*/
- ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
+ ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
- return 0;
+ return;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
@@ -342,28 +390,19 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
GEN9_IZ_HASHING(2, vals[2]) |
GEN9_IZ_HASHING(1, vals[1]) |
GEN9_IZ_HASHING(0, vals[0]));
-
- return 0;
}
-static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void skl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
-
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
-
- return skl_tune_iz_hashing(dev_priv);
+ gen9_ctx_workarounds_init(engine);
+ skl_tune_iz_hashing(engine);
}
-static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -372,57 +411,41 @@ static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaToEnableHwFixForPushConstHWBug:bxt */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- return 0;
}
-static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
-
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE);
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+ if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:kbl */
WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- return 0;
}
-static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void glk_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:glk */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- return 0;
}
-static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:cfl */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
@@ -431,18 +454,19 @@ static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaDisableSbeCacheDispatchPortSharing:cfl */
WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- return 0;
}
-static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
/* WaForceContextSaveRestoreNonCoherent:cnl */
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
@@ -450,7 +474,7 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
+ if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
@@ -470,16 +494,17 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaDisableEarlyEOT:cnl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
-
- return 0;
}
-static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
/* Wa_1604370585:icl (pre-prod)
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
PUSH_CONSTANT_DEREF_DISABLE);
@@ -495,7 +520,7 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* Wa_2006611047:icl (pre-prod)
* Formerly known as WaDisableImprovedTdlClkGating
*/
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
@@ -504,70 +529,67 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN11_STATE_CACHE_REDIRECT_TO_CS);
/* Wa_2006665173:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
-
- return 0;
}
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
- int err = 0;
-
- dev_priv->workarounds.count = 0;
-
- if (INTEL_GEN(dev_priv) < 8)
- err = 0;
- else if (IS_BROADWELL(dev_priv))
- err = bdw_ctx_workarounds_init(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- err = chv_ctx_workarounds_init(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
- err = skl_ctx_workarounds_init(dev_priv);
- else if (IS_BROXTON(dev_priv))
- err = bxt_ctx_workarounds_init(dev_priv);
- else if (IS_KABYLAKE(dev_priv))
- err = kbl_ctx_workarounds_init(dev_priv);
- else if (IS_GEMINILAKE(dev_priv))
- err = glk_ctx_workarounds_init(dev_priv);
- else if (IS_COFFEELAKE(dev_priv))
- err = cfl_ctx_workarounds_init(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- err = cnl_ctx_workarounds_init(dev_priv);
- else if (IS_ICELAKE(dev_priv))
- err = icl_ctx_workarounds_init(dev_priv);
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+ wa_init_start(wal, "context");
+
+ if (INTEL_GEN(i915) < 8)
+ return;
+ else if (IS_BROADWELL(i915))
+ bdw_ctx_workarounds_init(engine);
+ else if (IS_CHERRYVIEW(i915))
+ chv_ctx_workarounds_init(engine);
+ else if (IS_SKYLAKE(i915))
+ skl_ctx_workarounds_init(engine);
+ else if (IS_BROXTON(i915))
+ bxt_ctx_workarounds_init(engine);
+ else if (IS_KABYLAKE(i915))
+ kbl_ctx_workarounds_init(engine);
+ else if (IS_GEMINILAKE(i915))
+ glk_ctx_workarounds_init(engine);
+ else if (IS_COFFEELAKE(i915))
+ cfl_ctx_workarounds_init(engine);
+ else if (IS_CANNONLAKE(i915))
+ cnl_ctx_workarounds_init(engine);
+ else if (IS_ICELAKE(i915))
+ icl_ctx_workarounds_init(engine);
else
- MISSING_CASE(INTEL_GEN(dev_priv));
- if (err)
- return err;
+ MISSING_CASE(INTEL_GEN(i915));
- DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
- dev_priv->workarounds.count);
- return 0;
+ wa_init_finish(wal);
}
-int intel_ctx_workarounds_emit(struct i915_request *rq)
+int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
- struct i915_workarounds *w = &rq->i915->workarounds;
+ struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
+ struct i915_wa *wa;
+ unsigned int i;
u32 *cs;
- int ret, i;
+ int ret;
- if (w->count == 0)
+ if (wal->count == 0)
return 0;
ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
- cs = intel_ring_begin(rq, (w->count * 2 + 2));
+ cs = intel_ring_begin(rq, (wal->count * 2 + 2));
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(w->count);
- for (i = 0; i < w->count; i++) {
- *cs++ = w->reg[i].addr;
- *cs++ = w->reg[i].value;
+ *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = wa->val;
}
*cs++ = MI_NOOP;
@@ -580,160 +602,149 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
return 0;
}
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = val,
+ .val = _MASKED_BIT_ENABLE(val)
+ };
+
+ _wa_add(wal, &wa);
+}
+
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val)
{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val
+ };
+
+ _wa_add(wal, &wa);
}
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
+ wa_write_masked_or(wal, reg, ~0, val);
}
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
- _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+ wa_write_masked_or(wal, reg, val, val);
+}
- /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
- I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
- GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+{
+ struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaDisableKillLogic:bxt,skl,kbl */
- if (!IS_COFFEELAKE(dev_priv))
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- ECOCHK_DIS_TLB);
+ if (!IS_COFFEELAKE(i915))
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ ECOCHK_DIS_TLB);
- if (HAS_LLC(dev_priv)) {
+ if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
- I915_WRITE(MMCD_MISC_CTRL,
- I915_READ(MMCD_MISC_CTRL) |
- MMCD_PCLA |
- MMCD_HOTSPOT_EN);
+ wa_write_or(wal,
+ MMCD_MISC_CTRL,
+ MMCD_PCLA | MMCD_HOTSPOT_EN);
}
/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
-
- /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
- if (IS_GEN9_LP(dev_priv)) {
- u32 val = I915_READ(GEN8_L3SQCREG1);
-
- val &= ~L3_PRIO_CREDITS_MASK;
- val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
- I915_WRITE(GEN8_L3SQCREG1, val);
- }
-
- /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
- I915_WRITE(GEN8_L3SQCREG4,
- I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ BDW_DISABLE_HDC_INVALIDATION);
}
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:skl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:skl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaDisablePooledEuLoadBalancingFix:bxt */
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+ gen9_gt_workarounds_init(i915);
/* WaInPlaceDecompressionHang:bxt */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:kbl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+ if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
/* WaDisableGafsUnitClkGating:kbl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- for_each_engine(engine, dev_priv, tmp) {
- if (engine->id == RCS)
- continue;
-
- I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
- }
- }
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ gen9_gt_workarounds_init(i915);
}
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:cfl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:cfl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:cfl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void wa_init_mcr(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
- u32 mcr;
+ struct i915_wa_list *wal = &dev_priv->gt_wa_list;
u32 mcr_slice_subslice_mask;
/*
@@ -770,8 +781,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
}
- mcr = I915_READ(GEN8_MCR_SELECTOR);
-
if (INTEL_GEN(dev_priv) >= 11)
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
@@ -789,186 +798,220 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
* occasions, such as INSTDONE, where this value is dependent
* on s/ss combo, the read should be done with read_subslice_reg.
*/
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
- I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+ wa_write_masked_or(wal,
+ GEN8_MCR_SELECTOR,
+ mcr_slice_subslice_mask,
+ intel_calculate_mcr_s_ss_select(dev_priv));
}
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_mcr(i915);
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
/* WaInPlaceDecompressionHang:cnl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaEnablePreemptionGranularityControlByUMD:cnl */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* This is not an Wa. Enable for better image quality */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+ wa_init_mcr(i915);
/* WaInPlaceDecompressionHang:icl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaPipelineFlushCoherentLines:icl */
- I915_WRITE(GEN8_L3SQCREG4,
- I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* Wa_1405543622:icl
- * Formerly known as WaGAPZPriorityScheme
- */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) |
- GEN11_ARBITRATION_PRIO_ORDER_MASK);
-
- /* Wa_1604223664:icl
- * Formerly known as WaL3BankAddressHashing
- */
- I915_WRITE(GEN8_GARBCNTL,
- (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
- GEN11_HASH_CTRL_EXCL_BIT0);
- I915_WRITE(GEN11_GLBLINVL,
- (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
- GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaModifyGamTlbPartitioning:icl */
- I915_WRITE(GEN11_GACB_PERF_CTRL,
- (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
- GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
-
- /* Wa_1405733216:icl
- * Formerly known as WaDisableCleanEvicts
- */
- I915_WRITE(GEN8_L3SQCREG4,
- I915_READ(GEN8_L3SQCREG4) |
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ wa_write_masked_or(wal,
+ GEN11_GACB_PERF_CTRL,
+ GEN11_HASH_CTRL_MASK,
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
*/
- I915_WRITE(GEN11_LSN_UNSLCVC,
- I915_READ(GEN11_LSN_UNSLCVC) |
- GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
- GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+ wa_write_or(wal,
+ GEN11_LSN_UNSLCVC,
+ GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+ GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
/* Wa_220166154:icl
* Formerly known as WaDisCtxReload
*/
- I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
- I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
- GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+ wa_write_or(wal,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
/* Wa_1405779004:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
- MSCUNIT_CLKGATE_DIS);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ MSCUNIT_CLKGATE_DIS);
/* Wa_1406680159:icl */
- I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
- GWUNIT_CLKGATE_DIS);
-
- /* Wa_1604302699:icl */
- I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
- I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
- GEN11_I2M_WRITE_DISABLE);
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
/* Wa_1406838659:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
- I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
- I915_READ(INF_UNIT_LEVEL_CLKGATE) |
- CGPSF_CLKGATE_DIS);
-
- /* WaForwardProgressSoftReset:icl */
- I915_WRITE(GEN10_SCRATCH_LNCF2,
- I915_READ(GEN10_SCRATCH_LNCF2) |
- PMFLUSHDONE_LNICRSDROP |
- PMFLUSH_GAPL3UNBLOCK |
- PMFLUSHDONE_LNEBLK);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ INF_UNIT_LEVEL_CLKGATE,
+ CGPSF_CLKGATE_DIS);
/* Wa_1406463099:icl
* Formerly known as WaGamTlbPendError
*/
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_L3_COH_PIPE);
-
- /* Wa_1406609255:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
- I915_WRITE(GEN7_SARCHKMD,
- I915_READ(GEN7_SARCHKMD) |
- GEN7_DISABLE_DEMAND_PREFETCH |
- GEN7_DISABLE_SAMPLER_PREFETCH);
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
- if (INTEL_GEN(dev_priv) < 8)
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_start(wal, "GT");
+
+ if (INTEL_GEN(i915) < 8)
+ return;
+ else if (IS_BROADWELL(i915))
return;
- else if (IS_BROADWELL(dev_priv))
- bdw_gt_workarounds_apply(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- chv_gt_workarounds_apply(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
- skl_gt_workarounds_apply(dev_priv);
- else if (IS_BROXTON(dev_priv))
- bxt_gt_workarounds_apply(dev_priv);
- else if (IS_KABYLAKE(dev_priv))
- kbl_gt_workarounds_apply(dev_priv);
- else if (IS_GEMINILAKE(dev_priv))
- glk_gt_workarounds_apply(dev_priv);
- else if (IS_COFFEELAKE(dev_priv))
- cfl_gt_workarounds_apply(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_gt_workarounds_apply(dev_priv);
- else if (IS_ICELAKE(dev_priv))
- icl_gt_workarounds_apply(dev_priv);
+ else if (IS_CHERRYVIEW(i915))
+ return;
+ else if (IS_SKYLAKE(i915))
+ skl_gt_workarounds_init(i915);
+ else if (IS_BROXTON(i915))
+ bxt_gt_workarounds_init(i915);
+ else if (IS_KABYLAKE(i915))
+ kbl_gt_workarounds_init(i915);
+ else if (IS_GEMINILAKE(i915))
+ glk_gt_workarounds_init(i915);
+ else if (IS_COFFEELAKE(i915))
+ cfl_gt_workarounds_init(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_gt_workarounds_init(i915);
+ else if (IS_ICELAKE(i915))
+ icl_gt_workarounds_init(i915);
else
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(INTEL_GEN(i915));
+
+ wa_init_finish(wal);
}
-struct whitelist {
- i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
- unsigned int count;
- u32 nopid;
-};
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+ const struct i915_wa_list *wal)
+{
+ enum forcewake_domains fw = 0;
+ struct i915_wa *wa;
+ unsigned int i;
-static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ fw |= intel_uncore_forcewake_for_reg(dev_priv,
+ wa->reg,
+ FW_REG_READ |
+ FW_REG_WRITE);
+
+ return fw;
+}
+
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
{
- if (GEM_DEBUG_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
+ enum forcewake_domains fw;
+ unsigned long flags;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ if (!wal->count)
return;
- w->reg[w->count++] = reg;
+ fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 val = I915_READ_FW(wa->reg);
+
+ val &= ~wa->mask;
+ val |= wa->val;
+
+ I915_WRITE_FW(wa->reg, val);
+ }
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+ DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
+}
+
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+{
+ wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
+}
+
+static bool
+wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
+{
+ if ((cur ^ wa->val) & wa->mask) {
+ DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+ name, from, i915_mmio_reg_offset(wa->reg), cur,
+ cur & wa->mask, wa->val, wa->mask);
+
+ return false;
+ }
+
+ return true;
}
-static void bdw_whitelist_build(struct whitelist *w)
+static bool wa_list_verify(struct drm_i915_private *dev_priv,
+ const struct i915_wa_list *wal,
+ const char *from)
{
+ struct i915_wa *wa;
+ unsigned int i;
+ bool ok = true;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ ok &= wa_verify(wa, I915_READ(wa->reg), wal->name, from);
+
+ return ok;
}
-static void chv_whitelist_build(struct whitelist *w)
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+ const char *from)
{
+ return wa_list_verify(dev_priv, &dev_priv->gt_wa_list, from);
}
-static void gen9_whitelist_build(struct whitelist *w)
+static void
+whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+{
+ struct i915_wa wa = {
+ .reg = reg
+ };
+
+ if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
+ return;
+
+ _wa_add(wal, &wa);
+}
+
+static void gen9_whitelist_build(struct i915_wa_list *w)
{
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
@@ -980,7 +1023,7 @@ static void gen9_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_HDC_CHICKEN1);
}
-static void skl_whitelist_build(struct whitelist *w)
+static void skl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -988,12 +1031,12 @@ static void skl_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void bxt_whitelist_build(struct whitelist *w)
+static void bxt_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
}
-static void kbl_whitelist_build(struct whitelist *w)
+static void kbl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -1001,7 +1044,7 @@ static void kbl_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void glk_whitelist_build(struct whitelist *w)
+static void glk_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -1009,18 +1052,18 @@ static void glk_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
-static void cfl_whitelist_build(struct whitelist *w)
+static void cfl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
}
-static void cnl_whitelist_build(struct whitelist *w)
+static void cnl_whitelist_build(struct i915_wa_list *w)
{
/* WaEnablePreemptionGranularityControlByUMD:cnl */
whitelist_reg(w, GEN8_CS_CHICKEN1);
}
-static void icl_whitelist_build(struct whitelist *w)
+static void icl_whitelist_build(struct i915_wa_list *w)
{
/* WaAllowUMDToModifyHalfSliceChicken7:icl */
whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
@@ -1029,22 +1072,21 @@ static void icl_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN10_SAMPLER_MODE);
}
-static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
- struct whitelist *w)
+void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *w = &engine->whitelist;
GEM_BUG_ON(engine->id != RCS);
- w->count = 0;
- w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));
+ wa_init_start(w, "whitelist");
if (INTEL_GEN(i915) < 8)
- return NULL;
+ return;
else if (IS_BROADWELL(i915))
- bdw_whitelist_build(w);
+ return;
else if (IS_CHERRYVIEW(i915))
- chv_whitelist_build(w);
+ return;
else if (IS_SKYLAKE(i915))
skl_whitelist_build(w);
else if (IS_BROXTON(i915))
@@ -1062,39 +1104,180 @@ static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
else
MISSING_CASE(INTEL_GEN(i915));
- return w;
+ wa_init_finish(w);
}
-static void whitelist_apply(struct intel_engine_cs *engine,
- const struct whitelist *w)
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ const struct i915_wa_list *wal = &engine->whitelist;
const u32 base = engine->mmio_base;
+ struct i915_wa *wa;
unsigned int i;
- if (!w)
+ if (!wal->count)
return;
- intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
-
- for (i = 0; i < w->count; i++)
- I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i),
- i915_mmio_reg_offset(w->reg[i]));
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(wa->reg));
/* And clear the rest just in case of garbage */
for (; i < RING_MAX_NONPRIV_SLOTS; i++)
- I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid);
+ I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(RING_NOPID(base)));
- intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+ DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void rcs_engine_wa_init(struct intel_engine_cs *engine)
{
- struct whitelist w;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (IS_ICELAKE(i915)) {
+ /* This is not a Wa. Enable for better image quality */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+ /* WaPipelineFlushCoherentLines:icl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /*
+ * Wa_1405543622:icl
+ * Formerly known as WaGAPZPriorityScheme
+ */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+ /*
+ * Wa_1604223664:icl
+ * Formerly known as WaL3BankAddressHashing
+ */
+ wa_write_masked_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_HASH_CTRL_EXCL_MASK,
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ wa_write_masked_or(wal,
+ GEN11_GLBLINVL,
+ GEN11_BANK_HASH_ADDR_EXCL_MASK,
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
- whitelist_apply(engine, whitelist_build(engine, &w));
+ /*
+ * Wa_1405733216:icl
+ * Formerly known as WaDisableCleanEvicts
+ */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+ /* WaForwardProgressSoftReset:icl */
+ wa_write_or(wal,
+ GEN10_SCRATCH_LNCF2,
+ PMFLUSHDONE_LNICRSDROP |
+ PMFLUSH_GAPL3UNBLOCK |
+ PMFLUSHDONE_LNEBLK);
+
+ /* Wa_1406609255:icl (pre-prod) */
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_DEMAND_PREFETCH |
+ GEN7_DISABLE_SAMPLER_PREFETCH);
+ }
+
+ if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+ wa_masked_en(wal,
+ GEN7_FF_SLICE_CS_CHICKEN1,
+ GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+ }
+
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+ /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN9_GAPS_TSV_CREDIT_DISABLE);
+ }
+
+ if (IS_BROXTON(i915)) {
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ wa_masked_en(wal,
+ FF_SLICE_CS_CHICKEN2,
+ GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ }
+
+ if (IS_GEN9(i915)) {
+ /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+ wa_masked_en(wal,
+ GEN9_CSFE_CHICKEN1_RCS,
+ GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+ wa_write_or(wal,
+ BDW_SCRATCH1,
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+ if (IS_GEN9_LP(i915))
+ wa_write_masked_or(wal,
+ GEN8_L3SQCREG1,
+ L3_PRIO_CREDITS_MASK,
+ L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+ }
+}
+
+static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ /* WaKBLVECSSemaphoreWaitPoll:kbl */
+ if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+ wa_write(wal,
+ RING_SEMA_WAIT_POLL(engine->mmio_base),
+ 1);
+ }
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+ return;
+
+ wa_init_start(wal, engine->name);
+
+ if (engine->id == RCS)
+ rcs_engine_wa_init(engine);
+ else
+ xcs_engine_wa_init(engine);
+
+ wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+ wa_list_apply(engine->i915, &engine->wa_list);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from)
+{
+ return wa_list_verify(engine->i915, &engine->wa_list, from);
+}
+
#include "selftests/intel_workarounds.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index b11d0623e626..7c734714b05e 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -7,11 +7,39 @@
#ifndef _I915_WORKAROUNDS_H_
#define _I915_WORKAROUNDS_H_
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
-int intel_ctx_workarounds_emit(struct i915_request *rq);
+#include <linux/slab.h>
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+struct i915_wa {
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+};
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
+struct i915_wa_list {
+ const char *name;
+ struct i915_wa *list;
+ unsigned int count;
+ unsigned int wa_count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+ kfree(wal->list);
+ memset(wal, 0, sizeof(*wal));
+}
+
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
+int intel_engine_emit_ctx_wa(struct i915_request *rq);
+
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+ const char *from);
+
+void intel_engine_init_whitelist(struct intel_engine_cs *engine);
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 69fe86b30fbb..a9ed0ecc94e2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -170,7 +170,7 @@ static int igt_ppgtt_alloc(void *arg)
* This should ensure that we do not run into the oomkiller during
* the test and take down the machine wilfully.
*/
- limit = totalram_pages << PAGE_SHIFT;
+ limit = totalram_pages() << PAGE_SHIFT;
limit = min(ppgtt->vm.total, limit);
/* Check we can allocate the entire range */
@@ -1244,7 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
- const u64 limit = totalram_pages << PAGE_SHIFT;
+ const u64 limit = totalram_pages() << PAGE_SHIFT;
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
new file mode 100644
index 000000000000..208a966da8ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_reset.h"
+
+#include "../i915_drv.h"
+#include "../intel_ringbuffer.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ pr_debug("%s: current gpu_error=%08lx\n",
+ __func__, i915->gpu_error.flags);
+
+ while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
+ wait_event(i915->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &i915->gpu_error.flags));
+
+ for_each_engine(engine, i915, id) {
+ while (test_and_set_bit(I915_RESET_ENGINE + id,
+ &i915->gpu_error.flags))
+ wait_on_bit(&i915->gpu_error.flags,
+ I915_RESET_ENGINE + id,
+ TASK_UNINTERRUPTIBLE);
+ }
+}
+
+void igt_global_reset_unlock(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
new file mode 100644
index 000000000000..5f0234d045d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_RESET_H__
+#define __I915_SELFTESTS_IGT_RESET_H__
+
+#include "../i915_drv.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915);
+void igt_global_reset_unlock(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
new file mode 100644
index 000000000000..8cd34f6e6859
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -0,0 +1,199 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_spinner.h"
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+{
+ unsigned int mode;
+ void *vaddr;
+ int err;
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+ memset(spin, 0, sizeof(*spin));
+ spin->i915 = i915;
+
+ spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(spin->hws)) {
+ err = PTR_ERR(spin->hws);
+ goto err;
+ }
+
+ spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(spin->obj)) {
+ err = PTR_ERR(spin->obj);
+ goto err_hws;
+ }
+
+ i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+ vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+ spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+ mode = i915_coherent_map_type(i915);
+ vaddr = i915_gem_object_pin_map(spin->obj, mode);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_unpin_hws;
+ }
+ spin->batch = vaddr;
+
+ return 0;
+
+err_unpin_hws:
+ i915_gem_object_unpin_map(spin->hws);
+err_obj:
+ i915_gem_object_put(spin->obj);
+err_hws:
+ i915_gem_object_put(spin->hws);
+err:
+ return err;
+}
+
+static unsigned int seqno_offset(u64 fence)
+{
+ return offset_in_page(sizeof(u32) * fence);
+}
+
+static u64 hws_address(const struct i915_vma *hws,
+ const struct i915_request *rq)
+{
+ return hws->node.start + seqno_offset(rq->fence.context);
+}
+
+static int emit_recurse_batch(struct igt_spinner *spin,
+ struct i915_request *rq,
+ u32 arbitration_command)
+{
+ struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+ struct i915_vma *hws, *vma;
+ u32 *batch;
+ int err;
+
+ vma = i915_vma_instance(spin->obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ hws = i915_vma_instance(spin->hws, vm, NULL);
+ if (IS_ERR(hws))
+ return PTR_ERR(hws);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+
+ err = i915_vma_pin(hws, 0, 0, PIN_USER);
+ if (err)
+ goto unpin_vma;
+
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto unpin_hws;
+
+ if (!i915_gem_object_has_active_reference(vma->obj)) {
+ i915_gem_object_get(vma->obj);
+ i915_gem_object_set_active_reference(vma->obj);
+ }
+
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+ goto unpin_hws;
+
+ if (!i915_gem_object_has_active_reference(hws->obj)) {
+ i915_gem_object_get(hws->obj);
+ i915_gem_object_set_active_reference(hws->obj);
+ }
+
+ batch = spin->batch;
+
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+
+ *batch++ = arbitration_command;
+
+ *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = upper_32_bits(vma->node.start);
+ *batch++ = MI_BATCH_BUFFER_END; /* not reached */
+
+ i915_gem_chipset_flush(spin->i915);
+
+ err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+
+unpin_hws:
+ i915_vma_unpin(hws);
+unpin_vma:
+ i915_vma_unpin(vma);
+ return err;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = emit_recurse_batch(spin, rq, arbitration_command);
+ if (err) {
+ i915_request_add(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static u32
+hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
+{
+ u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
+
+ return READ_ONCE(*seqno);
+}
+
+void igt_spinner_end(struct igt_spinner *spin)
+{
+ *spin->batch = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(spin->i915);
+}
+
+void igt_spinner_fini(struct igt_spinner *spin)
+{
+ igt_spinner_end(spin);
+
+ i915_gem_object_unpin_map(spin->obj);
+ i915_gem_object_put(spin->obj);
+
+ i915_gem_object_unpin_map(spin->hws);
+ i915_gem_object_put(spin->hws);
+}
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
+{
+ if (!wait_event_timeout(rq->execute,
+ READ_ONCE(rq->global_seqno),
+ msecs_to_jiffies(10)))
+ return false;
+
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 1000));
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
new file mode 100644
index 000000000000..391777c76dc7
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
+#define __I915_SELFTESTS_IGT_SPINNER_H__
+
+#include "../i915_selftest.h"
+
+#include "../i915_drv.h"
+#include "../i915_request.h"
+#include "../intel_ringbuffer.h"
+#include "../i915_gem_context.h"
+
+struct igt_spinner {
+ struct drm_i915_private *i915;
+ struct drm_i915_gem_object *hws;
+ struct drm_i915_gem_object *obj;
+ u32 *batch;
+ void *seqno;
+};
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+void igt_spinner_fini(struct igt_spinner *spin);
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command);
+void igt_spinner_end(struct igt_spinner *spin);
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 33494d922fab..5910da3e7d79 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -27,6 +27,7 @@
#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"
+#include "igt_reset.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
@@ -308,6 +309,7 @@ static int igt_hang_sanitycheck(void *arg)
goto unlock;
for_each_engine(engine, i915, id) {
+ struct igt_wedge_me w;
long timeout;
if (!intel_engine_can_store_dword(engine))
@@ -328,9 +330,14 @@ static int igt_hang_sanitycheck(void *arg)
i915_request_add(rq);
- timeout = i915_request_wait(rq,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ timeout = 0;
+ igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
+ timeout = i915_request_wait(rq,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (i915_terminally_wedged(&i915->gpu_error))
+ timeout = -EIO;
+
i915_request_put(rq);
if (timeout < 0) {
@@ -348,40 +355,6 @@ unlock:
return err;
}
-static void global_reset_lock(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- pr_debug("%s: current gpu_error=%08lx\n",
- __func__, i915->gpu_error.flags);
-
- while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &i915->gpu_error.flags));
-
- for_each_engine(engine, i915, id) {
- while (test_and_set_bit(I915_RESET_ENGINE + id,
- &i915->gpu_error.flags))
- wait_on_bit(&i915->gpu_error.flags,
- I915_RESET_ENGINE + id,
- TASK_UNINTERRUPTIBLE);
- }
-}
-
-static void global_reset_unlock(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id)
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
-
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
- wake_up_all(&i915->gpu_error.reset_queue);
-}
-
static int igt_global_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -390,7 +363,7 @@ static int igt_global_reset(void *arg)
/* Check that we can issue a global GPU reset */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
mutex_lock(&i915->drm.struct_mutex);
@@ -405,7 +378,7 @@ static int igt_global_reset(void *arg)
mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
@@ -936,7 +909,7 @@ static int igt_reset_wait(void *arg)
/* Check that we detect a stuck waiter and issue a reset */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -988,7 +961,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -1066,7 +1039,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
/* Check that we can recover an unbind stuck on a hanging request */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -1186,7 +1159,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -1266,7 +1239,7 @@ static int igt_reset_queue(void *arg)
/* Check that we replay pending requests following a hang */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -1397,7 +1370,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
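
In the igt_hang_sanitycheck() hunk above, igt_wedge_on_timeout() (from igt_wedge_me.h) wraps the single statement that follows it: the wait runs as usual, but if it has not completed within the given timeout the device is forcibly wedged so a stuck request cannot stall the locked wait forever. A minimal sketch of the construct, assuming rq and i915 come from the surrounding test:

    struct igt_wedge_me w;
    long timeout = 0;

    /* Wedge the GPU if the wait fails to complete within ~100ms. */
    igt_wedge_on_timeout(&w, i915, HZ / 10)
        timeout = i915_request_wait(rq, I915_WAIT_LOCKED,
                                    MAX_SCHEDULE_TIMEOUT);
    if (i915_terminally_wedged(&i915->gpu_error))
        timeout = -EIO;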
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 94fc0e5c8766..ca461e3a5f27 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -6,216 +6,18 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
+#include "igt_spinner.h"
#include "i915_random.h"
#include "mock_context.h"
-struct spinner {
- struct drm_i915_private *i915;
- struct drm_i915_gem_object *hws;
- struct drm_i915_gem_object *obj;
- u32 *batch;
- void *seqno;
-};
-
-static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
-{
- unsigned int mode;
- void *vaddr;
- int err;
-
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
- memset(spin, 0, sizeof(*spin));
- spin->i915 = i915;
-
- spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(spin->hws)) {
- err = PTR_ERR(spin->hws);
- goto err;
- }
-
- spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(spin->obj)) {
- err = PTR_ERR(spin->obj);
- goto err_hws;
- }
-
- i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
- vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
- spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
- mode = i915_coherent_map_type(i915);
- vaddr = i915_gem_object_pin_map(spin->obj, mode);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_unpin_hws;
- }
- spin->batch = vaddr;
-
- return 0;
-
-err_unpin_hws:
- i915_gem_object_unpin_map(spin->hws);
-err_obj:
- i915_gem_object_put(spin->obj);
-err_hws:
- i915_gem_object_put(spin->hws);
-err:
- return err;
-}
-
-static unsigned int seqno_offset(u64 fence)
-{
- return offset_in_page(sizeof(u32) * fence);
-}
-
-static u64 hws_address(const struct i915_vma *hws,
- const struct i915_request *rq)
-{
- return hws->node.start + seqno_offset(rq->fence.context);
-}
-
-static int emit_recurse_batch(struct spinner *spin,
- struct i915_request *rq,
- u32 arbitration_command)
-{
- struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
- struct i915_vma *hws, *vma;
- u32 *batch;
- int err;
-
- vma = i915_vma_instance(spin->obj, vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- hws = i915_vma_instance(spin->hws, vm, NULL);
- if (IS_ERR(hws))
- return PTR_ERR(hws);
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
-
- err = i915_vma_pin(hws, 0, 0, PIN_USER);
- if (err)
- goto unpin_vma;
-
- err = i915_vma_move_to_active(vma, rq, 0);
- if (err)
- goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
- }
-
- err = i915_vma_move_to_active(hws, rq, 0);
- if (err)
- goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(hws->obj)) {
- i915_gem_object_get(hws->obj);
- i915_gem_object_set_active_reference(hws->obj);
- }
-
- batch = spin->batch;
-
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(hws_address(hws, rq));
- *batch++ = upper_32_bits(hws_address(hws, rq));
- *batch++ = rq->fence.seqno;
-
- *batch++ = arbitration_command;
-
- *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
- *batch++ = lower_32_bits(vma->node.start);
- *batch++ = upper_32_bits(vma->node.start);
- *batch++ = MI_BATCH_BUFFER_END; /* not reached */
-
- i915_gem_chipset_flush(spin->i915);
-
- err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
-
-unpin_hws:
- i915_vma_unpin(hws);
-unpin_vma:
- i915_vma_unpin(vma);
- return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 arbitration_command)
-{
- struct i915_request *rq;
- int err;
-
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return rq;
-
- err = emit_recurse_batch(spin, rq, arbitration_command);
- if (err) {
- i915_request_add(rq);
- return ERR_PTR(err);
- }
-
- return rq;
-}
-
-static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
-{
- u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
-
- return READ_ONCE(*seqno);
-}
-
-static void spinner_end(struct spinner *spin)
-{
- *spin->batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(spin->i915);
-}
-
-static void spinner_fini(struct spinner *spin)
-{
- spinner_end(spin);
-
- i915_gem_object_unpin_map(spin->obj);
- i915_gem_object_put(spin->obj);
-
- i915_gem_object_unpin_map(spin->hws);
- i915_gem_object_put(spin->hws);
-}
-
-static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
-{
- if (!wait_event_timeout(rq->execute,
- READ_ONCE(rq->global_seqno),
- msecs_to_jiffies(10)))
- return false;
-
- return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
- rq->fence.seqno),
- 10) &&
- wait_for(i915_seqno_passed(hws_seqno(spin, rq),
- rq->fence.seqno),
- 1000));
-}
-
static int live_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
- struct spinner spin;
+ struct igt_spinner spin;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
@@ -224,7 +26,7 @@ static int live_sanitycheck(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin, i915))
+ if (igt_spinner_init(&spin, i915))
goto err_unlock;
ctx = kernel_context(i915);
@@ -234,14 +36,14 @@ static int live_sanitycheck(void *arg)
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin, rq)) {
+ if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -249,7 +51,7 @@ static int live_sanitycheck(void *arg)
goto err_ctx;
}
- spinner_end(&spin);
+ igt_spinner_end(&spin);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx;
@@ -260,7 +62,7 @@ static int live_sanitycheck(void *arg)
err_ctx:
kernel_context_close(ctx);
err_spin:
- spinner_fini(&spin);
+ igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -272,7 +74,7 @@ static int live_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -283,10 +85,10 @@ static int live_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -304,15 +106,15 @@ static int live_preempt(void *arg)
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -320,16 +122,16 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -337,8 +139,8 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -351,9 +153,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -365,7 +167,7 @@ static int live_late_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
@@ -377,10 +179,10 @@ static int live_late_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -394,28 +196,29 @@ static int live_late_preempt(void *arg)
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
pr_err("First context failed to start\n");
goto err_wedged;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (wait_for_spinner(&spin_hi, rq)) {
+ if (igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("Second context overtook first?\n");
goto err_wedged;
}
@@ -423,14 +226,14 @@ static int live_late_preempt(void *arg)
attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
engine->schedule(rq, &attr);
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("High priority context failed to preempt the low priority context\n");
GEM_TRACE_DUMP();
goto err_wedged;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -443,9 +246,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -453,8 +256,8 @@ err_unlock:
return err;
err_wedged:
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
i915_gem_set_wedged(i915);
err = -EIO;
goto err_ctx_lo;
@@ -464,7 +267,7 @@ static int live_preempt_hang(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -478,10 +281,10 @@ static int live_preempt_hang(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -500,15 +303,15 @@ static int live_preempt_hang(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -516,10 +319,10 @@ static int live_preempt_hang(void *arg)
goto err_ctx_lo;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
@@ -544,7 +347,7 @@ static int live_preempt_hang(void *arg)
engine->execlists.preempt_hang.inject_hang = false;
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -552,8 +355,8 @@ static int live_preempt_hang(void *arg)
goto err_ctx_lo;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -566,9 +369,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index d1a0923d2f38..67017d5175b8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -6,6 +6,9 @@
#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+#include "igt_reset.h"
+#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
@@ -91,17 +94,23 @@ err_obj:
return ERR_PTR(err);
}
-static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i)
+static u32
+get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
- return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid;
+ i915_reg_t reg = i < engine->whitelist.count ?
+ engine->whitelist.list[i].reg :
+ RING_NOPID(engine->mmio_base);
+
+ return i915_mmio_reg_offset(reg);
}
-static void print_results(const struct whitelist *w, const u32 *results)
+static void
+print_results(const struct intel_engine_cs *engine, const u32 *results)
{
unsigned int i;
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
- u32 expected = get_whitelist_reg(w, i);
+ u32 expected = get_whitelist_reg(engine, i);
u32 actual = results[i];
pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
@@ -109,8 +118,7 @@ static void print_results(const struct whitelist *w, const u32 *results)
}
}
-static int check_whitelist(const struct whitelist *w,
- struct i915_gem_context *ctx,
+static int check_whitelist(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *results;
@@ -138,11 +146,11 @@ static int check_whitelist(const struct whitelist *w,
}
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
- u32 expected = get_whitelist_reg(w, i);
+ u32 expected = get_whitelist_reg(engine, i);
u32 actual = vaddr[i];
if (expected != actual) {
- print_results(w, vaddr);
+ print_results(engine, vaddr);
pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
i, expected, actual);
@@ -159,66 +167,107 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL);
+ set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
+ i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
return 0;
}
static int do_engine_reset(struct intel_engine_cs *engine)
{
- return i915_reset_engine(engine, NULL);
+ return i915_reset_engine(engine, "live_workarounds");
}
-static int switch_to_scratch_context(struct intel_engine_cs *engine)
+static int
+switch_to_scratch_context(struct intel_engine_cs *engine,
+ struct igt_spinner *spin)
{
struct i915_gem_context *ctx;
struct i915_request *rq;
+ int err = 0;
ctx = kernel_context(engine->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
intel_runtime_pm_get(engine->i915);
- rq = i915_request_alloc(engine, ctx);
+
+ if (spin)
+ rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+ else
+ rq = i915_request_alloc(engine, ctx);
+
intel_runtime_pm_put(engine->i915);
kernel_context_close(ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+
+ if (IS_ERR(rq)) {
+ spin = NULL;
+ err = PTR_ERR(rq);
+ goto err;
+ }
i915_request_add(rq);
- return 0;
+ if (spin && !igt_wait_for_spinner(spin, rq)) {
+ pr_err("Spinner failed to start\n");
+ err = -ETIMEDOUT;
+ }
+
+err:
+ if (err && spin)
+ igt_spinner_end(spin);
+
+ return err;
}
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
int (*reset)(struct intel_engine_cs *),
- const struct whitelist *w,
const char *name)
{
+ struct drm_i915_private *i915 = engine->i915;
+ bool want_spin = reset == do_engine_reset;
struct i915_gem_context *ctx;
+ struct igt_spinner spin;
int err;
- ctx = kernel_context(engine->i915);
+ pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
+ engine->whitelist.count, name);
+
+ if (want_spin) {
+ err = igt_spinner_init(&spin, i915);
+ if (err)
+ return err;
+ }
+
+ ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *before* %s reset!\n", name);
goto out;
}
- err = switch_to_scratch_context(engine);
+ err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
if (err)
goto out;
+ intel_runtime_pm_get(i915);
err = reset(engine);
+ intel_runtime_pm_put(i915);
+
+ if (want_spin) {
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+ }
+
if (err) {
pr_err("%s reset failed\n", name);
goto out;
}
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Whitelist not preserved in context across %s reset!\n",
name);
@@ -227,11 +276,11 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
kernel_context_close(ctx);
- ctx = kernel_context(engine->i915);
+ ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
name);
@@ -247,26 +296,18 @@ static int live_reset_whitelist(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine = i915->engine[RCS];
- struct i915_gpu_error *error = &i915->gpu_error;
- struct whitelist w;
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
- if (!engine)
- return 0;
-
- if (!whitelist_build(engine, &w))
+ if (!engine || engine->whitelist.count == 0)
return 0;
- pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count);
-
- set_bit(I915_RESET_BACKOFF, &error->flags);
- set_bit(I915_RESET_ENGINE + engine->id, &error->flags);
+ igt_global_reset_lock(i915);
if (intel_has_reset_engine(i915)) {
err = check_whitelist_across_reset(engine,
- do_engine_reset, &w,
+ do_engine_reset,
"engine");
if (err)
goto out;
@@ -274,22 +315,156 @@ static int live_reset_whitelist(void *arg)
if (intel_has_gpu_reset(i915)) {
err = check_whitelist_across_reset(engine,
- do_device_reset, &w,
+ do_device_reset,
"device");
if (err)
goto out;
}
out:
- clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
- clear_bit(I915_RESET_BACKOFF, &error->flags);
+ igt_global_reset_unlock(i915);
return err;
}
+static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ bool ok = true;
+
+ ok &= intel_gt_verify_workarounds(i915, str);
+
+ for_each_engine(engine, i915, id)
+ ok &= intel_engine_verify_workarounds(engine, str);
+
+ return ok;
+}
+
+static int
+live_gpu_reset_gt_engine_workarounds(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gpu_error *error = &i915->gpu_error;
+ bool ok;
+
+ if (!intel_has_gpu_reset(i915))
+ return 0;
+
+ pr_info("Verifying after GPU reset...\n");
+
+ igt_global_reset_lock(i915);
+
+ ok = verify_gt_engine_wa(i915, "before reset");
+ if (!ok)
+ goto out;
+
+ intel_runtime_pm_get(i915);
+ set_bit(I915_RESET_HANDOFF, &error->flags);
+ i915_reset(i915, ALL_ENGINES, "live_workarounds");
+ intel_runtime_pm_put(i915);
+
+ ok = verify_gt_engine_wa(i915, "after reset");
+
+out:
+ igt_global_reset_unlock(i915);
+
+ return ok ? 0 : -ESRCH;
+}
+
+static int
+live_engine_reset_gt_engine_workarounds(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ struct i915_request *rq;
+ int ret = 0;
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ igt_global_reset_lock(i915);
+
+ for_each_engine(engine, i915, id) {
+ bool ok;
+
+ pr_info("Verifying after %s reset...\n", engine->name);
+
+ ok = verify_gt_engine_wa(i915, "before reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+
+ intel_runtime_pm_get(i915);
+ i915_reset_engine(engine, "live_workarounds");
+ intel_runtime_pm_put(i915);
+
+ ok = verify_gt_engine_wa(i915, "after idle reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+
+ ret = igt_spinner_init(&spin, i915);
+ if (ret)
+ goto err;
+
+ intel_runtime_pm_get(i915);
+
+ rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ igt_spinner_fini(&spin);
+ intel_runtime_pm_put(i915);
+ goto err;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("Spinner failed to start\n");
+ igt_spinner_fini(&spin);
+ intel_runtime_pm_put(i915);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ i915_reset_engine(engine, "live_workarounds");
+
+ intel_runtime_pm_put(i915);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ ok = verify_gt_engine_wa(i915, "after busy reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+ }
+
+err:
+ igt_global_reset_unlock(i915);
+ kernel_context_close(ctx);
+
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+
+ return ret;
+}
+
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_reset_whitelist),
+ SUBTEST(live_gpu_reset_gt_engine_workarounds),
+ SUBTEST(live_engine_reset_gt_engine_workarounds),
};
int err;
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index fe6becdcc29e..77a26fd3a44a 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
*
* derived from imx-hdmi.c(renamed to bridge/dw_hdmi.c now)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 0e6942f21a4e..820c7e3878f0 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale i.MX drm driver
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/component.h>
#include <linux/device.h>
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 3bd0f8a18e74..2c5bbe317353 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX drm driver - LVDS display bridge
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index cffd3310240e..293dd5752583 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX drm driver - Television Encoder (TVEv2)
*
* Copyright (C) 2013 Philipp Zabel, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -442,7 +434,7 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static struct clk_ops clk_tve_di_ops = {
+static const struct clk_ops clk_tve_di_ops = {
.round_rate = clk_tve_di_round_rate,
.set_rate = clk_tve_di_set_rate,
.recalc_rate = clk_tve_di_recalc_rate,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7d4b710b837a..058b53c0aa7e 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX IPUv3 Graphics driver
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/component.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 40605fdf0e33..c390924de93d 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX IPUv3 DP Overlay Planes
*
* Copyright (C) 2013 Philipp Zabel, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <drm/drmP.h>
@@ -236,9 +228,15 @@ static void ipu_plane_enable(struct ipu_plane *ipu_plane)
void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel)
{
+ int ret;
+
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
+ ret = ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
+ if (ret == -ETIMEDOUT) {
+ DRM_ERROR("[PLANE:%d] IDMAC timeout\n",
+ ipu_plane->base.base.id);
+ }
if (ipu_plane->dp && disable_dp_channel)
ipu_dp_disable_channel(ipu_plane->dp, false);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index aefd04e18f93..f3ce51121dd6 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX drm driver - parallel display implementation
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/component.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 66df1b177959..27b507eb4a99 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -818,10 +818,13 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
dsi->encoder.possible_crtcs = 1;
/* If there's a bridge, attach to it and let it create the connector */
- ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
- if (ret) {
- DRM_ERROR("Failed to attach bridge to drm\n");
-
+ if (dsi->bridge) {
+ ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to attach bridge to drm\n");
+ goto err_encoder_cleanup;
+ }
+ } else {
/* Otherwise create our own connector and attach to a panel */
ret = mtk_dsi_create_connector(drm, dsi);
if (ret)
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index d78168f979db..75d97f1b2e8f 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -46,6 +46,7 @@ struct meson_crtc {
struct drm_crtc base;
struct drm_pending_vblank_event *event;
struct meson_drm *priv;
+ bool enabled;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
@@ -81,8 +82,7 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
};
-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+static void meson_crtc_enable(struct drm_crtc *crtc)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
@@ -106,6 +106,22 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
priv->io_base + _REG(VPP_MISC));
+ drm_crtc_vblank_on(crtc);
+
+ meson_crtc->enabled = true;
+}
+
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct meson_drm *priv = meson_crtc->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!meson_crtc->enabled)
+ meson_crtc_enable(crtc);
+
priv->viu.osd1_enabled = true;
}
@@ -117,6 +133,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("\n");
+ drm_crtc_vblank_off(crtc);
+
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
@@ -135,6 +153,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
crtc->state->event = NULL;
}
+
+ meson_crtc->enabled = false;
}
static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -143,6 +163,9 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
unsigned long flags;
+ if (crtc->state->enable && !meson_crtc->enabled)
+ meson_crtc_enable(crtc);
+
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 0b6c29cdd934..bc25001b8207 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -690,6 +690,7 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
.reg_read = meson_dw_hdmi_reg_read,
.reg_write = meson_dw_hdmi_reg_write,
.max_register = 0x10000,
+ .fast_io = true,
};
static bool meson_hdmi_connector_is_available(struct device *dev)
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index e95e0e7a7fa1..0ba04f6813e6 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -71,6 +71,7 @@
*/
/* HHI Registers */
+#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
@@ -840,6 +841,7 @@ struct meson_hdmi_venc_vic_mode {
{ 5, &meson_hdmi_encp_mode_1080i60 },
{ 20, &meson_hdmi_encp_mode_1080i50 },
{ 32, &meson_hdmi_encp_mode_1080p24 },
+ { 33, &meson_hdmi_encp_mode_1080p50 },
{ 34, &meson_hdmi_encp_mode_1080p30 },
{ 31, &meson_hdmi_encp_mode_1080p50 },
{ 16, &meson_hdmi_encp_mode_1080p60 },
@@ -1659,10 +1661,12 @@ unsigned int meson_venci_get_field(struct meson_drm *priv)
void meson_venc_enable_vsync(struct meson_drm *priv)
{
writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL));
+ regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25));
}
void meson_venc_disable_vsync(struct meson_drm *priv)
{
+ regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), 0);
writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL));
}
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 0ba87ff95530..e46e05f50bad 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -184,18 +184,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
if (lut_sel == VIU_LUT_OSD_OETF) {
writel(0, priv->io_base + _REG(addr_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16),
priv->io_base + _REG(data_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
priv->io_base + _REG(data_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
@@ -211,18 +211,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
} else if (lut_sel == VIU_LUT_OSD_EOTF) {
writel(0, priv->io_base + _REG(addr_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16),
priv->io_base + _REG(data_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
priv->io_base + _REG(data_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 843a9d40c05e..cf549f1ed403 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,7 +2,7 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
select QCOM_MDT_LOADER if ARCH_QCOM
@@ -11,7 +11,7 @@ config DRM_MSM
select DRM_PANEL
select SHMEM
select TMPFS
- select QCOM_SCM
+ select QCOM_SCM if ARCH_QCOM
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 19ab521d4c3a..56a70c74af4e 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -6,6 +6,7 @@ ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
adreno/adreno_device.o \
adreno/adreno_gpu.o \
+ adreno/a2xx_gpu.o \
adreno/a3xx_gpu.o \
adreno/a4xx_gpu.o \
adreno/a5xx_gpu.o \
@@ -14,6 +15,7 @@ msm-y := \
adreno/a6xx_gpu.o \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
+ adreno/a6xx_gpu_state.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -68,11 +70,9 @@ msm-y := \
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
disp/dpu1/dpu_io_util.o \
- disp/dpu1/dpu_irq.o \
disp/dpu1/dpu_kms.o \
disp/dpu1/dpu_mdss.o \
disp/dpu1/dpu_plane.o \
- disp/dpu1/dpu_power_handle.o \
disp/dpu1/dpu_rm.o \
disp/dpu1/dpu_vbif.o \
msm_atomic.o \
@@ -90,10 +90,11 @@ msm-y := \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \
- msm_submitqueue.o
+ msm_submitqueue.o \
+ msm_gpu_tracepoints.o \
+ msm_gpummu.o
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
- disp/dpu1/dpu_dbg.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 12b0ba270b5e..14eb52f3e605 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -239,7 +239,63 @@ enum sq_tex_swiz {
enum sq_tex_filter {
SQ_TEX_FILTER_POINT = 0,
SQ_TEX_FILTER_BILINEAR = 1,
- SQ_TEX_FILTER_BICUBIC = 2,
+ SQ_TEX_FILTER_BASEMAP = 2,
+ SQ_TEX_FILTER_USE_FETCH_CONST = 3,
+};
+
+enum sq_tex_aniso_filter {
+ SQ_TEX_ANISO_FILTER_DISABLED = 0,
+ SQ_TEX_ANISO_FILTER_MAX_1_1 = 1,
+ SQ_TEX_ANISO_FILTER_MAX_2_1 = 2,
+ SQ_TEX_ANISO_FILTER_MAX_4_1 = 3,
+ SQ_TEX_ANISO_FILTER_MAX_8_1 = 4,
+ SQ_TEX_ANISO_FILTER_MAX_16_1 = 5,
+ SQ_TEX_ANISO_FILTER_USE_FETCH_CONST = 7,
+};
+
+enum sq_tex_dimension {
+ SQ_TEX_DIMENSION_1D = 0,
+ SQ_TEX_DIMENSION_2D = 1,
+ SQ_TEX_DIMENSION_3D = 2,
+ SQ_TEX_DIMENSION_CUBE = 3,
+};
+
+enum sq_tex_border_color {
+ SQ_TEX_BORDER_COLOR_BLACK = 0,
+ SQ_TEX_BORDER_COLOR_WHITE = 1,
+ SQ_TEX_BORDER_COLOR_ACBYCR_BLACK = 2,
+ SQ_TEX_BORDER_COLOR_ACBCRY_BLACK = 3,
+};
+
+enum sq_tex_sign {
+ SQ_TEX_SIGN_UNISIGNED = 0,
+ SQ_TEX_SIGN_SIGNED = 1,
+ SQ_TEX_SIGN_UNISIGNED_BIASED = 2,
+ SQ_TEX_SIGN_GAMMA = 3,
+};
+
+enum sq_tex_endian {
+ SQ_TEX_ENDIAN_NONE = 0,
+ SQ_TEX_ENDIAN_8IN16 = 1,
+ SQ_TEX_ENDIAN_8IN32 = 2,
+ SQ_TEX_ENDIAN_16IN32 = 3,
+};
+
+enum sq_tex_clamp_policy {
+ SQ_TEX_CLAMP_POLICY_D3D = 0,
+ SQ_TEX_CLAMP_POLICY_OGL = 1,
+};
+
+enum sq_tex_num_format {
+ SQ_TEX_NUM_FORMAT_FRAC = 0,
+ SQ_TEX_NUM_FORMAT_INT = 1,
+};
+
+enum sq_tex_type {
+ SQ_TEX_TYPE_0 = 0,
+ SQ_TEX_TYPE_1 = 1,
+ SQ_TEX_TYPE_2 = 2,
+ SQ_TEX_TYPE_3 = 3,
};
#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
@@ -323,6 +379,18 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
}
#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK 0x00000fff
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT 0
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(uint32_t val)
+{
+ return ((val) << A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT) & A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK;
+}
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK 0xfffff000
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT 12
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val)
+{
+ return ((val) << A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT) & A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK;
+}
#define REG_A2XX_MH_MMU_PT_BASE 0x00000042
@@ -331,6 +399,8 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044
#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL 0x00000001
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC 0x00000002
#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046
@@ -389,12 +459,19 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
+#define A2XX_RBBM_INT_CNTL_RDERR_INT_MASK 0x00000001
+#define A2XX_RBBM_INT_CNTL_DISPLAY_UPDATE_INT_MASK 0x00000002
+#define A2XX_RBBM_INT_CNTL_GUI_IDLE_INT_MASK 0x00080000
#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
#define REG_A2XX_RBBM_INT_ACK 0x000003b6
#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
+#define A2XX_MASTER_INT_SIGNAL_MH_INT_STAT 0x00000020
+#define A2XX_MASTER_INT_SIGNAL_SQ_INT_STAT 0x04000000
+#define A2XX_MASTER_INT_SIGNAL_CP_INT_STAT 0x40000000
+#define A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT 0x80000000
#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
@@ -467,6 +544,19 @@ static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000
+#define REG_A2XX_MH_INTERRUPT_MASK 0x00000a42
+#define A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR 0x00000001
+#define A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR 0x00000002
+#define A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT 0x00000004
+
+#define REG_A2XX_MH_INTERRUPT_STATUS 0x00000a43
+
+#define REG_A2XX_MH_INTERRUPT_CLEAR 0x00000a44
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1 0x00000a54
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG2 0x00000a55
+
#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
@@ -648,6 +738,18 @@ static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val
#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
#define REG_A2XX_RB_SURFACE_INFO 0x00002000
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK 0x00003fff
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT 0
+static inline uint32_t A2XX_RB_SURFACE_INFO_SURFACE_PITCH(uint32_t val)
+{
+ return ((val) << A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT) & A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK;
+}
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK 0x0000c000
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT 14
+static inline uint32_t A2XX_RB_SURFACE_INFO_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT) & A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK;
+}
#define REG_A2XX_RB_COLOR_INFO 0x00002001
#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
@@ -679,7 +781,7 @@ static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
{
- return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
+ return ((val >> 12) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
}
#define REG_A2XX_RB_DEPTH_INFO 0x00002002
@@ -693,7 +795,7 @@ static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
- return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+ return ((val >> 12) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
@@ -1757,6 +1859,36 @@ static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
#define REG_A2XX_SQ_TEX_0 0x00000000
+#define A2XX_SQ_TEX_0_TYPE__MASK 0x00000003
+#define A2XX_SQ_TEX_0_TYPE__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_0_TYPE(enum sq_tex_type val)
+{
+ return ((val) << A2XX_SQ_TEX_0_TYPE__SHIFT) & A2XX_SQ_TEX_0_TYPE__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_X__MASK 0x0000000c
+#define A2XX_SQ_TEX_0_SIGN_X__SHIFT 2
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_X(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_X__SHIFT) & A2XX_SQ_TEX_0_SIGN_X__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Y__MASK 0x00000030
+#define A2XX_SQ_TEX_0_SIGN_Y__SHIFT 4
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Y(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_Y__SHIFT) & A2XX_SQ_TEX_0_SIGN_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Z__MASK 0x000000c0
+#define A2XX_SQ_TEX_0_SIGN_Z__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Z(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_Z__SHIFT) & A2XX_SQ_TEX_0_SIGN_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_W__MASK 0x00000300
+#define A2XX_SQ_TEX_0_SIGN_W__SHIFT 8
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_W(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_W__SHIFT) & A2XX_SQ_TEX_0_SIGN_W__MASK;
+}
#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
@@ -1775,14 +1907,46 @@ static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
{
return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
}
-#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000
+#define A2XX_SQ_TEX_0_PITCH__MASK 0x7fc00000
#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
{
return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
}
+#define A2XX_SQ_TEX_0_TILED 0x00000002
#define REG_A2XX_SQ_TEX_1 0x00000001
+#define A2XX_SQ_TEX_1_FORMAT__MASK 0x0000003f
+#define A2XX_SQ_TEX_1_FORMAT__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_1_FORMAT(enum a2xx_sq_surfaceformat val)
+{
+ return ((val) << A2XX_SQ_TEX_1_FORMAT__SHIFT) & A2XX_SQ_TEX_1_FORMAT__MASK;
+}
+#define A2XX_SQ_TEX_1_ENDIANNESS__MASK 0x000000c0
+#define A2XX_SQ_TEX_1_ENDIANNESS__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_1_ENDIANNESS(enum sq_tex_endian val)
+{
+ return ((val) << A2XX_SQ_TEX_1_ENDIANNESS__SHIFT) & A2XX_SQ_TEX_1_ENDIANNESS__MASK;
+}
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__MASK 0x00000300
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT 8
+static inline uint32_t A2XX_SQ_TEX_1_REQUEST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT) & A2XX_SQ_TEX_1_REQUEST_SIZE__MASK;
+}
+#define A2XX_SQ_TEX_1_STACKED 0x00000400
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__MASK 0x00000800
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT 11
+static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val)
+{
+ return ((val) << A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT) & A2XX_SQ_TEX_1_CLAMP_POLICY__MASK;
+}
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__MASK 0xfffff000
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val)
+{
+ return ((val >> 12) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
+}
#define REG_A2XX_SQ_TEX_2 0x00000002
#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
@@ -1797,8 +1961,20 @@ static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
}
+#define A2XX_SQ_TEX_2_DEPTH__MASK 0xfc000000
+#define A2XX_SQ_TEX_2_DEPTH__SHIFT 26
+static inline uint32_t A2XX_SQ_TEX_2_DEPTH(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_DEPTH__SHIFT) & A2XX_SQ_TEX_2_DEPTH__MASK;
+}
#define REG_A2XX_SQ_TEX_3 0x00000003
+#define A2XX_SQ_TEX_3_NUM_FORMAT__MASK 0x00000001
+#define A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_3_NUM_FORMAT(enum sq_tex_num_format val)
+{
+ return ((val) << A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT) & A2XX_SQ_TEX_3_NUM_FORMAT__MASK;
+}
#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
@@ -1823,6 +1999,12 @@ static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
{
return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
}
+#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK 0x0007e000
+#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK;
+}
#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
@@ -1835,6 +2017,104 @@ static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
{
return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
}
+#define A2XX_SQ_TEX_3_MIP_FILTER__MASK 0x01800000
+#define A2XX_SQ_TEX_3_MIP_FILTER__SHIFT 23
+static inline uint32_t A2XX_SQ_TEX_3_MIP_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_MIP_FILTER__SHIFT) & A2XX_SQ_TEX_3_MIP_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_ANISO_FILTER__MASK 0x0e000000
+#define A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT 25
+static inline uint32_t A2XX_SQ_TEX_3_ANISO_FILTER(enum sq_tex_aniso_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT) & A2XX_SQ_TEX_3_ANISO_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_BORDER_SIZE__MASK 0x80000000
+#define A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT 31
+static inline uint32_t A2XX_SQ_TEX_3_BORDER_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT) & A2XX_SQ_TEX_3_BORDER_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_4 0x00000004
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK 0x00000001
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MAG_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK 0x00000002
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT 1
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MIN_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK 0x0000003c
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT 2
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MIN_LEVEL(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK 0x000003c0
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MAX_LEVEL(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MAX_ANISO_WALK 0x00000400
+#define A2XX_SQ_TEX_4_MIN_ANISO_WALK 0x00000800
+#define A2XX_SQ_TEX_4_LOD_BIAS__MASK 0x003ff000
+#define A2XX_SQ_TEX_4_LOD_BIAS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_4_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 32.0))) << A2XX_SQ_TEX_4_LOD_BIAS__SHIFT) & A2XX_SQ_TEX_4_LOD_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK 0x07c00000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT 22
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK 0xf8000000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT 27
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_5 0x00000005
+#define A2XX_SQ_TEX_5_BORDER_COLOR__MASK 0x00000003
+#define A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_5_BORDER_COLOR(enum sq_tex_border_color val)
+{
+ return ((val) << A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT) & A2XX_SQ_TEX_5_BORDER_COLOR__MASK;
+}
+#define A2XX_SQ_TEX_5_FORCE_BCW_MAX 0x00000004
+#define A2XX_SQ_TEX_5_TRI_CLAMP__MASK 0x00000018
+#define A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT 3
+static inline uint32_t A2XX_SQ_TEX_5_TRI_CLAMP(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT) & A2XX_SQ_TEX_5_TRI_CLAMP__MASK;
+}
+#define A2XX_SQ_TEX_5_ANISO_BIAS__MASK 0x000001e0
+#define A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT 5
+static inline uint32_t A2XX_SQ_TEX_5_ANISO_BIAS(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT) & A2XX_SQ_TEX_5_ANISO_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_5_DIMENSION__MASK 0x00000600
+#define A2XX_SQ_TEX_5_DIMENSION__SHIFT 9
+static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
+{
+ return ((val) << A2XX_SQ_TEX_5_DIMENSION__SHIFT) & A2XX_SQ_TEX_5_DIMENSION__MASK;
+}
+#define A2XX_SQ_TEX_5_PACKED_MIPS 0x00000800
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__MASK 0xfffff000
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
+{
+ return ((val >> 12) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
+}
#endif /* A2XX_XML */
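
The new *__MASK/*__SHIFT pairs and their inline packers are meant to be OR-ed together when building a texture fetch constant. A hedged illustration of composing a few descriptor dwords from the helpers added above (field values and the pitch variable are placeholders, not taken from the patch; a complete descriptor needs every dword filled in):

    uint32_t tex0, tex3, tex5;

    tex0 = A2XX_SQ_TEX_0_TYPE(SQ_TEX_TYPE_2) |
           A2XX_SQ_TEX_0_SIGN_X(SQ_TEX_SIGN_UNISIGNED) |
           A2XX_SQ_TEX_0_SIGN_Y(SQ_TEX_SIGN_UNISIGNED) |
           A2XX_SQ_TEX_0_PITCH(pitch); /* helper stores pitch >> 5, so pitch must be a multiple of 32 */

    tex3 = A2XX_SQ_TEX_3_NUM_FORMAT(SQ_TEX_NUM_FORMAT_FRAC) |
           A2XX_SQ_TEX_3_MIP_FILTER(SQ_TEX_FILTER_POINT) |
           A2XX_SQ_TEX_3_ANISO_FILTER(SQ_TEX_ANISO_FILTER_DISABLED);

    tex5 = A2XX_SQ_TEX_5_DIMENSION(SQ_TEX_DIMENSION_2D) |
           A2XX_SQ_TEX_5_BORDER_COLOR(SQ_TEX_BORDER_COLOR_BLACK);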
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
new file mode 100644
index 000000000000..1f83bc18d500
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "a2xx_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+extern bool hang_debug;
+
+static void a2xx_dump(struct msm_gpu *gpu);
+static bool a2xx_idle(struct msm_gpu *gpu);
+
+static bool a2xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT3(ring, CP_ME_INIT, 18);
+
+ /* All fields present (bits 9:0) */
+ OUT_RING(ring, 0x000003ff);
+ /* Disable/Enable Real-Time Stream processing (present but ignored) */
+ OUT_RING(ring, 0x00000000);
+ /* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+ OUT_RING(ring, 0x00000000);
+
+ OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
+ OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
+ OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
+ OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);
+
+ /* Vertex and Pixel Shader Start Addresses in instructions
+ * (3 DWORDS per instruction) */
+ OUT_RING(ring, 0x80000180);
+ /* Maximum Contexts */
+ OUT_RING(ring, 0x00000001);
+ /* Write Confirm Interval: the CP will wait
+  * wait_interval * 16 clocks between polls */
+ OUT_RING(ring, 0x00000000);
+ /* NQ and External Memory Swap */
+ OUT_RING(ring, 0x00000000);
+ /* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
+ OUT_RING(ring, 0x200001f2);
+ /* Disable header dumping and Header dump address */
+ OUT_RING(ring, 0x00000000);
+ /* Header dump size */
+ OUT_RING(ring, 0x00000000);
+
+ /* enable protected mode */
+ OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ gpu->funcs->flush(gpu, ring);
+ return a2xx_idle(gpu);
+}
+
+static int a2xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ dma_addr_t pt_base, tran_error;
+ uint32_t *ptr, len;
+ int i, ret;
+
+ msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+
+ DBG("%s", gpu->name);
+
+ /* halt ME to avoid ucode upload issues on a20x */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);
+
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+ /* note: kgsl uses 0x00000001 after first reset on a22x */
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
+ msleep(30);
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);
+
+ if (adreno_is_a225(adreno_gpu))
+ gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);
+
+ /* note: kgsl uses 0x0000ffff for a20x */
+ gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);
+
+ /* MPU: physical range */
+ gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
+ gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
+ A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));
+
+ /* same as parameters in adreno_gpu */
+ gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
+ A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
+ gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+
+ gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
+ A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
+ A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
+ A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
+ A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
+ if (!adreno_is_a20x(adreno_gpu))
+ gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);
+
+ gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
+ gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);
+
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */
+
+ /* note: gsl doesn't set this */
+ gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);
+
+ gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
+ A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
+ gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
+ AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
+ AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
+ AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
+ AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
+ AXXX_CP_INT_CNTL_IB_ERROR_MASK |
+ AXXX_CP_INT_CNTL_IB1_INT_MASK |
+ AXXX_CP_INT_CNTL_RB_INT_MASK);
+ gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
+ gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
+ A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
+ A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
+ A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);
+
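+ /* encode GMEM size: find i such that (SZ_16K << i) == gmem,
+  * i.e. 128K/256K/512K map to 3/4/5 */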
+ for (i = 3; i <= 5; i++)
+ if ((SZ_16K << i) == adreno_gpu->gmem)
+ break;
+ gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /* NOTE: PM4/micro-engine firmware registers look to be the same
+ * for a2xx and a3xx.. we could possibly push that part down to
+ * adreno_gpu base class. Or push both PM4 and PFP but
+ * parameterize the pfp ucode addr/data registers..
+ */
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+ DBG("loading PM4 ucode version: %x", ptr[1]);
+
+ gpu_write(gpu, REG_AXXX_CP_DEBUG,
+ AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
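+ /* dword 0 of the fw image is not written to ME RAM: loading starts at
+  * dword 1 (dword 1 also carries the ucode version logged above) */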
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+ DBG("loading PFP ucode version: %x", ptr[5]);
+
+ gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+ return a2xx_me_init(gpu) ? 0 : -EINVAL;
+}
+
+static void a2xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a2xx_dump(gpu);
+
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
+ gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
+ adreno_recover(gpu);
+}
+
+static void a2xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+ kfree(a2xx_gpu);
+}
+
+static bool a2xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
+ A2XX_RBBM_STATUS_GUI_ACTIVE))) {
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
+}
+
+static irqreturn_t a2xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t mstatus, status;
+
+ mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL);
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) {
+ status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS);
+
+ dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status);
+ dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n",
+ gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT));
+
+ gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status);
+ }
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) {
+ status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS);
+
+ /* only RB_INT is expected */
+ if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK)
+ dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status);
+
+ gpu_write(gpu, REG_AXXX_CP_INT_ACK, status);
+ }
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) {
+ status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS);
+
+ dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status);
+
+ gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status);
+ }
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const unsigned int a200_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
+ 0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
+ 0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
+ 0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
+ 0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
+ 0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
+ 0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
+ 0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
+ 0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
+ 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
+ 0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
+ 0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a220_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
+ 0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
+ 0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
+ 0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
+ 0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
+ 0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
+ 0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
+ 0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
+ 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
+ 0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
+ 0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
+ 0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
+ 0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
+ 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
+ 0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
+ 0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
+ 0x4900, 0x4900, 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a225_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
+ 0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
+ 0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
+ 0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
+ 0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
+ 0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
+ 0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
+ 0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
+ 0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
+ 0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
+ 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
+ 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
+ 0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
+ 0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
+ 0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
+ 0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
+ 0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
+ 0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
+ 0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
+ 0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
+ 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+static void a2xx_dump(struct msm_gpu *gpu)
+{
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A2XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS);
+
+ return state;
+}
+
+/* Register offset defines for A2XX - copy of A3XX */
+static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
+};
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a2xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a2xx_recover,
+ .submit = adreno_submit,
+ .flush = adreno_flush,
+ .active_ring = adreno_active_ring,
+ .irq = a2xx_irq,
+ .destroy = a2xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_state_get = a2xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ },
+};
+
+static const struct msm_gpu_perfcntr perfcntrs[] = {
+/* TODO */
+};
+
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+{
+ struct a2xx_gpu *a2xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no a2xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ a2xx_gpu = kzalloc(sizeof(*a2xx_gpu), GFP_KERNEL);
+ if (!a2xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a2xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ gpu->perfcntrs = perfcntrs;
+ gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+
+ if (adreno_is_a20x(adreno_gpu))
+ adreno_gpu->registers = a200_registers;
+ else if (adreno_is_a225(adreno_gpu))
+ adreno_gpu->registers = a225_registers;
+ else
+ adreno_gpu->registers = a220_registers;
+
+ adreno_gpu->reg_offsets = a2xx_register_offsets;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret)
+ goto fail;
+
+ if (!gpu->aspace) {
+ dev_err(dev->dev, "No memory protection without MMU\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ return gpu;
+
+fail:
+ if (a2xx_gpu)
+ a2xx_destroy(&a2xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
new file mode 100644
index 000000000000..02fba2cb8932
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef __A2XX_GPU_H__
+#define __A2XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a2xx.xml.h"
+
+struct a2xx_gpu {
+ struct adreno_gpu base;
+ bool pm_enabled;
+};
+#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+
+#endif /* __A2XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index a89f7bb8b5cc..17059f242a98 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 669c2d4b070d..c3b4bc6e4155 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -481,7 +481,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
int ret;
if (!pdev) {
- dev_err(dev->dev, "no a3xx device\n");
+ DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
ret = -ENXIO;
goto fail;
}
@@ -528,7 +528,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
- dev_err(dev->dev, "No memory protection without IOMMU\n");
+ DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
}
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 858690f52854..9b51e25a9583 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 7c4e6dc1ed59..18f9a8e0bf3b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -561,7 +561,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
int ret;
if (!pdev) {
- dev_err(dev->dev, "no a4xx device\n");
+ DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
ret = -ENXIO;
goto fail;
}
@@ -608,7 +608,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
- dev_err(dev->dev, "No memory protection without IOMMU\n");
+ DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index b4944cc0e62f..cf4fe14ddd6e 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index d2127b1c4ece..d9af3aff690f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -130,15 +130,13 @@ reset_set(void *data, u64 val)
adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
if (a5xx_gpu->pm4_bo) {
- if (a5xx_gpu->pm4_iova)
- msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
- if (a5xx_gpu->pfp_iova)
- msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
@@ -173,7 +171,7 @@ int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
minor->debugfs_root, minor);
if (ret) {
- dev_err(dev->dev, "could not install a5xx_debugfs_list\n");
+ DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
return ret;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 8edd80bb0428..d5f5e56422f5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -20,7 +20,6 @@
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
-#include <linux/iopoll.h>
#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -511,13 +510,16 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
+
if (IS_ERR(a5xx_gpu->pm4_bo)) {
ret = PTR_ERR(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
- dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+ DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
ret);
return ret;
}
+
+ msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
}
if (!a5xx_gpu->pfp_bo) {
@@ -527,10 +529,12 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
if (IS_ERR(a5xx_gpu->pfp_bo)) {
ret = PTR_ERR(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
- dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+ DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
ret);
return ret;
}
+
+ msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
}
gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@@ -841,20 +845,17 @@ static void a5xx_destroy(struct msm_gpu *gpu)
a5xx_preempt_fini(gpu);
if (a5xx_gpu->pm4_bo) {
- if (a5xx_gpu->pm4_iova)
- msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
- if (a5xx_gpu->pfp_iova)
- msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
- if (a5xx_gpu->gpmu_iova)
- msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
}
@@ -1028,7 +1029,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
- dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->seqno : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
@@ -1134,7 +1135,7 @@ static const u32 a5xx_registers[] = {
static void a5xx_dump(struct msm_gpu *gpu)
{
- dev_info(gpu->dev->dev, "status: %08x\n",
+ DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
adreno_dump(gpu);
}
@@ -1211,10 +1212,6 @@ struct a5xx_gpu_state {
u32 *hlsqregs;
};
-#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
- readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
- interval, timeout)
-
static int a5xx_crashdumper_init(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
@@ -1222,16 +1219,10 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
&dumper->bo, &dumper->iova);
- return PTR_ERR_OR_ZERO(dumper->ptr);
-}
-
-static void a5xx_crashdumper_free(struct msm_gpu *gpu,
- struct a5xx_crashdumper *dumper)
-{
- msm_gem_put_iova(dumper->bo, gpu->aspace);
- msm_gem_put_vaddr(dumper->bo);
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
- drm_gem_object_put(dumper->bo);
+ return PTR_ERR_OR_ZERO(dumper->ptr);
}
static int a5xx_crashdumper_run(struct msm_gpu *gpu,
@@ -1326,7 +1317,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
if (a5xx_crashdumper_run(gpu, &dumper)) {
kfree(a5xx_state->hlsqregs);
- a5xx_crashdumper_free(gpu, &dumper);
+ msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
return;
}
@@ -1334,7 +1325,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
count * sizeof(u32));
- a5xx_crashdumper_free(gpu, &dumper);
+ msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
}
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1505,7 +1496,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
int ret;
if (!pdev) {
- dev_err(dev->dev, "No A5XX device is defined\n");
+ DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
return ERR_PTR(-ENXIO);
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 7a41e1c147e4..70e65c94e525 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -298,7 +298,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
if (IS_ERR(ptr))
- goto err;
+ return;
+
+ msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");
while (cmds_size > 0) {
int i;
@@ -317,15 +319,4 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_dwords = dwords;
-
- return;
-err:
- if (a5xx_gpu->gpmu_iova)
- msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
- if (a5xx_gpu->gpmu_bo)
- drm_gem_object_put(a5xx_gpu->gpmu_bo);
-
- a5xx_gpu->gpmu_bo = NULL;
- a5xx_gpu->gpmu_iova = 0;
- a5xx_gpu->gpmu_dwords = 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 4c357ead1be6..3d62310a535f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -92,7 +92,7 @@ static void a5xx_preempt_timer(struct timer_list *t)
if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
return;
- dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+ DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
queue_work(priv->wq, &gpu->recover_work);
}
@@ -188,7 +188,7 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
if (unlikely(status)) {
set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
- dev_err(dev->dev, "%s: Preemption failed to complete\n",
+ DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
gpu->name);
queue_work(priv->wq, &gpu->recover_work);
return;
@@ -245,6 +245,8 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
if (IS_ERR(ptr))
return PTR_ERR(ptr);
+ msm_gem_object_set_name(bo, "preempt");
+
a5xx_gpu->preempt_bo[ring->id] = bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
@@ -267,18 +269,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
- for (i = 0; i < gpu->nr_rings; i++) {
- if (!a5xx_gpu->preempt_bo[i])
- continue;
-
- msm_gem_put_vaddr(a5xx_gpu->preempt_bo[i]);
-
- if (a5xx_gpu->preempt_iova[i])
- msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
-
- drm_gem_object_put(a5xx_gpu->preempt_bo[i]);
- a5xx_gpu->preempt_bo[i] = NULL;
- }
+ for (i = 0; i < gpu->nr_rings; i++)
+ msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
}
void a5xx_preempt_init(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index a6f7c40454a6..f44553ec3193 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -501,7 +501,7 @@ enum a6xx_vfd_perfcounter_select {
PERF_VFDP_VS_STAGE_WAVES = 22,
};
-enum a6xx_hslq_perfcounter_select {
+enum a6xx_hlsq_perfcounter_select {
PERF_HLSQ_BUSY_CYCLES = 0,
PERF_HLSQ_STALL_CYCLES_UCHE = 1,
PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
@@ -2959,6 +2959,8 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+#define A6XX_GRAS_LRZ_CNTL_UNK3 0x00000008
+#define A6XX_GRAS_LRZ_CNTL_UNK4 0x00000010
#define REG_A6XX_GRAS_UNKNOWN_8101 0x00008101
@@ -2997,6 +2999,13 @@ static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110
#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR 0x00010000
#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
#define A6XX_GRAS_2D_SRC_TL_X_X__MASK 0x00ffff00
@@ -3449,6 +3458,7 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
}
#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -3642,6 +3652,9 @@ static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+#define REG_A6XX_RB_LRZ_CNTL 0x00008898
+#define A6XX_RB_LRZ_CNTL_ENABLE 0x00000001
+
#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
@@ -3674,6 +3687,14 @@ static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
}
+#define REG_A6XX_RB_MSAA_CNTL 0x000088d5
+#define A6XX_RB_MSAA_CNTL_SAMPLES__MASK 0x00000018
+#define A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT 3
+static inline uint32_t A6XX_RB_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_MSAA_CNTL_SAMPLES__MASK;
+}
+
#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
@@ -3684,6 +3705,12 @@ static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
}
#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK 0x00000018
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT 3
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK;
+}
#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
@@ -3780,6 +3807,9 @@ static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val
{
return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
}
+#define A6XX_RB_2D_BLIT_CNTL_SCISSOR 0x00010000
+
+#define REG_A6XX_RB_UNKNOWN_8C01 0x00008c01
#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
@@ -4465,6 +4495,7 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
#define A6XX_SP_BLEND_CNTL_ENABLED 0x00000001
#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
@@ -4643,6 +4674,8 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
#define REG_A6XX_SP_UNKNOWN_AB20 0x0000ab20
+#define REG_A6XX_SP_UNKNOWN_ACC0 0x0000acc0
+
#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
#define REG_A6XX_SP_UNKNOWN_AE03 0x0000ae03
@@ -4700,11 +4733,34 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap va
return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
}
#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+#define A6XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
+
+#define REG_A6XX_SP_PS_2D_SRC_SIZE 0x0000b4c1
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
+}
#define REG_A6XX_SP_PS_2D_SRC_LO 0x0000b4c2
#define REG_A6XX_SP_PS_2D_SRC_HI 0x0000b4c3
+#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x01fffe00
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
+}
+
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_LO 0x0000b4ca
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI 0x0000b4cb
@@ -5033,6 +5089,12 @@ static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
{
return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
}
+#define A6XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
+#define A6XX_TEX_CONST_0_SAMPLES__SHIFT 20
+static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SAMPLES__SHIFT) & A6XX_TEX_CONST_0_SAMPLES__MASK;
+}
#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
#define A6XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
@@ -5365,5 +5427,9 @@ static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0 0x00000001
+
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002
+
#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index d4e98e5876bc..5beb83d1cf87 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -51,10 +51,31 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
return IRQ_HANDLED;
}
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
+{
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+ if (IS_ERR_OR_NULL(gmu->mmio))
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+ return !(val &
+ (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
+ A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
+}
+
/* Check to see if the GX rail is still powered */
-static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
- u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+ if (IS_ERR_OR_NULL(gmu->mmio))
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
return !(val &
(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
@@ -153,7 +174,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
val == 0xbabeface, 100, 10000);
if (ret)
- dev_err(gmu->dev, "GMU firmware initialization timed out\n");
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
return ret;
}
@@ -168,7 +189,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
val & 1, 100, 10000);
if (ret)
- dev_err(gmu->dev, "Unable to start the HFI queues\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
return ret;
}
@@ -209,7 +230,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
val & (1 << ack), 100, 10000);
if (ret)
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Timeout waiting for GMU OOB set %s: 0x%x\n",
name,
gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
@@ -251,7 +272,7 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
(val & 0x38) == 0x28, 1, 100);
if (ret) {
- dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+ DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
@@ -273,7 +294,7 @@ static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
(val & 0x04), 100, 10000);
if (ret)
- dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+ DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
@@ -317,7 +338,7 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
/* Check to see if the GMU really did slumber */
if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
!= 0x0f) {
- dev_err(gmu->dev, "The GMU did not go into slumber\n");
+ DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
ret = -ETIMEDOUT;
}
}
@@ -339,23 +360,27 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
val & (1 << 1), 100, 10000);
if (ret) {
- dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
return ret;
}
ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
!val, 100, 10000);
- if (!ret) {
- gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
-
- /* Re-enable the power counter */
- gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
- return 0;
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+ return ret;
}
- dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
- return ret;
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ /* Set up CX GMU counter 0 to count busy ticks */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+ gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
+
+ /* Enable the power counter */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+ return 0;
}
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
@@ -368,7 +393,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
val, val & (1 << 16), 100, 10000);
if (ret)
- dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
@@ -520,7 +545,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
/* Sanity check the size of the firmware that was loaded */
if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"GMU firmware is bigger than the available region\n");
return -EINVAL;
}
@@ -764,7 +789,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
*/
if (ret)
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Unable to slumber GMU: status = 0%x/0%x\n",
gmu_read(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
@@ -843,7 +868,7 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
IOMMU_READ | IOMMU_WRITE);
if (ret) {
- dev_err(gmu->dev, "Unable to map GMU buffer object\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
for (i = i - 1 ; i >= 0; i--)
iommu_unmap(gmu->domain,
@@ -902,26 +927,6 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
return ret;
}
-/* Get the list of RPMh voltage levels from cmd-db */
-static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
-{
- u32 len = cmd_db_read_aux_data_len(id);
-
- if (!len)
- return 0;
-
- if (WARN_ON(len > size))
- return -EINVAL;
-
- cmd_db_read_aux_data(id, vals, len);
-
- /*
- * The data comes back as an array of unsigned shorts so adjust the
- * count accordingly
- */
- return len >> 1;
-}
-
/* Return the 'arc-level' for the given frequency */
static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
{
@@ -949,11 +954,30 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
}
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
- unsigned long *freqs, int freqs_count,
- u16 *pri, int pri_count,
- u16 *sec, int sec_count)
+ unsigned long *freqs, int freqs_count, const char *id)
{
int i, j;
+ const u16 *pri, *sec;
+ size_t pri_count, sec_count;
+
+ pri = cmd_db_read_aux_data(id, &pri_count);
+ if (IS_ERR(pri))
+ return PTR_ERR(pri);
+ /*
+ * The data comes back as an array of unsigned shorts so adjust the
+ * count accordingly
+ */
+ pri_count >>= 1;
+ if (!pri_count)
+ return -EINVAL;
+
+ sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
+ if (IS_ERR(sec))
+ return PTR_ERR(sec);
+
+ sec_count >>= 1;
+ if (!sec_count)
+ return -EINVAL;
/* Construct a vote for each frequency */
for (i = 0; i < freqs_count; i++) {
@@ -969,12 +993,12 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
}
if (j == pri_count) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"Level %u not found in in the RPMh list\n",
level);
- dev_err(dev, "Available levels:\n");
+ DRM_DEV_ERROR(dev, "Available levels:\n");
for (j = 0; j < pri_count; j++)
- dev_err(dev, " %u\n", pri[j]);
+ DRM_DEV_ERROR(dev, " %u\n", pri[j]);
return -EINVAL;
}
@@ -1012,25 +1036,15 @@ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
-
- u16 gx[16], cx[16], mx[16];
- u32 gxcount, cxcount, mxcount;
int ret;
- /* Get the list of available voltage levels for each component */
- gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
- cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
- mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));
-
/* Build the GX votes */
ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
- gmu->gpu_freqs, gmu->nr_gpu_freqs,
- gx, gxcount, mx, mxcount);
+ gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
/* Build the CX votes */
ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
- gmu->gmu_freqs, gmu->nr_gmu_freqs,
- cx, cxcount, mx, mxcount);
+ gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
return ret;
}
@@ -1081,7 +1095,7 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
*/
ret = dev_pm_opp_of_add_table(gmu->dev);
if (ret) {
- dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
return ret;
}
@@ -1122,13 +1136,13 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
IORESOURCE_MEM, name);
if (!res) {
- dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
return ERR_PTR(-EINVAL);
}
ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!ret) {
- dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
return ERR_PTR(-EINVAL);
}
@@ -1145,7 +1159,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
name, gmu);
if (ret) {
- dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
return ret;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 35f765afae45..c721d9165d8e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -164,4 +164,7 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
+
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index db56f263ed77..1cc1c135236b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 631257c297fd..fefe773c989e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -4,6 +4,7 @@
#include "msm_gem.h"
#include "msm_mmu.h"
+#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
@@ -67,13 +68,36 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}
+static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
+ u64 iova)
+{
+ OUT_PKT7(ring, CP_REG_TO_MEM, 3);
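+ /* CP_REG_TO_MEM: dword count in bits [29:18] (2 here, for a LO/HI pair),
+  * bit 30 requests a 64-bit memory write of the result */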
+ OUT_RING(ring, counter | (1 << 30) | (2 << 18));
+ OUT_RING(ring, lower_32_bits(iova));
+ OUT_RING(ring, upper_32_bits(iova));
+}
+
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
+ unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ rbmemptr_stats(ring, index, cpcycles_start));
+
+ /*
+ * For PM4 the GMU register offsets are calculated from the base of the
+ * GPU registers so we need to add 0x1a800 to the register value on A630
+ * to get the right value from PM4.
+ */
+ get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ rbmemptr_stats(ring, index, alwayson_start));
+
/* Invalidate CCU depth and color */
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
@@ -98,6 +122,11 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
}
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ rbmemptr_stats(ring, index, cpcycles_end));
+ get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ rbmemptr_stats(ring, index, alwayson_end));
+
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
@@ -112,6 +141,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
+ trace_msm_gpu_submit_flush(submit,
+ gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
+ REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+
a6xx_flush(gpu, ring);
}
@@ -300,6 +333,8 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
return ret;
}
+
+ msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
}
gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -387,14 +422,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
- /* FIXME: not sure if this should live here or in a6xx_gmu.c */
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK,
- 0xff000000);
- gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
- 0xff, 0x20);
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE,
- 0x01);
-
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
@@ -481,7 +508,7 @@ out:
static void a6xx_dump(struct msm_gpu *gpu)
{
- dev_info(&gpu->pdev->dev, "status: %08x\n",
+ DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
gpu_read(gpu, REG_A6XX_RBBM_STATUS));
adreno_dump(gpu);
}
@@ -498,7 +525,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
adreno_dump_info(gpu);
for (i = 0; i < 8; i++)
- dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+ DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
if (hang_debug)
@@ -645,33 +672,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
-static const u32 a6xx_registers[] = {
- 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
- 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
- 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
- 0x0100, 0x011d, 0x0200, 0x020d, 0x0210, 0x0213, 0x0218, 0x023d,
- 0x0400, 0x04f9, 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511,
- 0x0533, 0x0533, 0x0540, 0x0555, 0x0800, 0x0808, 0x0810, 0x0813,
- 0x0820, 0x0821, 0x0823, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
- 0x084f, 0x086f, 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4,
- 0x08d0, 0x08dd, 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911,
- 0x0928, 0x093e, 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996,
- 0x0998, 0x099e, 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1,
- 0x09c2, 0x09c8, 0x0a00, 0x0a03, 0x0c00, 0x0c04, 0x0c06, 0x0c06,
- 0x0c10, 0x0cd9, 0x0e00, 0x0e0e, 0x0e10, 0x0e13, 0x0e17, 0x0e19,
- 0x0e1c, 0x0e2b, 0x0e30, 0x0e32, 0x0e38, 0x0e39, 0x8600, 0x8601,
- 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b, 0x8630, 0x8637,
- 0x8e01, 0x8e01, 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e0c, 0x8e0c,
- 0x8e10, 0x8e1c, 0x8e20, 0x8e25, 0x8e28, 0x8e28, 0x8e2c, 0x8e2f,
- 0x8e3b, 0x8e3e, 0x8e40, 0x8e43, 0x8e50, 0x8e5e, 0x8e70, 0x8e77,
- 0x9600, 0x9604, 0x9624, 0x9637, 0x9e00, 0x9e01, 0x9e03, 0x9e0e,
- 0x9e11, 0x9e16, 0x9e19, 0x9e19, 0x9e1c, 0x9e1c, 0x9e20, 0x9e23,
- 0x9e30, 0x9e31, 0x9e34, 0x9e34, 0x9e70, 0x9e72, 0x9e78, 0x9e79,
- 0x9e80, 0x9fff, 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a,
- 0xa610, 0xa617, 0xa630, 0xa630,
- ~0
-};
-
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -724,14 +724,6 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-static void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
- struct drm_printer *p)
-{
- adreno_show(gpu, state, p);
-}
-#endif
-
static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -746,8 +738,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (a6xx_gpu->sqe_bo) {
- if (a6xx_gpu->sqe_iova)
- msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
}
@@ -796,6 +787,8 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a6xx_gpu_busy,
.gpu_get_freq = a6xx_gmu_get_freq,
.gpu_set_freq = a6xx_gmu_set_freq,
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
},
.get_timestamp = a6xx_get_timestamp,
};
@@ -817,7 +810,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a6xx_gpu->base;
gpu = &adreno_gpu->base;
- adreno_gpu->registers = a6xx_registers;
+ adreno_gpu->registers = NULL;
adreno_gpu->reg_offsets = a6xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 4127dcebc202..528a4cfe07cd 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -56,6 +56,14 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
+int a6xx_gpu_state_put(struct msm_gpu_state *state);
+
#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
new file mode 100644
index 000000000000..e686331fa089
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -0,0 +1,1165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/ascii85.h>
+#include "msm_gem.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.h"
+#include "a6xx_gpu_state.h"
+#include "a6xx_gmu.xml.h"
+
+struct a6xx_gpu_state_obj {
+ const void *handle;
+ u32 *data;
+};
+
+struct a6xx_gpu_state {
+ struct msm_gpu_state base;
+
+ struct a6xx_gpu_state_obj *gmu_registers;
+ int nr_gmu_registers;
+
+ struct a6xx_gpu_state_obj *registers;
+ int nr_registers;
+
+ struct a6xx_gpu_state_obj *shaders;
+ int nr_shaders;
+
+ struct a6xx_gpu_state_obj *clusters;
+ int nr_clusters;
+
+ struct a6xx_gpu_state_obj *dbgahb_clusters;
+ int nr_dbgahb_clusters;
+
+ struct a6xx_gpu_state_obj *indexed_regs;
+ int nr_indexed_regs;
+
+ struct a6xx_gpu_state_obj *debugbus;
+ int nr_debugbus;
+
+ struct a6xx_gpu_state_obj *vbif_debugbus;
+
+ struct a6xx_gpu_state_obj *cx_debugbus;
+ int nr_cx_debugbus;
+
+ struct list_head objs;
+};
+
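+/*
+ * Crashdumper script entries are pairs of 64-bit words: the first holds the
+ * value to write (or the target iova for a read) and the second encodes the
+ * register offset in the upper bits plus either a write marker or the number
+ * of dwords to read. A pair of zero words terminates the script.
+ */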
+static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val)
+{
+ in[0] = val;
+ in[1] = (((u64) reg) << 44 | (1 << 21) | 1);
+
+ return 2;
+}
+
+static inline int CRASHDUMP_READ(u64 *in, u32 reg, u32 dwords, u64 target)
+{
+ in[0] = target;
+ in[1] = (((u64) reg) << 44 | dwords);
+
+ return 2;
+}
+
+static inline int CRASHDUMP_FINI(u64 *in)
+{
+ in[0] = 0;
+ in[1] = 0;
+
+ return 2;
+}
+
+struct a6xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
+
+struct a6xx_state_memobj {
+ struct list_head node;
+ unsigned long long data[];
+};
+
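+/*
+ * Allocate a buffer that is tracked on the state's object list so it is
+ * freed together with the GPU state in a6xx_gpu_state_destroy()
+ */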
+static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
+{
+ struct a6xx_state_memobj *obj =
+ kzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
+
+ if (!obj)
+ return NULL;
+
+ list_add_tail(&obj->node, &a6xx_state->objs);
+ return &obj->data;
+}
+
+static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
+ size_t size)
+{
+ void *dst = state_kcalloc(a6xx_state, 1, size);
+
+ if (dst)
+ memcpy(dst, src, size);
+ return dst;
+}
+
+/*
+ * Allocate 1MB for the crashdumper scratch region - 8k for the script and
+ * the rest for the data
+ */
+#define A6XX_CD_DATA_OFFSET 8192
+#define A6XX_CD_DATA_SIZE (SZ_1M - 8192)
+
+static int a6xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a6xx_crashdumper *dumper)
+{
+ dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+ &dumper->bo, &dumper->iova);
+
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
+
+ return PTR_ERR_OR_ZERO(dumper->ptr);
+}
+
+static int a6xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a6xx_crashdumper *dumper)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u32 val;
+ int ret;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))
+ return -EINVAL;
+
+ /* Make sure all pending memory writes are posted */
+ wmb();
+
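+	/* Point the dumper at the script, kick it off and poll for completion */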
+ gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO,
+ REG_A6XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
+
+ ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
+ val & 0x02, 100, 10000);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
+
+ return ret;
+}
+
+/* read a value from the GX debug bus */
+static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
+ u32 *data)
+{
+ u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+ A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ /* Wait 1 us to make sure the data is flowing */
+ udelay(1);
+
+ data[0] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+ data[1] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+ return 2;
+}
+
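+/* CX debug bus registers live behind an ioremapped window; offsets are in dwords */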
+#define cxdbg_write(ptr, offset, val) \
+ msm_writel((val), (ptr) + ((offset) << 2))
+
+#define cxdbg_read(ptr, offset) \
+ msm_readl((ptr) + ((offset) << 2))
+
+/* read a value from the CX debug bus */
+static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+ u32 *data)
+{
+ u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+ A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ /* Wait 1 us to make sure the data is flowing */
+ udelay(1);
+
+ data[0] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+ data[1] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+ return 2;
+}
+
+/* Read a chunk of data from the VBIF debug bus */
+static int vbif_debugbus_read(struct msm_gpu *gpu, u32 ctrl0, u32 ctrl1,
+ u32 reg, int count, u32 *data)
+{
+ int i;
+
+ gpu_write(gpu, ctrl0, reg);
+
+ for (i = 0; i < count; i++) {
+ gpu_write(gpu, ctrl1, i);
+ data[i] = gpu_read(gpu, REG_A6XX_VBIF_TEST_BUS_OUT);
+ }
+
+ return count;
+}
+
+#define AXI_ARB_BLOCKS 2
+#define XIN_AXI_BLOCKS 5
+#define XIN_CORE_BLOCKS 4
+
+#define VBIF_DEBUGBUS_BLOCK_SIZE \
+ ((16 * AXI_ARB_BLOCKS) + \
+ (18 * XIN_AXI_BLOCKS) + \
+ (12 * XIN_CORE_BLOCKS))
+
+static void a6xx_get_vbif_debugbus_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_gpu_state_obj *obj)
+{
+ u32 clk, *ptr;
+ int i;
+
+ obj->data = state_kcalloc(a6xx_state, VBIF_DEBUGBUS_BLOCK_SIZE,
+ sizeof(u32));
+ if (!obj->data)
+ return;
+
+ obj->handle = NULL;
+
+ /* Get the current clock setting */
+ clk = gpu_read(gpu, REG_A6XX_VBIF_CLKON);
+
+ /* Force on the bus so we can read it */
+ gpu_write(gpu, REG_A6XX_VBIF_CLKON,
+ clk | A6XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+ /* We will read from BUS2 first, so disable BUS1 */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS1_CTRL0, 0);
+
+ /* Enable the VBIF bus for reading */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS_OUT_CTRL, 1);
+
+ ptr = obj->data;
+
+ for (i = 0; i < AXI_ARB_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+ 1 << (i + 16), 16, ptr);
+
+ for (i = 0; i < XIN_AXI_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+ 1 << i, 18, ptr);
+
+ /* Stop BUS2 so we can turn on BUS1 */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS2_CTRL0, 0);
+
+ for (i = 0; i < XIN_CORE_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS1_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS1_CTRL1,
+ 1 << i, 12, ptr);
+
+ /* Restore the VBIF clock setting */
+ gpu_write(gpu, REG_A6XX_VBIF_CLKON, clk);
+}
+
+static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_debugbus_block *block,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+ u32 *ptr;
+
+ obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+ if (!obj->data)
+ return;
+
+ obj->handle = block;
+
+ for (ptr = obj->data, i = 0; i < block->count; i++)
+ ptr += debugbus_read(gpu, block->id, i, ptr);
+}
+
+static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_debugbus_block *block,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+ u32 *ptr;
+
+ obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+ if (!obj->data)
+ return;
+
+ obj->handle = block;
+
+ for (ptr = obj->data, i = 0; i < block->count; i++)
+ ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+}
+
+static void a6xx_get_debugbus(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct resource *res;
+ void __iomem *cxdbg = NULL;
+
+ /* Set up the GX debug bus */
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+
+ /* Set up the CX debug bus - it lives elsewhere in the system so do a
+ * temporary ioremap for the registers
+ */
+ res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM,
+ "cx_dbgc");
+
+ if (res)
+ cxdbg = ioremap(res->start, resource_size(res));
+
+ if (cxdbg) {
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
+ 0x76543210);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
+ 0xFEDCBA98);
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ }
+
+ a6xx_state->debugbus = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_debugbus_blocks),
+ sizeof(*a6xx_state->debugbus));
+
+ if (a6xx_state->debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++)
+ a6xx_get_debugbus_block(gpu,
+ a6xx_state,
+ &a6xx_debugbus_blocks[i],
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+ }
+
+ a6xx_state->vbif_debugbus =
+ state_kcalloc(a6xx_state, 1,
+ sizeof(*a6xx_state->vbif_debugbus));
+
+ if (a6xx_state->vbif_debugbus)
+ a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
+ a6xx_state->vbif_debugbus);
+
+ if (cxdbg) {
+ a6xx_state->cx_debugbus =
+ state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_cx_debugbus_blocks),
+ sizeof(*a6xx_state->cx_debugbus));
+
+ if (a6xx_state->cx_debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_cx_debugbus_blocks); i++)
+ a6xx_get_cx_debugbus_block(cxdbg,
+ a6xx_state,
+ &a6xx_cx_debugbus_blocks[i],
+ &a6xx_state->cx_debugbus[i]);
+
+ a6xx_state->nr_cx_debugbus =
+ ARRAY_SIZE(a6xx_cx_debugbus_blocks);
+ }
+
+ iounmap(cxdbg);
+ }
+}
+
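+/* Number of registers in an inclusive { start, end } pair from a register list */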
+#define RANGE(reg, a) ((reg)[(a) + 1] - (reg)[(a)] + 1)
+
+/* Read a data cluster from behind the AHB aperture */
+static void a6xx_get_dbgahb_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_dbgahb_cluster *dbgahb,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+ int j;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+ (dbgahb->statetype + i * 2) << 8);
+
+ for (j = 0; j < dbgahb->count; j += 2) {
+ int count = RANGE(dbgahb->registers, j);
+ u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ dbgahb->registers[j] - (dbgahb->base >> 2);
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+
+ if (i == 0)
+ regcount += count;
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = dbgahb;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_dbgahb_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_dbgahb_clusters),
+ sizeof(*a6xx_state->dbgahb_clusters));
+
+ if (!a6xx_state->dbgahb_clusters)
+ return;
+
+ a6xx_state->nr_dbgahb_clusters = ARRAY_SIZE(a6xx_dbgahb_clusters);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_clusters); i++)
+ a6xx_get_dbgahb_cluster(gpu, a6xx_state,
+ &a6xx_dbgahb_clusters[i],
+ &a6xx_state->dbgahb_clusters[i], dumper);
+}
+
+/* Read a data cluster from the CP aperture with the crashdumper */
+static void a6xx_get_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_cluster *cluster,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ /* Some clusters need a selector register to be programmed too */
+ if (cluster->sel_reg)
+ in += CRASHDUMP_WRITE(in, cluster->sel_reg, cluster->sel_val);
+
+ for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+ int j;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_CP_APERTURE_CNTL_CD,
+ (cluster->id << 8) | (i << 4) | i);
+
+ for (j = 0; j < cluster->count; j += 2) {
+ int count = RANGE(cluster->registers, j);
+
+ in += CRASHDUMP_READ(in, cluster->registers[j],
+ count, out);
+
+ out += count * sizeof(u32);
+
+ if (i == 0)
+ regcount += count;
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = cluster;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->clusters = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters));
+
+ if (!a6xx_state->clusters)
+ return;
+
+ a6xx_state->nr_clusters = ARRAY_SIZE(a6xx_clusters);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++)
+ a6xx_get_cluster(gpu, a6xx_state, &a6xx_clusters[i],
+ &a6xx_state->clusters[i], dumper);
+}
+
+/* Read a shader / debug block from the HLSQ aperture with the crashdumper */
+static void a6xx_get_shader_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_shader_block *block,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ size_t datasize = block->size * A6XX_NUM_SHADER_BANKS * sizeof(u32);
+ int i;
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+ (block->type << 8) | i);
+
+ in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
+ block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = block;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_shaders(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->shaders = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_shader_blocks), sizeof(*a6xx_state->shaders));
+
+ if (!a6xx_state->shaders)
+ return;
+
+ a6xx_state->nr_shaders = ARRAY_SIZE(a6xx_shader_blocks);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++)
+ a6xx_get_shader_block(gpu, a6xx_state, &a6xx_shader_blocks[i],
+ &a6xx_state->shaders[i], dumper);
+}
+
+/* Read registers from behind the HLSQ aperture with the crashdumper */
+static void a6xx_get_crashdumper_hlsq_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, regs->val1);
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ regs->registers[i] - (regs->val0 >> 2);
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
+/* Read a block of registers using the crashdumper */
+static void a6xx_get_crashdumper_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ /* Some blocks might need to program a selector register first */
+ if (regs->val0)
+ in += CRASHDUMP_WRITE(in, regs->val0, regs->val1);
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+
+ in += CRASHDUMP_READ(in, regs->registers[i], count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
+/* Read a block of registers via AHB */
+static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i, regcount = 0, index = 0;
+
+ for (i = 0; i < regs->count; i += 2)
+ regcount += RANGE(regs->registers, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ int j;
+
+ for (j = 0; j < count; j++)
+ obj->data[index++] = gpu_read(gpu,
+ regs->registers[i] + j);
+ }
+}
+
+/* Read a block of GMU registers */
+static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int i, regcount = 0, index = 0;
+
+ for (i = 0; i < regs->count; i += 2)
+ regcount += RANGE(regs->registers, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ int j;
+
+ for (j = 0; j < count; j++)
+ obj->data[index++] = gmu_read(gmu,
+ regs->registers[i] + j);
+ }
+}
+
+static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
+ 2, sizeof(*a6xx_state->gmu_registers));
+
+ if (!a6xx_state->gmu_registers)
+ return;
+
+ a6xx_state->nr_gmu_registers = 2;
+
+ /* Get the CX GMU registers from AHB */
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
+ &a6xx_state->gmu_registers[0]);
+
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ return;
+
+ /* Set the fence to ALLOW mode so we can access the registers */
+ gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
+ &a6xx_state->gmu_registers[1]);
+}
+
+static void a6xx_get_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
+ ARRAY_SIZE(a6xx_reglist) +
+ ARRAY_SIZE(a6xx_hlsq_reglist);
+ int index = 0;
+
+ a6xx_state->registers = state_kcalloc(a6xx_state,
+ count, sizeof(*a6xx_state->registers));
+
+ if (!a6xx_state->registers)
+ return;
+
+ a6xx_state->nr_registers = count;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++)
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_ahb_reglist[i],
+ &a6xx_state->registers[index++]);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
+ a6xx_get_crashdumper_registers(gpu,
+ a6xx_state, &a6xx_reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_hlsq_reglist); i++)
+ a6xx_get_crashdumper_hlsq_registers(gpu,
+ a6xx_state, &a6xx_hlsq_reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+}
+
+/* Read a block of data from an indexed register pair */
+static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_indexed_registers *indexed,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+
+ obj->handle = (const void *) indexed;
+ obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ /* All the indexed banks start at address 0 */
+ gpu_write(gpu, indexed->addr, 0);
+
+ /* Read the data - each read increments the internal address by 1 */
+ for (i = 0; i < indexed->count; i++)
+ obj->data[i] = gpu_read(gpu, indexed->data);
+}
+
+static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ u32 mempool_size;
+ int count = ARRAY_SIZE(a6xx_indexed_reglist) + 1;
+ int i;
+
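+	/* Allocate one slot per indexed register bank plus one for the CP mempool dump */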
+ a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
+		sizeof(*a6xx_state->indexed_regs));
+ if (!a6xx_state->indexed_regs)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_indexed_reglist); i++)
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_indexed_reglist[i],
+ &a6xx_state->indexed_regs[i]);
+
+ /* Set the CP mempool size to 0 to stabilize it while dumping */
+ mempool_size = gpu_read(gpu, REG_A6XX_CP_MEM_POOL_SIZE);
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 0);
+
+ /* Get the contents of the CP mempool */
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed,
+ &a6xx_state->indexed_regs[i]);
+
+ /*
+ * Offset 0x2000 in the mempool is the size - copy the saved size over
+ * so the data is consistent
+ */
+ a6xx_state->indexed_regs[i].data[0x2000] = mempool_size;
+
+ /* Restore the size in the hardware */
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+
+ a6xx_state->nr_indexed_regs = count;
+}
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a6xx_crashdumper dumper = { 0 };
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
+ GFP_KERNEL);
+
+ if (!a6xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&a6xx_state->objs);
+
+ /* Get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &a6xx_state->base);
+
+ a6xx_get_gmu_registers(gpu, a6xx_state);
+
+ /* If GX isn't on the rest of the data isn't going to be accessible */
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ return &a6xx_state->base;
+
+ /* Get the banks of indexed registers */
+ a6xx_get_indexed_registers(gpu, a6xx_state);
+
+ /* Try to initialize the crashdumper */
+ if (!a6xx_crashdumper_init(gpu, &dumper)) {
+ a6xx_get_registers(gpu, a6xx_state, &dumper);
+ a6xx_get_shaders(gpu, a6xx_state, &dumper);
+ a6xx_get_clusters(gpu, a6xx_state, &dumper);
+ a6xx_get_dbgahb_clusters(gpu, a6xx_state, &dumper);
+
+ msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
+ }
+
+ a6xx_get_debugbus(gpu, a6xx_state);
+
+ return &a6xx_state->base;
+}
+
+static void a6xx_gpu_state_destroy(struct kref *kref)
+{
+ struct a6xx_state_memobj *obj, *tmp;
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a6xx_gpu_state *a6xx_state = container_of(state,
+ struct a6xx_gpu_state, base);
+
+ list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
+ kfree(obj);
+
+ adreno_gpu_state_destroy(state);
+ kfree(a6xx_state);
+}
+
+int a6xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a6xx_gpu_state_destroy);
+}
+
+static void a6xx_show_registers(const u32 *registers, u32 *data, size_t count,
+ struct drm_printer *p)
+{
+ int i, index = 0;
+
+ if (!data)
+ return;
+
+ for (i = 0; i < count; i += 2) {
+ u32 count = RANGE(registers, i);
+ u32 offset = registers[i];
+ int j;
+
+ for (j = 0; j < count; index++, offset++, j++) {
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+}
+
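+/* Trim trailing zero dwords and print the remaining data as an ascii85 block */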
+static void print_ascii85(struct drm_printer *p, size_t len, u32 *data)
+{
+ char out[ASCII85_BUFSZ];
+ long i, l, datalen = 0;
+
+ for (i = 0; i < len >> 2; i++) {
+ if (data[i])
+ datalen = (i + 1) << 2;
+ }
+
+ if (datalen == 0)
+ return;
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ l = ascii85_encode_len(datalen);
+
+ for (i = 0; i < l; i++)
+ drm_puts(p, ascii85_encode(data[i], out));
+
+ drm_puts(p, "\n");
+}
+
+static void print_name(struct drm_printer *p, const char *fmt, const char *name)
+{
+ drm_puts(p, fmt);
+ drm_puts(p, name);
+ drm_puts(p, "\n");
+}
+
+static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_shader_block *block = obj->handle;
+ int i;
+
+ if (!obj->handle)
+ return;
+
+ print_name(p, " - type: ", block->name);
+
+ for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+ drm_printf(p, " - bank: %d\n", i);
+ drm_printf(p, " size: %d\n", block->size);
+
+ if (!obj->data)
+ continue;
+
+ print_ascii85(p, block->size << 2,
+ obj->data + (block->size * i));
+ }
+}
+
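+/* Print the register values captured for each context of a cluster */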
+static void a6xx_show_cluster_data(const u32 *registers, int size, u32 *data,
+ struct drm_printer *p)
+{
+ int ctx, index = 0;
+
+ for (ctx = 0; ctx < A6XX_NUM_CONTEXTS; ctx++) {
+ int j;
+
+ drm_printf(p, " - context: %d\n", ctx);
+
+ for (j = 0; j < size; j += 2) {
+ u32 count = RANGE(registers, j);
+ u32 offset = registers[j];
+ int k;
+
+ for (k = 0; k < count; index++, offset++, k++) {
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+ }
+}
+
+static void a6xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_dbgahb_cluster *dbgahb = obj->handle;
+
+ if (dbgahb) {
+ print_name(p, " - cluster-name: ", dbgahb->name);
+ a6xx_show_cluster_data(dbgahb->registers, dbgahb->count,
+ obj->data, p);
+ }
+}
+
+static void a6xx_show_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_cluster *cluster = obj->handle;
+
+ if (cluster) {
+ print_name(p, " - cluster-name: ", cluster->name);
+ a6xx_show_cluster_data(cluster->registers, cluster->count,
+ obj->data, p);
+ }
+}
+
+static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_indexed_registers *indexed = obj->handle;
+
+ if (!indexed)
+ return;
+
+ print_name(p, " - regs-name: ", indexed->name);
+ drm_printf(p, " dwords: %d\n", indexed->count);
+
+ print_ascii85(p, indexed->count << 2, obj->data);
+}
+
+static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block,
+ u32 *data, struct drm_printer *p)
+{
+ if (block) {
+ print_name(p, " - debugbus-block: ", block->name);
+
+ /*
+ * count for regular debugbus data is in quadwords,
+ * but print the size in dwords for consistency
+ */
+ drm_printf(p, " count: %d\n", block->count << 1);
+
+ print_ascii85(p, block->count << 3, data);
+ }
+}
+
+static void a6xx_show_debugbus(struct a6xx_gpu_state *a6xx_state,
+ struct drm_printer *p)
+{
+ int i;
+
+ for (i = 0; i < a6xx_state->nr_debugbus; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->debugbus[i];
+
+ a6xx_show_debugbus_block(obj->handle, obj->data, p);
+ }
+
+ if (a6xx_state->vbif_debugbus) {
+ struct a6xx_gpu_state_obj *obj = a6xx_state->vbif_debugbus;
+
+ drm_puts(p, " - debugbus-block: A6XX_DBGBUS_VBIF\n");
+ drm_printf(p, " count: %d\n", VBIF_DEBUGBUS_BLOCK_SIZE);
+
+ /* vbif debugbus data is in dwords. Confusing, huh? */
+ print_ascii85(p, VBIF_DEBUGBUS_BLOCK_SIZE << 2, obj->data);
+ }
+
+ for (i = 0; i < a6xx_state->nr_cx_debugbus; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->cx_debugbus[i];
+
+ a6xx_show_debugbus_block(obj->handle, obj->data, p);
+ }
+}
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ struct a6xx_gpu_state *a6xx_state = container_of(state,
+ struct a6xx_gpu_state, base);
+ int i;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ adreno_show(gpu, state, p);
+
+ drm_puts(p, "registers:\n");
+ for (i = 0; i < a6xx_state->nr_registers; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
+ const struct a6xx_registers *regs = obj->handle;
+
+ if (!obj->handle)
+ continue;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
+
+ drm_puts(p, "registers-gmu:\n");
+ for (i = 0; i < a6xx_state->nr_gmu_registers; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->gmu_registers[i];
+ const struct a6xx_registers *regs = obj->handle;
+
+ if (!obj->handle)
+ continue;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
+
+ drm_puts(p, "indexed-registers:\n");
+ for (i = 0; i < a6xx_state->nr_indexed_regs; i++)
+ a6xx_show_indexed_regs(&a6xx_state->indexed_regs[i], p);
+
+ drm_puts(p, "shader-blocks:\n");
+ for (i = 0; i < a6xx_state->nr_shaders; i++)
+ a6xx_show_shader(&a6xx_state->shaders[i], p);
+
+ drm_puts(p, "clusters:\n");
+ for (i = 0; i < a6xx_state->nr_clusters; i++)
+ a6xx_show_cluster(&a6xx_state->clusters[i], p);
+
+ for (i = 0; i < a6xx_state->nr_dbgahb_clusters; i++)
+ a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
+
+ drm_puts(p, "debugbus:\n");
+ a6xx_show_debugbus(a6xx_state, p);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
new file mode 100644
index 000000000000..68cccfa2870a
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_CRASH_DUMP_H_
+#define _A6XX_CRASH_DUMP_H_
+
+#include "a6xx.xml.h"
+
+#define A6XX_NUM_CONTEXTS 2
+#define A6XX_NUM_SHADER_BANKS 3
+
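+/* Register lists below are inclusive { start, end } offset pairs */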
+static const u32 a6xx_gras_cluster[] = {
+ 0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809d, 0x80a0, 0x80a6,
+ 0x80af, 0x80f1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
+ 0x8400, 0x840b,
+};
+
+static const u32 a6xx_ps_cluster_rac[] = {
+ 0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881e, 0x8820, 0x8865,
+ 0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
+ 0x88c0, 0x88c1, 0x88d0, 0x88e3, 0x8900, 0x890c, 0x890f, 0x891a,
+ 0x8c00, 0x8c01, 0x8c08, 0x8c10, 0x8c17, 0x8c1f, 0x8c26, 0x8c33,
+};
+
+static const u32 a6xx_ps_cluster_rbp[] = {
+ 0x88f0, 0x88f3, 0x890d, 0x890e, 0x8927, 0x8928, 0x8bf0, 0x8bf1,
+ 0x8c02, 0x8c07, 0x8c11, 0x8c16, 0x8c20, 0x8c25,
+};
+
+static const u32 a6xx_ps_cluster[] = {
+ 0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
+};
+
+static const u32 a6xx_fe_cluster[] = {
+ 0x9300, 0x9306, 0x9800, 0x9806, 0x9b00, 0x9b07, 0xa000, 0xa009,
+ 0xa00e, 0xa0ef, 0xa0f8, 0xa0f8,
+};
+
+static const u32 a6xx_pc_vs_cluster[] = {
+ 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9b00, 0x9b07,
+};
+
+#define CLUSTER_FE 0
+#define CLUSTER_SP_VS 1
+#define CLUSTER_PC_VS 2
+#define CLUSTER_GRAS 3
+#define CLUSTER_SP_PS 4
+#define CLUSTER_PS 5
+
+#define CLUSTER(_id, _reg, _sel_reg, _sel_val) \
+ { .id = _id, .name = #_id,\
+ .registers = _reg, \
+ .count = ARRAY_SIZE(_reg), \
+ .sel_reg = _sel_reg, .sel_val = _sel_val }
+
+static const struct a6xx_cluster {
+ u32 id;
+ const char *name;
+ const u32 *registers;
+ size_t count;
+ u32 sel_reg;
+ u32 sel_val;
+} a6xx_clusters[] = {
+ CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0),
+ CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0),
+ CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0),
+};
+
+static const u32 a6xx_sp_vs_hlsq_cluster[] = {
+ 0xb800, 0xb803, 0xb820, 0xb822,
+};
+
+static const u32 a6xx_sp_vs_sp_cluster[] = {
+ 0xa800, 0xa824, 0xa830, 0xa83c, 0xa840, 0xa864, 0xa870, 0xa895,
+ 0xa8a0, 0xa8af, 0xa8c0, 0xa8c3,
+};
+
+static const u32 a6xx_hlsq_duplicate_cluster[] = {
+ 0xbb10, 0xbb11, 0xbb20, 0xbb29,
+};
+
+static const u32 a6xx_hlsq_2d_duplicate_cluster[] = {
+ 0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_duplicate_cluster[] = {
+ 0xab00, 0xab00, 0xab04, 0xab05, 0xab10, 0xab1b, 0xab20, 0xab20,
+};
+
+static const u32 a6xx_tp_duplicate_cluster[] = {
+ 0xb300, 0xb307, 0xb309, 0xb309, 0xb380, 0xb382,
+};
+
+static const u32 a6xx_sp_ps_hlsq_cluster[] = {
+ 0xb980, 0xb980, 0xb982, 0xb987, 0xb990, 0xb99b, 0xb9a0, 0xb9a2,
+ 0xb9c0, 0xb9c9,
+};
+
+static const u32 a6xx_sp_ps_hlsq_2d_cluster[] = {
+ 0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_ps_sp_cluster[] = {
+ 0xa980, 0xa9a8, 0xa9b0, 0xa9bc, 0xa9d0, 0xa9d3, 0xa9e0, 0xa9f3,
+ 0xaa00, 0xaa00, 0xaa30, 0xaa31,
+};
+
+static const u32 a6xx_sp_ps_sp_2d_cluster[] = {
+ 0xacc0, 0xacc0,
+};
+
+static const u32 a6xx_sp_ps_tp_cluster[] = {
+ 0xb180, 0xb183, 0xb190, 0xb191,
+};
+
+static const u32 a6xx_sp_ps_tp_2d_cluster[] = {
+ 0xb4c0, 0xb4d1,
+};
+
+#define CLUSTER_DBGAHB(_id, _base, _type, _reg) \
+ { .name = #_id, .statetype = _type, .base = _base, \
+ .registers = _reg, .count = ARRAY_SIZE(_reg) }
+
+static const struct a6xx_dbgahb_cluster {
+ const char *name;
+ u32 statetype;
+ u32 base;
+ const u32 *registers;
+ size_t count;
+} a6xx_dbgahb_clusters[] = {
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_sp_vs_hlsq_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_vs_sp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_hlsq_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002f000, 0x45, a6xx_hlsq_2d_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002c000, 0x1, a6xx_tp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_sp_ps_hlsq_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002f000, 0x46, a6xx_sp_ps_hlsq_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_ps_sp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002b000, 0x26, a6xx_sp_ps_sp_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_sp_ps_tp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002d000, 0x6, a6xx_sp_ps_tp_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_hlsq_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_tp_duplicate_cluster),
+};
+
+static const u32 a6xx_hlsq_registers[] = {
+ 0xbe00, 0xbe01, 0xbe04, 0xbe05, 0xbe08, 0xbe09, 0xbe10, 0xbe15,
+ 0xbe20, 0xbe23,
+};
+
+static const u32 a6xx_sp_registers[] = {
+ 0xae00, 0xae04, 0xae0c, 0xae0c, 0xae0f, 0xae2b, 0xae30, 0xae32,
+ 0xae35, 0xae35, 0xae3a, 0xae3f, 0xae50, 0xae52,
+};
+
+static const u32 a6xx_tp_registers[] = {
+ 0xb600, 0xb601, 0xb604, 0xb605, 0xb610, 0xb61b, 0xb620, 0xb623,
+};
+
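+/*
+ * val0/val1 carry either a selector register and value (REGS lists) or an
+ * aperture base address and statetype (HLSQ_DBG_REGS lists)
+ */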
+struct a6xx_registers {
+ const u32 *registers;
+ size_t count;
+ u32 val0;
+ u32 val1;
+};
+
+#define HLSQ_DBG_REGS(_base, _type, _array) \
+ { .val0 = _base, .val1 = _type, .registers = _array, \
+ .count = ARRAY_SIZE(_array), }
+
+static const struct a6xx_registers a6xx_hlsq_reglist[] = {
+ HLSQ_DBG_REGS(0x0002F800, 0x40, a6xx_hlsq_registers),
+ HLSQ_DBG_REGS(0x0002B800, 0x20, a6xx_sp_registers),
+ HLSQ_DBG_REGS(0x0002D800, 0x0, a6xx_tp_registers),
+};
+
+#define SHADER(_type, _size) \
+ { .type = _type, .name = #_type, .size = _size }
+
+static const struct a6xx_shader_block {
+ const char *name;
+ u32 type;
+ u32 size;
+} a6xx_shader_blocks[] = {
+ SHADER(A6XX_TP0_TMO_DATA, 0x200),
+ SHADER(A6XX_TP0_SMO_DATA, 0x80),
+ SHADER(A6XX_TP0_MIPMAP_BASE_DATA, 0x3c0),
+ SHADER(A6XX_TP1_TMO_DATA, 0x200),
+ SHADER(A6XX_TP1_SMO_DATA, 0x80),
+ SHADER(A6XX_TP1_MIPMAP_BASE_DATA, 0x3c0),
+ SHADER(A6XX_SP_INST_DATA, 0x800),
+ SHADER(A6XX_SP_LB_0_DATA, 0x800),
+ SHADER(A6XX_SP_LB_1_DATA, 0x800),
+ SHADER(A6XX_SP_LB_2_DATA, 0x800),
+ SHADER(A6XX_SP_LB_3_DATA, 0x800),
+ SHADER(A6XX_SP_LB_4_DATA, 0x800),
+ SHADER(A6XX_SP_LB_5_DATA, 0x200),
+ SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
+ SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
+ SHADER(A6XX_SP_UAV_DATA, 0x80),
+ SHADER(A6XX_SP_INST_TAG, 0x80),
+ SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
+ SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
+ SHADER(A6XX_SP_SMO_TAG, 0x80),
+ SHADER(A6XX_SP_STATE_DATA, 0x3f),
+ SHADER(A6XX_HLSQ_CHUNK_CVS_RAM, 0x1c0),
+ SHADER(A6XX_HLSQ_CHUNK_CPS_RAM, 0x280),
+ SHADER(A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40),
+ SHADER(A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40),
+ SHADER(A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4),
+ SHADER(A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4),
+ SHADER(A6XX_HLSQ_CVS_MISC_RAM, 0x1c0),
+ SHADER(A6XX_HLSQ_CPS_MISC_RAM, 0x580),
+ SHADER(A6XX_HLSQ_INST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8),
+ SHADER(A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4),
+ SHADER(A6XX_HLSQ_INST_RAM_TAG, 0x80),
+ SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xc),
+ SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10),
+ SHADER(A6XX_HLSQ_PWR_REST_RAM, 0x28),
+ SHADER(A6XX_HLSQ_PWR_REST_TAG, 0x14),
+ SHADER(A6XX_HLSQ_DATAPATH_META, 0x40),
+ SHADER(A6XX_HLSQ_FRONTEND_META, 0x40),
+ SHADER(A6XX_HLSQ_INDIRECT_META, 0x40),
+};
+
+static const u32 a6xx_rb_rac_registers[] = {
+ 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e10, 0x8e1c, 0x8e20, 0x8e25,
+ 0x8e28, 0x8e28, 0x8e2c, 0x8e2f, 0x8e50, 0x8e52,
+};
+
+static const u32 a6xx_rb_rbp_registers[] = {
+ 0x8e01, 0x8e01, 0x8e0c, 0x8e0c, 0x8e3b, 0x8e3e, 0x8e40, 0x8e43,
+ 0x8e53, 0x8e5f, 0x8e70, 0x8e77,
+};
+
+static const u32 a6xx_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
+ 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
+ 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
+ 0x0100, 0x011d, 0x0200, 0x020d, 0x0218, 0x023d, 0x0400, 0x04f9,
+ 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511, 0x0533, 0x0533,
+ 0x0540, 0x0555,
+ /* CP */
+ 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
+ 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084f, 0x086f,
+ 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4, 0x08d0, 0x08dd,
+ 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093e,
+ 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996, 0x0998, 0x099e,
+ 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1, 0x09c2, 0x09c8,
+ 0x0a00, 0x0a03,
+ /* VSC */
+ 0x0c00, 0x0c04, 0x0c06, 0x0c06, 0x0c10, 0x0cd9, 0x0e00, 0x0e0e,
+ /* UCHE */
+ 0x0e10, 0x0e13, 0x0e17, 0x0e19, 0x0e1c, 0x0e2b, 0x0e30, 0x0e32,
+ 0x0e38, 0x0e39,
+ /* GRAS */
+ 0x8600, 0x8601, 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b,
+ 0x8630, 0x8637,
+ /* VPC */
+ 0x9600, 0x9604, 0x9624, 0x9637,
+ /* PC */
+ 0x9e00, 0x9e01, 0x9e03, 0x9e0e, 0x9e11, 0x9e16, 0x9e19, 0x9e19,
+ 0x9e1c, 0x9e1c, 0x9e20, 0x9e23, 0x9e30, 0x9e31, 0x9e34, 0x9e34,
+ 0x9e70, 0x9e72, 0x9e78, 0x9e79, 0x9e80, 0x9fff,
+ /* VFD */
+ 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a, 0xa610, 0xa617,
+ 0xa630, 0xa630,
+};
+
+#define REGS(_array, _sel_reg, _sel_val) \
+ { .registers = _array, .count = ARRAY_SIZE(_array), \
+ .val0 = _sel_reg, .val1 = _sel_val }
+
+static const struct a6xx_registers a6xx_reglist[] = {
+ REGS(a6xx_registers, 0, 0),
+ REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
+ REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
+};
+
+static const u32 a6xx_ahb_registers[] = {
+ /* RBBM_STATUS - RBBM_STATUS3 */
+ 0x210, 0x213,
+ /* CP_STATUS_1 */
+ 0x825, 0x825,
+};
+
+static const u32 a6xx_vbif_registers[] = {
+ 0x3000, 0x3007, 0x300c, 0x3014, 0x3018, 0x302d, 0x3030, 0x3031,
+ 0x3034, 0x3036, 0x303c, 0x303d, 0x3040, 0x3040, 0x3042, 0x3042,
+ 0x3049, 0x3049, 0x3058, 0x3058, 0x305a, 0x3061, 0x3064, 0x3068,
+ 0x306c, 0x306d, 0x3080, 0x3088, 0x308b, 0x308c, 0x3090, 0x3094,
+ 0x3098, 0x3098, 0x309c, 0x309c, 0x30c0, 0x30c0, 0x30c8, 0x30c8,
+ 0x30d0, 0x30d0, 0x30d8, 0x30d8, 0x30e0, 0x30e0, 0x3100, 0x3100,
+ 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+ 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
+ 0x3156, 0x3156, 0x3158, 0x3158, 0x315a, 0x315a, 0x315c, 0x315c,
+ 0x315e, 0x315e, 0x3160, 0x3160, 0x3162, 0x3162, 0x340c, 0x340c,
+ 0x3410, 0x3410, 0x3800, 0x3801,
+};
+
+static const struct a6xx_registers a6xx_ahb_reglist[] = {
+ REGS(a6xx_ahb_registers, 0, 0),
+ REGS(a6xx_vbif_registers, 0, 0),
+};
+
+static const u32 a6xx_gmu_gx_registers[] = {
+ /* GMU GX */
+ 0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
+ 0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b,
+ 0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b,
+ 0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084,
+ 0x0100, 0x012b, 0x0140, 0x0140,
+};
+
+static const u32 a6xx_gmu_cx_registers[] = {
+ /* GMU CX */
+ 0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a,
+ 0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c,
+ 0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089,
+ 0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0,
+ 0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140,
+ 0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154,
+ 0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
+ 0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
+ 0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
+ /* GPU RSCC */
+ 0x8c8c, 0x8c8c, 0x8d01, 0x8d02, 0x8f40, 0x8f42, 0x8f44, 0x8f47,
+ 0x8f4c, 0x8f87, 0x8fec, 0x8fef, 0x8ff4, 0x902f, 0x9094, 0x9097,
+ 0x909c, 0x90d7, 0x913c, 0x913f, 0x9144, 0x917f,
+ /* GMU AO */
+ 0x9300, 0x9316, 0x9400, 0x9400,
+ /* GPU CC */
+ 0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
+ 0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
+ 0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002,
+ 0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402,
+ 0xb800, 0xb802,
+ /* GPU CC ACD */
+ 0xbc00, 0xbc16, 0xbc20, 0xbc27,
+};
+
+static const struct a6xx_registers a6xx_gmu_reglist[] = {
+ REGS(a6xx_gmu_cx_registers, 0, 0),
+ REGS(a6xx_gmu_gx_registers, 0, 0),
+};
+
+static const struct a6xx_indexed_registers {
+ const char *name;
+ u32 addr;
+ u32 data;
+ u32 count;
+} a6xx_indexed_reglist[] = {
+ { "CP_SEQ_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+ REG_A6XX_CP_SQE_STAT_DATA, 0x33 },
+ { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
+ REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },
+ { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x6000 },
+ { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ REG_A6XX_CP_ROQ_DBG_DATA, 0x400 },
+};
+
+static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
+	"CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
+};
+
+#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
+
+static const struct a6xx_debugbus_block {
+ const char *name;
+ u32 id;
+ u32 count;
+} a6xx_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_CP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RBBM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_HLSQ, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_UCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DPM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TESS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_PC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFDP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VPC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TSE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RAS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VSC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_COM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_LRZ, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_A2D, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCUFCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RBP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DCS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DBGC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_GMU_GX, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPFCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_GPC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_LARC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_HLSQ_SPTP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RB_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RB_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_UCHE_WRAPPER, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_3, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
+};
+
+static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 6ff9baec2658..eda11abc5f01 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -91,7 +91,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
if (ret) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Message %s id %d timed out waiting for response\n",
a6xx_hfi_msg_id[id], seqnum);
return -ETIMEDOUT;
@@ -110,7 +110,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
/* If the queue is empty our response never made it */
if (!ret) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"The HFI response queue is unexpectedly empty\n");
return -ENOENT;
@@ -120,20 +120,20 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
struct a6xx_hfi_msg_error *error =
(struct a6xx_hfi_msg_error *) &resp;
- dev_err(gmu->dev, "GMU firmware error %d\n",
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
error->code);
continue;
}
if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Unexpected message id %d on the response queue\n",
HFI_HEADER_SEQNUM(resp.ret_header));
continue;
}
if (resp.error) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Message %s id %d returned error %d\n",
a6xx_hfi_msg_id[id], seqnum, resp.error);
return -EINVAL;
@@ -163,7 +163,7 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
if (ret) {
- dev_err(gmu->dev, "Unable to send message %s id %d\n",
+ DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
a6xx_hfi_msg_id[id], seqnum);
return ret;
}
@@ -317,7 +317,7 @@ void a6xx_hfi_stop(struct a6xx_gmu *gmu)
continue;
if (queue->header->read_index != queue->header->write_index)
- dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+ DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);
queue->header->read_index = 0;
queue->header->write_index = 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 1318959d504d..641d3ba477b6 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -339,6 +339,15 @@ static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
#define REG_AXXX_CP_INT_CNTL 0x000001f2
+#define AXXX_CP_INT_CNTL_SW_INT_MASK 0x00080000
+#define AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK 0x00800000
+#define AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK 0x01000000
+#define AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK 0x02000000
+#define AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK 0x04000000
+#define AXXX_CP_INT_CNTL_IB_ERROR_MASK 0x08000000
+#define AXXX_CP_INT_CNTL_IB2_INT_MASK 0x20000000
+#define AXXX_CP_INT_CNTL_IB1_INT_MASK 0x40000000
+#define AXXX_CP_INT_CNTL_RB_INT_MASK 0x80000000
#define REG_AXXX_CP_INT_STATUS 0x000001f3
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 86abdb2b3a9c..714ed6505e47 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -27,6 +27,39 @@ module_param_named(hang_debug, hang_debug, bool, 0600);
static const struct adreno_info gpulist[] = {
{
+ .rev = ADRENO_REV(2, 0, 0, 0),
+ .revn = 200,
+ .name = "A200",
+ .fw = {
+ [ADRENO_FW_PM4] = "yamato_pm4.fw",
+ [ADRENO_FW_PFP] = "yamato_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+	}, { /* a200 on i.MX51 has only 128KiB gmem */
+ .rev = ADRENO_REV(2, 0, 0, 1),
+ .revn = 201,
+ .name = "A200",
+ .fw = {
+ [ADRENO_FW_PM4] = "yamato_pm4.fw",
+ [ADRENO_FW_PFP] = "yamato_pfp.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(2, 2, 0, ANY_ID),
+ .revn = 220,
+ .name = "A220",
+ .fw = {
+ [ADRENO_FW_PM4] = "leia_pm4_470.fw",
+ [ADRENO_FW_PFP] = "leia_pfp_470.fw",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, {
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
.revn = 305,
.name = "A305",
@@ -196,7 +229,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
- dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
return NULL;
}
@@ -205,7 +238,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
- dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
}
@@ -238,7 +271,8 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
if (ret == 0) {
unsigned int r, patch;
- if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2) {
+ if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
+ sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
rev->core = r / 100;
r %= 100;
rev->major = r / 10;
@@ -253,7 +287,7 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
/* and if that fails, fall back to legacy "qcom,chipid" property: */
ret = of_property_read_u32(node, "qcom,chipid", &chipid);
if (ret) {
- dev_err(dev, "could not parse qcom,chipid: %d\n", ret);
+ DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
return ret;
}
@@ -274,6 +308,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
static struct adreno_platform_config config = {};
const struct adreno_info *info;
struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu *gpu;
int ret;
@@ -296,6 +331,8 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
config.rev.minor, config.rev.patchid);
+ priv->is_a2xx = config.rev.core == 2;
+
gpu = info->init(drm);
if (IS_ERR(gpu)) {
dev_warn(drm->dev, "failed to load adreno gpu\n");
@@ -323,9 +360,37 @@ static const struct component_ops a3xx_ops = {
.unbind = adreno_unbind,
};
+static void adreno_device_register_headless(void)
+{
+ /* on imx5, we don't have a top-level mdp/dpu node;
+ * this creates a dummy device for the driver in that case
+ */
+ struct platform_device_info dummy_info = {
+ .parent = NULL,
+ .name = "msm",
+ .id = -1,
+ .res = NULL,
+ .num_res = 0,
+ .data = NULL,
+ .size_data = 0,
+ .dma_mask = ~0,
+ };
+ platform_device_register_full(&dummy_info);
+}
+
static int adreno_probe(struct platform_device *pdev)
{
- return component_add(&pdev->dev, &a3xx_ops);
+
+ int ret;
+
+ ret = component_add(&pdev->dev, &a3xx_ops);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
+ adreno_device_register_headless();
+
+ return 0;
}
static int adreno_remove(struct platform_device *pdev)
@@ -337,6 +402,8 @@ static int adreno_remove(struct platform_device *pdev)
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
+ /* for compatibility with imx5 gpu: */
+ { .compatible = "amd,imageon" },
/* for backwards compat w/ downstream kgsl DT files: */
{ .compatible = "qcom,kgsl-3d0" },
{}
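For reference, the compatible strings accepted by find_chipid() above encode the revision as a decimal number plus a patch level, and the digits are split into core/major/minor by repeated division. A standalone sketch of that arithmetic, using a hypothetical "amd,imageon-200.1" string as input (which would match the new ADRENO_REV(2, 0, 0, 1) gpulist entry):

#include <stdio.h>

/* Mirrors the core/major/minor split done in find_chipid() above. */
int main(void)
{
	unsigned int r = 200, patch = 1;	/* from "amd,imageon-200.1" */
	unsigned int core, major, minor;

	core = r / 100;		/* 2 */
	r %= 100;
	major = r / 10;		/* 0 */
	r %= 10;
	minor = r;		/* 0 */

	/* prints ADRENO_REV(2, 0, 0, 1) */
	printf("ADRENO_REV(%u, %u, %u, %u)\n", core, major, minor, patch);
	return 0;
}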
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 93d70f4a2154..2e4372ef17a3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -89,12 +89,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, newname, drm->dev);
if (!ret) {
- dev_info(drm->dev, "loaded %s from new location\n",
+ DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_NEW;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
- dev_err(drm->dev, "failed to load %s: %d\n",
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
newname, ret);
fw = ERR_PTR(ret);
goto out;
@@ -109,12 +109,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, fwname, drm->dev);
if (!ret) {
- dev_info(drm->dev, "loaded %s from legacy location\n",
+ DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
- dev_err(drm->dev, "failed to load %s: %d\n",
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
fwname, ret);
fw = ERR_PTR(ret);
goto out;
@@ -130,19 +130,19 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware(&fw, newname, drm->dev);
if (!ret) {
- dev_info(drm->dev, "loaded %s with helper\n",
+ DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_HELPER;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
- dev_err(drm->dev, "failed to load %s: %d\n",
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
newname, ret);
fw = ERR_PTR(ret);
goto out;
}
}
- dev_err(drm->dev, "failed to load %s\n", fwname);
+ DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
fw = ERR_PTR(-ENOENT);
out:
kfree(newname);
@@ -209,14 +209,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
if (!ring)
continue;
- ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
- if (ret) {
- ring->iova = 0;
- dev_err(gpu->dev->dev,
- "could not map ringbuffer %d: %d\n", i, ret);
- return ret;
- }
-
ring->cur = ring->start;
ring->next = ring->start;
@@ -277,7 +269,7 @@ void adreno_recover(struct msm_gpu *gpu)
ret = msm_gpu_hw_init(gpu);
if (ret) {
- dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
}
@@ -319,16 +311,27 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
*/
OUT_PKT3(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, HLSQ_FLUSH);
-
- OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
- OUT_RING(ring, 0x00000000);
}
- /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
- OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
- OUT_RING(ring, rbmemptr(ring, fence));
- OUT_RING(ring, submit->seqno);
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ if (!adreno_is_a2xx(adreno_gpu)) {
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+ } else {
+ /* BIT(31) means something else on a2xx */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+ OUT_PKT3(ring, CP_INTERRUPT, 1);
+ OUT_RING(ring, 0x80000000);
+ }
#if 0
if (adreno_is_a3xx(adreno_gpu)) {
@@ -406,7 +409,7 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
size = j + 1;
if (size) {
- state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
+ state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
if (state->ring[i].data) {
memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
state->ring[i].data_size = size << 2;
@@ -414,6 +417,10 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
}
}
+ /* Some targets prefer to collect their own registers */
+ if (!adreno_gpu->registers)
+ return 0;
+
/* Count the number of registers */
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
count += adreno_gpu->registers[i + 1] -
@@ -445,7 +452,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state)
int i;
for (i = 0; i < ARRAY_SIZE(state->ring); i++)
- kfree(state->ring[i].data);
+ kvfree(state->ring[i].data);
for (i = 0; state->bos && i < state->nr_bos; i++)
kvfree(state->bos[i].data);
@@ -475,34 +482,74 @@ int adreno_gpu_state_put(struct msm_gpu_state *state)
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
+static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
{
+ void *buf;
+ size_t buf_itr = 0, buffer_size;
char out[ASCII85_BUFSZ];
- long l, datalen, i;
+ long l;
+ int i;
- if (!ptr || !len)
- return;
+ if (!src || !len)
+ return NULL;
+
+ l = ascii85_encode_len(len);
/*
- * Only dump the non-zero part of the buffer - rarely will any data
- * completely fill the entire allocated size of the buffer
+ * Ascii85 outputs either a 5-byte string or a 1-byte string, so we
+ * account for the worst case of 5 bytes per dword plus 1 for the '\0'
*/
- for (datalen = 0, i = 0; i < len >> 2; i++) {
- if (ptr[i])
- datalen = (i << 2) + 1;
- }
+ buffer_size = (l * 5) + 1;
+
+ buf = kvmalloc(buffer_size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ for (i = 0; i < l; i++)
+ buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
+ ascii85_encode(src[i], out));
+
+ return buf;
+}
- /* Skip printing the object if it is empty */
- if (datalen == 0)
+/* len is expected to be in bytes */
+static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+ bool *encoded)
+{
+ if (!*ptr || !len)
return;
- l = ascii85_encode_len(datalen);
+ if (!*encoded) {
+ long datalen, i;
+ u32 *buf = *ptr;
+
+ /*
+ * Only dump the non-zero part of the buffer - rarely will
+ * any data completely fill the entire allocated size of
+ * the buffer.
+ */
+ for (datalen = 0, i = 0; i < len >> 2; i++)
+ if (buf[i])
+ datalen = ((i + 1) << 2);
+
+ /*
+ * If we reach here, then the originally captured binary buffer
+ * will be replaced with the ascii85 encoded string
+ */
+ *ptr = adreno_gpu_ascii85_encode(buf, datalen);
+
+ kvfree(buf);
+
+ *encoded = true;
+ }
+
+ if (!*ptr)
+ return;
drm_puts(p, " data: !!ascii85 |\n");
drm_puts(p, " ");
- for (i = 0; i < l; i++)
- drm_puts(p, ascii85_encode(ptr[i], out));
+ drm_puts(p, *ptr);
drm_puts(p, "\n");
}
@@ -534,8 +581,8 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
drm_printf(p, " wptr: %d\n", state->ring[i].wptr);
drm_printf(p, " size: %d\n", MSM_GPU_RINGBUFFER_SZ);
- adreno_show_object(p, state->ring[i].data,
- state->ring[i].data_size);
+ adreno_show_object(p, &state->ring[i].data,
+ state->ring[i].data_size, &state->ring[i].encoded);
}
if (state->bos) {
@@ -546,17 +593,19 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
state->bos[i].iova);
drm_printf(p, " size: %zd\n", state->bos[i].size);
- adreno_show_object(p, state->bos[i].data,
- state->bos[i].size);
+ adreno_show_object(p, &state->bos[i].data,
+ state->bos[i].size, &state->bos[i].encoded);
}
}
- drm_puts(p, "registers:\n");
+ if (state->nr_registers) {
+ drm_puts(p, "registers:\n");
- for (i = 0; i < state->nr_registers; i++) {
- drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
- state->registers[i * 2] << 2,
- state->registers[(i * 2) + 1]);
+ for (i = 0; i < state->nr_registers; i++) {
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ state->registers[i * 2] << 2,
+ state->registers[(i * 2) + 1]);
+ }
}
}
#endif
@@ -595,6 +644,9 @@ void adreno_dump(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i;
+ if (!adreno_gpu->registers)
+ return;
+
/* dump these out in a form that can be parsed by demsm: */
printk("IO:region %s 00000000 00020000\n", gpu->name);
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -635,7 +687,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
if (!node) {
- dev_err(dev, "Could not find the GPU powerlevels\n");
+ DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
}
@@ -674,7 +726,7 @@ static int adreno_get_pwrlevels(struct device *dev,
else {
ret = dev_pm_opp_of_add_table(dev);
if (ret)
- dev_err(dev, "Unable to set the OPP table\n");
+ DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
}
if (!ret) {
@@ -717,6 +769,9 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.va_start = SZ_16M;
adreno_gpu_config.va_end = 0xffffffff;
+ /* maximum range of a2xx mmu */
+ if (adreno_is_a2xx(adreno_gpu))
+ adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;
adreno_gpu_config.nr_rings = nr_rings;
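Two bits of arithmetic in this file are worth spelling out. The a2xx va_end above limits the GPU virtual range to SZ_16M + 0xfff * SZ_64K = 0x10ff0000, i.e. 4095 64K entries above the 16M base. And adreno_gpu_ascii85_encode() sizes its output for the worst case of 5 characters per 32-bit word (all-zero words collapse to a single 'z') plus a terminating NUL. A small standalone check of the latter, assuming ascii85_encode_len() is the usual DIV_ROUND_UP(len, 4):

#include <stdio.h>

/* ascii85_encode_len(len) == DIV_ROUND_UP(len, 4): one group per word. */
static size_t ascii85_groups(size_t len_bytes)
{
	return (len_bytes + 3) / 4;
}

int main(void)
{
	size_t len = 4096;	/* e.g. the captured portion of a ringbuffer */
	size_t worst = ascii85_groups(len) * 5 + 1;	/* 5 chars/word + '\0' */

	printf("%zu bytes encode to at most %zu bytes\n", len, worst);
	return 0;
}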
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index de6e6ee42fba..5db459bc28a7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -21,6 +21,7 @@
#define __ADRENO_GPU_H__
#include <linux/firmware.h>
+#include <linux/iopoll.h>
#include "msm_gpu.h"
@@ -154,6 +155,20 @@ struct adreno_platform_config {
__ret; \
})
+static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
+{
+ return (gpu->revn < 300);
+}
+
+static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
+{
+ return (gpu->revn < 210);
+}
+
+static inline bool adreno_is_a225(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 225;
+}
static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
@@ -334,6 +349,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
@@ -375,4 +391,9 @@ static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
((1 << 29) | \
((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
+
#endif /* __ADRENO_GPU_H__ */
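The gpu_poll_timeout() helper added above lets callers spin on a GPU register by its word offset, delegating the delay/timeout loop to readl_poll_timeout(). A hedged usage sketch (the register and the bit tested are illustrative, not taken from this patch); it would sit inside a function that has a struct msm_gpu *gpu in scope:

	u32 status;
	int ret;

	/* Poll REG_AXXX_CP_INT_STATUS every 10 us, giving up after 1000 us.
	 * 'status' holds the last value read; ret is 0 or -ETIMEDOUT. */
	ret = gpu_poll_timeout(gpu, REG_AXXX_CP_INT_STATUS, status,
			       status & AXXX_CP_INT_CNTL_RB_INT_MASK, 10, 1000);
	if (ret)
		DRM_DEV_ERROR(gpu->dev->dev, "timed out waiting for CP RB interrupt\n");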
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 15eb03bed984..79b907ac0b4b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -108,6 +108,13 @@ enum pc_di_src_sel {
DI_SRC_SEL_RESERVED = 3,
};
+enum pc_di_face_cull_sel {
+ DI_FACE_CULL_NONE = 0,
+ DI_FACE_CULL_FETCH = 1,
+ DI_FACE_BACKFACE_CULL = 2,
+ DI_FACE_FRONTFACE_CULL = 3,
+};
+
enum pc_di_index_size {
INDEX_SIZE_IGN = 0,
INDEX_SIZE_16_BIT = 0,
@@ -356,6 +363,7 @@ enum a6xx_render_mode {
RM6_GMEM = 4,
RM6_BLIT2D = 5,
RM6_RESOLVE = 6,
+ RM6_BLIT2DSCALE = 12,
};
enum pseudo_reg {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index 879c13fe74e0..e45c69044935 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -319,10 +319,8 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
unsigned long irq_flags;
int i, irq_count, enable_count, cb_count;
- if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
- DPU_ERROR("invalid parameters\n");
+ if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl))
return 0;
- }
for (i = 0; i < irq_obj->total_irqs; i++) {
spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
@@ -343,31 +341,11 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
- struct dentry *parent)
-{
- dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
- parent, &dpu_kms->irq_obj,
- &dpu_debugfs_core_irq_fops);
-
- return 0;
-}
-
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
-{
- debugfs_remove(dpu_kms->irq_obj.debugfs_file);
- dpu_kms->irq_obj.debugfs_file = NULL;
-}
-
-#else
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
- return 0;
-}
-
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
-{
+ debugfs_create_file("core_irq", 0600, parent, &dpu_kms->irq_obj,
+ &dpu_debugfs_core_irq_fops);
}
#endif
@@ -376,10 +354,7 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
struct msm_drm_private *priv;
int i;
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- } else if (!dpu_kms->dev) {
+ if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
@@ -410,20 +385,12 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
}
}
-int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
-{
- return 0;
-}
-
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
struct msm_drm_private *priv;
int i;
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- } else if (!dpu_kms->dev) {
+ if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index 5e98bba46af5..e9015a2b23fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -24,13 +24,6 @@
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
/**
- * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
- * @dpu_kms: DPU handle
- * @return: 0 if success; error code otherwise
- */
-int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
-
-/**
* dpu_core_irq_uninstall - uninstall core IRQ handler
* @dpu_kms: DPU handle
* @return: none
@@ -139,15 +132,8 @@ int dpu_core_irq_unregister_callback(
* dpu_debugfs_core_irq_init - register core irq debugfs
* @dpu_kms: pointer to kms
* @parent: debugfs directory root
- * @Return: 0 on success
*/
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent);
-/**
- * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
- * @dpu_kms: pointer to kms
- */
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
-
#endif /* __DPU_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 41c5191f9056..9f20f397f77d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -24,8 +24,6 @@
#include "dpu_crtc.h"
#include "dpu_core_perf.h"
-#define DPU_PERF_MODE_STRING_SIZE 128
-
/**
* enum dpu_perf_mode - performance tuning mode
* @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
@@ -57,31 +55,20 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
return to_dpu_kms(priv->kms);
}
-static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
-{
- return dpu_crtc_is_enabled(crtc);
-}
-
static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
- bool intf_connected = false;
-
- if (!crtc)
- goto end;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
- _dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+ tmp_crtc->enabled) {
DPU_DEBUG("video interface connected crtc:%d\n",
tmp_crtc->base.id);
- intf_connected = true;
- goto end;
+ return true;
}
}
-end:
- return intf_connected;
+ return false;
}
static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
@@ -101,20 +88,20 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
memset(perf, 0, sizeof(struct dpu_core_perf_params));
if (!dpu_cstate->bw_control) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
1000ULL;
perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
}
perf->core_clk_rate = kms->perf.max_core_clk_rate;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = 0;
perf->max_per_pipe_ib[i] = 0;
}
perf->core_clk_rate = 0;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
}
@@ -124,12 +111,12 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
DPU_DEBUG(
"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
- perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
- perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
- perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
- perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
- perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+ perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+ perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+ perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+ perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+ perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_EBI],
+ perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI]);
}
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
@@ -164,13 +151,13 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
/* obtain new values */
_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
- for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
- i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
+ i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
curr_client_type = dpu_crtc_get_client_type(crtc);
drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ if (tmp_crtc->enabled &&
(dpu_crtc_get_client_type(tmp_crtc) ==
curr_client_type) &&
(tmp_crtc != crtc)) {
@@ -229,7 +216,7 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
int ret = 0;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ if (tmp_crtc->enabled &&
curr_client_type ==
dpu_crtc_get_client_type(tmp_crtc)) {
dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
@@ -286,7 +273,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
*/
if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ if (tmp_crtc->enabled &&
dpu_crtc_get_intf_mode(tmp_crtc) ==
INTF_MODE_VIDEO)
return;
@@ -296,7 +283,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
if (kms->perf.enable_bw_release) {
trace_dpu_cmd_release_bw(crtc->base.id);
DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
dpu_crtc->cur_perf.bw_ctl[i] = 0;
_dpu_core_perf_crtc_update_bus(kms, crtc, i);
}
@@ -321,7 +308,7 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
struct dpu_crtc_state *dpu_cstate;
drm_for_each_crtc(crtc, kms->dev) {
- if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+ if (crtc->enabled) {
dpu_cstate = to_dpu_crtc_state(crtc->state);
clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
clk_rate);
@@ -372,8 +359,8 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
old = &dpu_crtc->cur_perf;
new = &dpu_cstate->new_perf;
- if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ if (crtc->enabled && !stop_req) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote - "ab or ib vote" is higher
@@ -415,13 +402,13 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
update_clk = 1;
}
trace_dpu_perf_crtc_update(crtc->base.id,
- new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
- new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
- new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+ new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+ new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI],
new->core_clk_rate, stop_req,
update_bus, update_clk);
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
if (update_bus & BIT(i)) {
ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
if (ret) {
@@ -462,24 +449,14 @@ static ssize_t _dpu_core_perf_mode_write(struct file *file,
struct dpu_core_perf *perf = file->private_data;
struct dpu_perf_cfg *cfg = &perf->catalog->perf;
u32 perf_mode = 0;
- char buf[10];
-
- if (!perf)
- return -ENODEV;
-
- if (count >= sizeof(buf))
- return -EFAULT;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- buf[count] = 0; /* end of string */
+ int ret;
- if (kstrtouint(buf, 0, &perf_mode))
- return -EFAULT;
+ ret = kstrtouint_from_user(user_buf, count, 0, &perf_mode);
+ if (ret)
+ return ret;
if (perf_mode >= DPU_PERF_MODE_MAX)
- return -EFAULT;
+ return -EINVAL;
if (perf_mode == DPU_PERF_MODE_FIXED) {
DRM_INFO("fix performance mode\n");
@@ -504,29 +481,16 @@ static ssize_t _dpu_core_perf_mode_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_core_perf *perf = file->private_data;
- int len = 0;
- char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
-
- if (!perf)
- return -ENODEV;
+ int len;
+ char buf[128];
- if (*ppos)
- return 0; /* the end */
-
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
perf->perf_tune.mode,
perf->perf_tune.min_core_clk,
perf->perf_tune.min_bus_vote);
- if (len < 0 || len >= sizeof(buf))
- return 0;
-
- if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
- return -EFAULT;
-
- *ppos += len; /* increase offset */
- return len;
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
}
static const struct file_operations dpu_core_perf_mode_fops = {
@@ -535,70 +499,43 @@ static const struct file_operations dpu_core_perf_mode_fops = {
.write = _dpu_core_perf_mode_write,
};
-static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
-{
- debugfs_remove_recursive(perf->debugfs_root);
- perf->debugfs_root = NULL;
-}
-
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
- struct dentry *parent)
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
{
+ struct dpu_core_perf *perf = &dpu_kms->perf;
struct dpu_mdss_cfg *catalog = perf->catalog;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- priv = perf->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return -EINVAL;
- }
+ struct dentry *entry;
- dpu_kms = to_dpu_kms(priv->kms);
-
- perf->debugfs_root = debugfs_create_dir("core_perf", parent);
- if (!perf->debugfs_root) {
- DPU_ERROR("failed to create core perf debugfs\n");
+ entry = debugfs_create_dir("core_perf", parent);
+ if (IS_ERR_OR_NULL(entry))
return -EINVAL;
- }
- debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+ debugfs_create_u64("max_core_clk_rate", 0600, entry,
&perf->max_core_clk_rate);
- debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+ debugfs_create_u64("core_clk_rate", 0600, entry,
&perf->core_clk_rate);
- debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+ debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
- debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+ debugfs_create_u32("threshold_low", 0600, entry,
(u32 *)&catalog->perf.max_bw_low);
- debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+ debugfs_create_u32("threshold_high", 0600, entry,
(u32 *)&catalog->perf.max_bw_high);
- debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+ debugfs_create_u32("min_core_ib", 0600, entry,
(u32 *)&catalog->perf.min_core_ib);
- debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+ debugfs_create_u32("min_llcc_ib", 0600, entry,
(u32 *)&catalog->perf.min_llcc_ib);
- debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+ debugfs_create_u32("min_dram_ib", 0600, entry,
(u32 *)&catalog->perf.min_dram_ib);
- debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+ debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);
- debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+ debugfs_create_u64("fix_core_clk_rate", 0600, entry,
&perf->fix_core_clk_rate);
- debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+ debugfs_create_u64("fix_core_ib_vote", 0600, entry,
&perf->fix_core_ib_vote);
- debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+ debugfs_create_u64("fix_core_ab_vote", 0600, entry,
&perf->fix_core_ab_vote);
return 0;
}
-#else
-static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
-{
-}
-
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
- struct dentry *parent)
-{
- return 0;
-}
#endif
void dpu_core_perf_destroy(struct dpu_core_perf *perf)
@@ -608,10 +545,8 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
return;
}
- dpu_core_perf_debugfs_destroy(perf);
perf->max_core_clk_rate = 0;
perf->core_clk = NULL;
- perf->phandle = NULL;
perf->catalog = NULL;
perf->dev = NULL;
}
@@ -619,12 +554,10 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
- struct dpu_power_handle *phandle,
struct dss_clk *core_clk)
{
perf->dev = dev;
perf->catalog = catalog;
- perf->phandle = phandle;
perf->core_clk = core_clk;
perf->max_core_clk_rate = core_clk->max_rate;
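The perf_mode handlers above now rely on two common debugfs helpers: kstrtouint_from_user() parses the user buffer into an unsigned int in one call, and simple_read_from_buffer() takes care of the file offset and partial-read bookkeeping. A minimal sketch of the same pattern for a hypothetical attribute (all names below are illustrative):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

struct demo_attr {
	unsigned int mode;
};

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	struct demo_attr *a = file->private_data;
	unsigned int val;
	int ret;

	/* parse, range-check and copy from userspace in one helper */
	ret = kstrtouint_from_user(ubuf, count, 0, &val);
	if (ret)
		return ret;

	a->mode = val;
	return count;
}

static ssize_t demo_read(struct file *file, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	struct demo_attr *a = file->private_data;
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "mode %u\n", a->mode);

	/* handles *ppos and partial reads for us */
	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}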
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index fbcbe0c7527a..37f518815eb7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -19,19 +19,31 @@
#include <drm/drm_crtc.h>
#include "dpu_hw_catalog.h"
-#include "dpu_power_handle.h"
#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
/**
+ * enum dpu_core_perf_data_bus_id - data bus identifier
+ * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
+ */
+enum dpu_core_perf_data_bus_id {
+ DPU_CORE_PERF_DATA_BUS_ID_MNOC,
+ DPU_CORE_PERF_DATA_BUS_ID_LLCC,
+ DPU_CORE_PERF_DATA_BUS_ID_EBI,
+ DPU_CORE_PERF_DATA_BUS_ID_MAX,
+};
+
+/**
* struct dpu_core_perf_params - definition of performance parameters
* @max_per_pipe_ib: maximum instantaneous bandwidth request
* @bw_ctl: arbitrated bandwidth request
* @core_clk_rate: core clock rate request
*/
struct dpu_core_perf_params {
- u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
- u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MAX];
+ u64 bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MAX];
u64 core_clk_rate;
};
@@ -52,7 +64,6 @@ struct dpu_core_perf_tune {
* @dev: Pointer to drm device
* @debugfs_root: top level debug folder
* @catalog: Pointer to catalog configuration
- * @phandle: Pointer to power handler
* @core_clk: Pointer to core clock structure
* @core_clk_rate: current core clock rate
* @max_core_clk_rate: maximum allowable core clock rate
@@ -66,7 +77,6 @@ struct dpu_core_perf {
struct drm_device *dev;
struct dentry *debugfs_root;
struct dpu_mdss_cfg *catalog;
- struct dpu_power_handle *phandle;
struct dss_clk *core_clk;
u64 core_clk_rate;
u64 max_core_clk_rate;
@@ -113,21 +123,20 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf);
* @perf: Pointer to core performance context
* @dev: Pointer to drm device
* @catalog: Pointer to catalog
- * @phandle: Pointer to power handle
* @core_clk: pointer to core clock
*/
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
- struct dpu_power_handle *phandle,
struct dss_clk *core_clk);
+struct dpu_kms;
+
/**
* dpu_core_perf_debugfs_init - initialize debugfs for core performance context
- * @perf: Pointer to core performance context
+ * @dpu_kms: Pointer to the dpu_kms struct
* @debugfs_parent: Pointer to parent debugfs
*/
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
- struct dentry *parent);
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent);
#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index d4530d60767b..9be7c355debd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -33,7 +33,6 @@
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
-#include "dpu_power_handle.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"
@@ -47,13 +46,7 @@
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
-static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
- struct drm_display_mode *mode)
-{
- return mode->hdisplay / cstate->num_mixers;
-}
-
-static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -69,10 +62,7 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
if (!crtc)
return;
- dpu_crtc->phandle = NULL;
-
drm_crtc_cleanup(crtc);
- mutex_destroy(&dpu_crtc->crtc_lock);
kfree(dpu_crtc);
}
@@ -287,16 +277,17 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
return INTF_MODE_NONE;
}
- drm_for_each_encoder(encoder, crtc->dev)
- if (encoder->crtc == crtc)
- return dpu_encoder_get_intf_mode(encoder);
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ /* TODO: Returns the first INTF_MODE, could there be multiple values? */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ return dpu_encoder_get_intf_mode(encoder);
return INTF_MODE_NONE;
}
-static void dpu_crtc_vblank_cb(void *data)
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc = (struct drm_crtc *)data;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
/* keep statistics on vblank callback - with auto reset via debugfs */
@@ -309,6 +300,19 @@ static void dpu_crtc_vblank_cb(void *data)
trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
+static void dpu_crtc_release_bw_unlocked(struct drm_crtc *crtc)
+{
+ int ret = 0;
+ struct drm_modeset_acquire_ctx ctx;
+
+ DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
+ dpu_core_perf_crtc_release_bw(crtc);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ if (ret)
+ DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
+ ret);
+}
+
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
struct dpu_crtc_frame_event *fevent = container_of(work,
@@ -338,7 +342,7 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
/* release bandwidth and other resources */
trace_dpu_crtc_frame_event_done(DRMID(crtc),
fevent->event);
- dpu_core_perf_crtc_release_bw(crtc);
+ dpu_crtc_release_bw_unlocked(crtc);
} else {
trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
fevent->event);
@@ -473,28 +477,21 @@ static void _dpu_crtc_setup_mixer_for_encoder(
static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *enc;
- mutex_lock(&dpu_crtc->crtc_lock);
- /* Check for mixers on all encoders attached to this crtc */
- list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ /* Check for mixers on all encoders attached to this crtc */
+ drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
_dpu_crtc_setup_mixer_for_encoder(crtc, enc);
- }
-
- mutex_unlock(&dpu_crtc->crtc_lock);
}
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
struct drm_display_mode *adj_mode = &state->adjusted_mode;
- u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
+ u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
int i;
for (i = 0; i < cstate->num_mixers; i++) {
@@ -502,7 +499,7 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
r->x1 = crtc_split_width * i;
r->y1 = 0;
r->x2 = r->x1 + crtc_split_width;
- r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+ r->y2 = adj_mode->vdisplay;
trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
}
@@ -552,13 +549,9 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
-
- /* encoder will trigger pending mask now */
+ /* encoder will trigger pending mask now */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_trigger_kickoff_pending(encoder);
- }
/*
* If no mixers have been allocated in dpu_crtc_atomic_check(),
@@ -702,10 +695,9 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
return rc;
}
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
{
struct drm_encoder *encoder;
- struct drm_device *dev = crtc->dev;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
@@ -721,127 +713,59 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
DPU_ATRACE_BEGIN("crtc_commit");
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ /*
+ * Encoder will flush/start now, unless it has a tx pending. If so, it
+ * may delay and flush at an irq event (e.g. ppdone)
+ */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
struct dpu_encoder_kickoff_params params = { 0 };
-
- if (encoder->crtc != crtc)
- continue;
-
- /*
- * Encoder will flush/start now, unless it has a tx pending.
- * If so, it may delay and flush at an irq event (e.g. ppdone)
- */
- dpu_encoder_prepare_for_kickoff(encoder, &params);
+ dpu_encoder_prepare_for_kickoff(encoder, &params, async);
}
- /* wait for frame_event_done completion */
- DPU_ATRACE_BEGIN("wait_for_frame_done_event");
- ret = _dpu_crtc_wait_for_frame_done(crtc);
- DPU_ATRACE_END("wait_for_frame_done_event");
- if (ret) {
- DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
- crtc->base.id,
- atomic_read(&dpu_crtc->frame_pending));
- goto end;
- }
- if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
- /* acquire bandwidth and other resources */
- DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
- } else
- DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+ if (!async) {
+ /* wait for frame_event_done completion */
+ DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+ ret = _dpu_crtc_wait_for_frame_done(crtc);
+ DPU_ATRACE_END("wait_for_frame_done_event");
+ if (ret) {
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+ goto end;
+ }
+
+ if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+ /* acquire bandwidth and other resources */
+ DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+ } else
+ DPU_DEBUG("crtc%d commit\n", crtc->base.id);
- dpu_crtc->play_count++;
+ dpu_crtc->play_count++;
+ }
dpu_vbif_clear_errors(dpu_kms);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
-
- dpu_encoder_kickoff(encoder);
- }
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_kickoff(encoder, async);
end:
- reinit_completion(&dpu_crtc->frame_done_comp);
+ if (!async)
+ reinit_completion(&dpu_crtc->frame_done_comp);
DPU_ATRACE_END("crtc_commit");
}
-/**
- * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
- * @dpu_crtc: Pointer to dpu crtc structure
- * @enable: Whether to enable/disable vblanks
- */
-static void _dpu_crtc_vblank_enable_no_lock(
- struct dpu_crtc *dpu_crtc, bool enable)
-{
- struct drm_crtc *crtc = &dpu_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *enc;
-
- if (enable) {
- /* drop lock since power crtc cb may try to re-acquire lock */
- mutex_unlock(&dpu_crtc->crtc_lock);
- pm_runtime_get_sync(dev->dev);
- mutex_lock(&dpu_crtc->crtc_lock);
-
- list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
-
- trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
- DRMID(enc), enable,
- dpu_crtc);
-
- dpu_encoder_register_vblank_callback(enc,
- dpu_crtc_vblank_cb, (void *)crtc);
- }
- } else {
- list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
-
- trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
- DRMID(enc), enable,
- dpu_crtc);
-
- dpu_encoder_register_vblank_callback(enc, NULL, NULL);
- }
-
- /* drop lock since power crtc cb may try to re-acquire lock */
- mutex_unlock(&dpu_crtc->crtc_lock);
- pm_runtime_put_sync(dev->dev);
- mutex_lock(&dpu_crtc->crtc_lock);
- }
-}
-
-/**
- * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
- * @crtc: Pointer to drm crtc object
- * @enable: true to enable suspend, false to indicate resume
- */
-static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+static void dpu_crtc_reset(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-
- DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
-
- mutex_lock(&dpu_crtc->crtc_lock);
+ struct dpu_crtc_state *cstate;
- /*
- * If the vblank is enabled, release a power reference on suspend
- * and take it back during resume (if it is still enabled).
- */
- trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
- if (dpu_crtc->suspend == enable)
- DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
- crtc->base.id, enable);
- else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
- _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
- }
+ if (crtc->state)
+ dpu_crtc_destroy_state(crtc, crtc->state);
- dpu_crtc->suspend = enable;
- mutex_unlock(&dpu_crtc->crtc_lock);
+ crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
+ if (crtc->state)
+ crtc->state->crtc = crtc;
}
/**
@@ -873,65 +797,8 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
return &cstate->base;
}
-/**
- * dpu_crtc_reset - reset hook for CRTCs
- * Resets the atomic state for @crtc by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- * @crtc: Pointer to drm crtc structure
- */
-static void dpu_crtc_reset(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *cstate;
-
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- /* revert suspend actions, if necessary */
- if (dpu_kms_is_suspend_state(crtc->dev))
- _dpu_crtc_set_suspend(crtc, false);
-
- /* remove previous state, if present */
- if (crtc->state) {
- dpu_crtc_destroy_state(crtc, crtc->state);
- crtc->state = 0;
- }
-
- dpu_crtc = to_dpu_crtc(crtc);
- cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
- if (!cstate) {
- DPU_ERROR("failed to allocate state\n");
- return;
- }
-
- cstate->base.crtc = crtc;
- crtc->state = &cstate->base;
-}
-
-static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
-{
- struct drm_crtc *crtc = arg;
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- struct drm_encoder *encoder;
-
- mutex_lock(&dpu_crtc->crtc_lock);
-
- trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
-
- /* restore encoder; crtc will be programmed during commit */
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
-
- dpu_encoder_virt_restore(encoder);
- }
-
- mutex_unlock(&dpu_crtc->crtc_lock);
-}
-
-static void dpu_crtc_disable(struct drm_crtc *crtc)
+static void dpu_crtc_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
@@ -951,13 +818,12 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
- if (dpu_kms_is_suspend_state(crtc->dev))
- _dpu_crtc_set_suspend(crtc, true);
-
/* Disable/save vblank irq handling */
drm_crtc_vblank_off(crtc);
- mutex_lock(&dpu_crtc->crtc_lock);
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ old_crtc_state->encoder_mask)
+ dpu_encoder_assign_crtc(encoder, NULL);
/* wait for frame_event_done completion */
if (_dpu_crtc_wait_for_frame_done(crtc))
@@ -966,10 +832,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
atomic_read(&dpu_crtc->frame_pending));
trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
- if (dpu_crtc->enabled && !dpu_crtc->suspend &&
- dpu_crtc->vblank_requested) {
- _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
- }
dpu_crtc->enabled = false;
if (atomic_read(&dpu_crtc->frame_pending)) {
@@ -981,15 +843,8 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
dpu_core_perf_crtc_update(crtc, 0, true);
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
- }
-
- if (dpu_crtc->power_event)
- dpu_power_handle_unregister_event(dpu_crtc->phandle,
- dpu_crtc->power_event);
memset(cstate->mixers, 0, sizeof(cstate->mixers));
cstate->num_mixers = 0;
@@ -998,14 +853,14 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
cstate->bw_control = false;
cstate->bw_split_vote = false;
- mutex_unlock(&dpu_crtc->crtc_lock);
-
if (crtc->state->event && !crtc->state->active) {
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
+
+ pm_runtime_put_sync(crtc->dev->dev);
}
static void dpu_crtc_enable(struct drm_crtc *crtc,
@@ -1021,33 +876,23 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
}
priv = crtc->dev->dev_private;
+ pm_runtime_get_sync(crtc->dev->dev);
+
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_register_frame_event_callback(encoder,
dpu_crtc_frame_event_cb, (void *)crtc);
- }
- mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
- if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
- dpu_crtc->vblank_requested) {
- _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
- }
dpu_crtc->enabled = true;
- mutex_unlock(&dpu_crtc->crtc_lock);
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_assign_crtc(encoder, crtc);
/* Enable/restore vblank irq handling */
drm_crtc_vblank_on(crtc);
-
- dpu_crtc->power_event = dpu_power_handle_register_event(
- dpu_crtc->phandle, DPU_POWER_EVENT_ENABLE,
- dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
-
}
struct plane_state {
@@ -1101,7 +946,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
memset(pipe_staged, 0, sizeof(pipe_staged));
- mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);
+ mixer_width = mode->hdisplay / cstate->num_mixers;
_dpu_crtc_setup_lm_bounds(crtc, state);
@@ -1289,21 +1134,32 @@ end:
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
- struct dpu_crtc *dpu_crtc;
-
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return -EINVAL;
- }
- dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *enc;
- mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
- if (dpu_crtc->enabled && !dpu_crtc->suspend) {
- _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
+
+ /*
+ * Normally we would iterate through encoder_mask in crtc state to find
+ * attached encoders. In this case, we might be disabling vblank _after_
+ * encoder_mask has been cleared.
+ *
+ * Instead, we "assign" a crtc to the encoder in enable and clear it in
+ * disable (which is also after encoder_mask is cleared). So instead of
+ * using encoder mask, we'll ask the encoder to toggle itself iff it's
+ * currently assigned to our crtc.
+ *
+ * Note also that this function cannot be called while crtc is disabled
+ * since we use drm_crtc_vblank_on/off. So we don't need to worry
+ * about the assigned crtcs being inconsistent with the current state
+ * (which means no need to worry about modeset locks).
+ */
+ list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+ trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
+ dpu_crtc);
+
+ dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
}
- dpu_crtc->vblank_requested = en;
- mutex_unlock(&dpu_crtc->crtc_lock);
return 0;
}
@@ -1324,18 +1180,14 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
int i, out_width;
- if (!s || !s->private)
- return -EINVAL;
-
dpu_crtc = s->private;
crtc = &dpu_crtc->base;
drm_modeset_lock_all(crtc->dev);
cstate = to_dpu_crtc_state(crtc->state);
- mutex_lock(&dpu_crtc->crtc_lock);
mode = &crtc->state->adjusted_mode;
- out_width = _dpu_crtc_get_mixer_width(cstate, mode);
+ out_width = mode->hdisplay / cstate->num_mixers;
seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
mode->hdisplay, mode->vdisplay);
@@ -1420,9 +1272,6 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
dpu_crtc->vblank_cb_time = ktime_set(0, 0);
}
- seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
-
- mutex_unlock(&dpu_crtc->crtc_lock);
drm_modeset_unlock_all(crtc->dev);
return 0;
@@ -1456,13 +1305,11 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
seq_printf(s, "core_clk_rate: %llu\n",
dpu_crtc->cur_perf.core_clk_rate);
- for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
- i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
- seq_printf(s, "bw_ctl[%s]: %llu\n",
- dpu_power_handle_get_dbus_name(i),
+ for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
+ i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
+ seq_printf(s, "bw_ctl[%d]: %llu\n", i,
dpu_crtc->cur_perf.bw_ctl[i]);
- seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
- dpu_power_handle_get_dbus_name(i),
+ seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
dpu_crtc->cur_perf.max_per_pipe_ib[i]);
}
@@ -1472,8 +1319,7 @@ DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
- struct dpu_kms *dpu_kms;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
static const struct file_operations debugfs_status_fops = {
.open = _dpu_debugfs_status_open,
@@ -1482,12 +1328,6 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
.release = single_release,
};
- if (!crtc)
- return -EINVAL;
- dpu_crtc = to_dpu_crtc(crtc);
-
- dpu_kms = _dpu_crtc_get_kms(crtc);
-
dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
crtc->dev->primary->debugfs_root);
if (!dpu_crtc->debugfs_root)
@@ -1504,25 +1344,11 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
return 0;
}
-
-static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc;
-
- if (!crtc)
- return;
- dpu_crtc = to_dpu_crtc(crtc);
- debugfs_remove_recursive(dpu_crtc->debugfs_root);
-}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
return 0;
}
-
-static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
-}
#endif /* CONFIG_DEBUG_FS */
static int dpu_crtc_late_register(struct drm_crtc *crtc)
@@ -1532,7 +1358,9 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc)
static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
- _dpu_crtc_destroy_debugfs(crtc);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ debugfs_remove_recursive(dpu_crtc->debugfs_root);
}
static const struct drm_crtc_funcs dpu_crtc_funcs = {
@@ -1547,7 +1375,7 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
- .disable = dpu_crtc_disable,
+ .atomic_disable = dpu_crtc_disable,
.atomic_enable = dpu_crtc_enable,
.atomic_check = dpu_crtc_atomic_check,
.atomic_begin = dpu_crtc_atomic_begin,
@@ -1574,7 +1402,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
crtc = &dpu_crtc->base;
crtc->dev = dev;
- mutex_init(&dpu_crtc->crtc_lock);
spin_lock_init(&dpu_crtc->spin_lock);
atomic_set(&dpu_crtc->frame_pending, 0);
@@ -1594,7 +1421,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
NULL);
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
- plane->crtc = crtc;
/* save user friendly CRTC name for later */
snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
@@ -1602,8 +1428,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
/* initialize event handling */
spin_lock_init(&dpu_crtc->event_lock);
- dpu_crtc->phandle = &kms->phandle;
-
DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
return crtc;
}
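The vblank path above replaces the old vblank_requested/crtc_lock bookkeeping: dpu_encoder_assign_crtc() records the owning crtc at enable time, and dpu_crtc_vblank() simply asks every encoder to toggle itself, with the encoder ignoring the request unless it is currently assigned to that crtc. A sketch of what dpu_encoder_toggle_vblank_for_crtc() is expected to do (the encoder internals shown are assumptions; the real implementation lives in dpu_encoder.c, outside this hunk):

/* Sketch only: assumes the encoder caches the crtc set by
 * dpu_encoder_assign_crtc() under its enc_spinlock. */
void sketch_toggle_vblank_for_crtc(struct dpu_encoder_virt *dpu_enc,
				   struct drm_crtc *crtc, bool enable)
{
	unsigned long flags;
	bool ours;

	spin_lock_irqsave(&dpu_enc->enc_spinlock, flags);
	ours = (dpu_enc->crtc == crtc);
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, flags);

	if (!ours)
		return;	/* request came from a crtc this encoder isn't driving */

	sketch_hw_vblank_irq_control(dpu_enc, enable);	/* hypothetical helper */
}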
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 3723b4830335..dbfb38a1986c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -132,8 +132,6 @@ struct dpu_crtc_frame_event {
* @vblank_cb_count : count of vblank callback since last reset
* @play_count : frame count between crtc enable and disable
* @vblank_cb_time : ktime at vblank count reset
- * @vblank_requested : whether the user has requested vblank events
- * @suspend : whether or not a suspend operation is in progress
* @enabled : whether the DPU CRTC is currently enabled. updated in the
* commit-thread, not state-swap time which is earlier, so
* safe to make decisions on during VBLANK on/off work
@@ -142,7 +140,6 @@ struct dpu_crtc_frame_event {
* @dirty_list : list of color processing features are dirty
* @ad_dirty: list containing ad properties that are dirty
* @ad_active: list containing ad properties that are active
- * @crtc_lock : crtc lock around create, destroy and access.
* @frame_pending : Whether or not an update is pending
* @frame_events : static allocation of in-flight frame events
* @frame_event_list : available frame event list
@@ -152,7 +149,6 @@ struct dpu_crtc_frame_event {
* @event_worker : Event worker queue
* @event_lock : Spinlock around event handling code
* @phandle: Pointer to power handler
- * @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
*/
struct dpu_crtc {
@@ -168,8 +164,6 @@ struct dpu_crtc {
u32 vblank_cb_count;
u64 play_count;
ktime_t vblank_cb_time;
- bool vblank_requested;
- bool suspend;
bool enabled;
struct list_head feature_list;
@@ -178,8 +172,6 @@ struct dpu_crtc {
struct list_head ad_dirty;
struct list_head ad_active;
- struct mutex crtc_lock;
-
atomic_t frame_pending;
struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
struct list_head frame_event_list;
@@ -189,9 +181,6 @@ struct dpu_crtc {
/* for handling internal event thread */
spinlock_t event_lock;
- struct dpu_power_handle *phandle;
- struct dpu_power_event *power_event;
-
struct dpu_core_perf_params cur_perf;
struct dpu_crtc_smmu_state_data smmu_state;
@@ -238,41 +227,12 @@ struct dpu_crtc_state {
container_of(x, struct dpu_crtc_state, base)
/**
- * dpu_crtc_state_is_stereo - Is crtc virtualized with two mixers?
- * @cstate: Pointer to dpu crtc state
- * @Return: true - has two mixers, false - has one mixer
- */
-static inline bool dpu_crtc_state_is_stereo(struct dpu_crtc_state *cstate)
-{
- return cstate->num_mixers == CRTC_DUAL_MIXERS;
-}
-
-/**
- * dpu_crtc_get_mixer_height - get the mixer height
- * Mixer height will be same as panel height
- */
-static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
- struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
-{
- if (!dpu_crtc || !cstate || !mode)
- return 0;
-
- return mode->vdisplay;
-}
-
-/**
 * dpu_crtc_frame_pending - return the number of pending frames
* @crtc: Pointer to drm crtc object
*/
static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
-
- if (!crtc)
- return -EINVAL;
-
- dpu_crtc = to_dpu_crtc(crtc);
- return atomic_read(&dpu_crtc->frame_pending);
+ return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
}
/**
@@ -283,10 +243,17 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
/**
+ * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
+
+/**
* dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* @crtc: Pointer to drm crtc object
+ * @async: true if the commit is asynchronous, false otherwise
*/
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async);
/**
* dpu_crtc_complete_commit - callback signalling completion of current commit
@@ -329,22 +296,7 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
struct drm_crtc *crtc)
{
- struct dpu_crtc_state *cstate =
- crtc ? to_dpu_crtc_state(crtc->state) : NULL;
-
- if (!cstate)
- return NRT_CLIENT;
-
- return RT_CLIENT;
-}
-
-/**
- * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
- * @crtc: Pointer to crtc
- */
-static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
-{
- return crtc ? crtc->enabled : false;
+ return crtc && crtc->state ? RT_CLIENT : NRT_CLIENT;
}
#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
deleted file mode 100644
index ae2aee7ed9e1..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
+++ /dev/null
@@ -1,2393 +0,0 @@
-/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/dma-buf.h>
-#include <linux/slab.h>
-#include <linux/list_sort.h>
-#include <linux/pm_runtime.h>
-
-#include "dpu_dbg.h"
-#include "disp/dpu1/dpu_hw_catalog.h"
-
-
-#define DEFAULT_DBGBUS_DPU DPU_DBG_DUMP_IN_MEM
-#define DEFAULT_DBGBUS_VBIFRT DPU_DBG_DUMP_IN_MEM
-#define REG_BASE_NAME_LEN 80
-
-#define DBGBUS_FLAGS_DSPP BIT(0)
-#define DBGBUS_DSPP_STATUS 0x34C
-
-#define DBGBUS_NAME_DPU "dpu"
-#define DBGBUS_NAME_VBIF_RT "vbif_rt"
-
-/* offsets from dpu top address for the debug buses */
-#define DBGBUS_SSPP0 0x188
-#define DBGBUS_AXI_INTF 0x194
-#define DBGBUS_SSPP1 0x298
-#define DBGBUS_DSPP 0x348
-#define DBGBUS_PERIPH 0x418
-
-#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
-
-/* following offsets are with respect to MDP VBIF base for DBG BUS access */
-#define MMSS_VBIF_CLKON 0x4
-#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
-#define MMSS_VBIF_TEST_BUS_OUT 0x230
-
-/* Vbif error info */
-#define MMSS_VBIF_PND_ERR 0x190
-#define MMSS_VBIF_SRC_ERR 0x194
-#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
-#define MMSS_VBIF_ERR_INFO 0X1a0
-#define MMSS_VBIF_ERR_INFO_1 0x1a4
-#define MMSS_VBIF_CLIENT_NUM 14
-
-/**
- * struct dpu_dbg_reg_base - register region base.
- * may have sub-ranges: sub-ranges are used for dumping
- * or may not have sub-ranges: dumping is base -> max_offset
- * @reg_base_head: head of this node
- * @name: register base name
- * @base: base pointer
- * @off: cached offset of region for manual register dumping
- * @cnt: cached range of region for manual register dumping
- * @max_offset: length of region
- * @buf: buffer used for manual register dumping
- * @buf_len: buffer length used for manual register dumping
- * @cb: callback for external dump function, null if not defined
- * @cb_ptr: private pointer to callback function
- */
-struct dpu_dbg_reg_base {
- struct list_head reg_base_head;
- char name[REG_BASE_NAME_LEN];
- void __iomem *base;
- size_t off;
- size_t cnt;
- size_t max_offset;
- char *buf;
- size_t buf_len;
- void (*cb)(void *ptr);
- void *cb_ptr;
-};
-
-struct dpu_debug_bus_entry {
- u32 wr_addr;
- u32 block_id;
- u32 test_id;
- void (*analyzer)(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val);
-};
-
-struct vbif_debug_bus_entry {
- u32 disable_bus_addr;
- u32 block_bus_addr;
- u32 bit_offset;
- u32 block_cnt;
- u32 test_pnt_start;
- u32 test_pnt_cnt;
-};
-
-struct dpu_dbg_debug_bus_common {
- char *name;
- u32 enable_mask;
- bool include_in_deferred_work;
- u32 flags;
- u32 entries_size;
- u32 *dumped_content;
-};
-
-struct dpu_dbg_dpu_debug_bus {
- struct dpu_dbg_debug_bus_common cmn;
- struct dpu_debug_bus_entry *entries;
- u32 top_blk_off;
-};
-
-struct dpu_dbg_vbif_debug_bus {
- struct dpu_dbg_debug_bus_common cmn;
- struct vbif_debug_bus_entry *entries;
-};
-
-/**
- * struct dpu_dbg_base - global dpu debug base structure
- * @reg_base_list: list of register dumping regions
- * @dev: device pointer
- * @dump_work: work struct for deferring register dump work to separate thread
- * @dbgbus_dpu: debug bus structure for the dpu
- * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
- */
-static struct dpu_dbg_base {
- struct list_head reg_base_list;
- struct device *dev;
-
- struct work_struct dump_work;
-
- struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
- struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
-} dpu_dbg_base;
-
-static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- if (!(val & 0xFFF000))
- return;
-
- dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- if (!(val & BIT(15)))
- return;
-
- dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- if (!(val & BIT(15)))
- return;
-
- dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
-
- /* Unpack 0 sspp 0*/
- { DBGBUS_SSPP0, 50, 2 },
- { DBGBUS_SSPP0, 60, 2 },
- { DBGBUS_SSPP0, 70, 2 },
- { DBGBUS_SSPP0, 85, 2 },
-
- /* Unpack 0 sspp 1*/
- { DBGBUS_SSPP1, 50, 2 },
- { DBGBUS_SSPP1, 60, 2 },
- { DBGBUS_SSPP1, 70, 2 },
- { DBGBUS_SSPP1, 85, 2 },
-
- /* scheduler */
- { DBGBUS_DSPP, 130, 0 },
- { DBGBUS_DSPP, 130, 1 },
- { DBGBUS_DSPP, 130, 2 },
- { DBGBUS_DSPP, 130, 3 },
- { DBGBUS_DSPP, 130, 4 },
- { DBGBUS_DSPP, 130, 5 },
-
- /* qseed */
- { DBGBUS_SSPP0, 6, 0},
- { DBGBUS_SSPP0, 6, 1},
- { DBGBUS_SSPP0, 26, 0},
- { DBGBUS_SSPP0, 26, 1},
- { DBGBUS_SSPP1, 6, 0},
- { DBGBUS_SSPP1, 6, 1},
- { DBGBUS_SSPP1, 26, 0},
- { DBGBUS_SSPP1, 26, 1},
-
- /* scale */
- { DBGBUS_SSPP0, 16, 0},
- { DBGBUS_SSPP0, 16, 1},
- { DBGBUS_SSPP0, 36, 0},
- { DBGBUS_SSPP0, 36, 1},
- { DBGBUS_SSPP1, 16, 0},
- { DBGBUS_SSPP1, 16, 1},
- { DBGBUS_SSPP1, 36, 0},
- { DBGBUS_SSPP1, 36, 1},
-
- /* fetch sspp0 */
-
- /* vig 0 */
- { DBGBUS_SSPP0, 0, 0 },
- { DBGBUS_SSPP0, 0, 1 },
- { DBGBUS_SSPP0, 0, 2 },
- { DBGBUS_SSPP0, 0, 3 },
- { DBGBUS_SSPP0, 0, 4 },
- { DBGBUS_SSPP0, 0, 5 },
- { DBGBUS_SSPP0, 0, 6 },
- { DBGBUS_SSPP0, 0, 7 },
-
- { DBGBUS_SSPP0, 1, 0 },
- { DBGBUS_SSPP0, 1, 1 },
- { DBGBUS_SSPP0, 1, 2 },
- { DBGBUS_SSPP0, 1, 3 },
- { DBGBUS_SSPP0, 1, 4 },
- { DBGBUS_SSPP0, 1, 5 },
- { DBGBUS_SSPP0, 1, 6 },
- { DBGBUS_SSPP0, 1, 7 },
-
- { DBGBUS_SSPP0, 2, 0 },
- { DBGBUS_SSPP0, 2, 1 },
- { DBGBUS_SSPP0, 2, 2 },
- { DBGBUS_SSPP0, 2, 3 },
- { DBGBUS_SSPP0, 2, 4 },
- { DBGBUS_SSPP0, 2, 5 },
- { DBGBUS_SSPP0, 2, 6 },
- { DBGBUS_SSPP0, 2, 7 },
-
- { DBGBUS_SSPP0, 4, 0 },
- { DBGBUS_SSPP0, 4, 1 },
- { DBGBUS_SSPP0, 4, 2 },
- { DBGBUS_SSPP0, 4, 3 },
- { DBGBUS_SSPP0, 4, 4 },
- { DBGBUS_SSPP0, 4, 5 },
- { DBGBUS_SSPP0, 4, 6 },
- { DBGBUS_SSPP0, 4, 7 },
-
- { DBGBUS_SSPP0, 5, 0 },
- { DBGBUS_SSPP0, 5, 1 },
- { DBGBUS_SSPP0, 5, 2 },
- { DBGBUS_SSPP0, 5, 3 },
- { DBGBUS_SSPP0, 5, 4 },
- { DBGBUS_SSPP0, 5, 5 },
- { DBGBUS_SSPP0, 5, 6 },
- { DBGBUS_SSPP0, 5, 7 },
-
- /* vig 2 */
- { DBGBUS_SSPP0, 20, 0 },
- { DBGBUS_SSPP0, 20, 1 },
- { DBGBUS_SSPP0, 20, 2 },
- { DBGBUS_SSPP0, 20, 3 },
- { DBGBUS_SSPP0, 20, 4 },
- { DBGBUS_SSPP0, 20, 5 },
- { DBGBUS_SSPP0, 20, 6 },
- { DBGBUS_SSPP0, 20, 7 },
-
- { DBGBUS_SSPP0, 21, 0 },
- { DBGBUS_SSPP0, 21, 1 },
- { DBGBUS_SSPP0, 21, 2 },
- { DBGBUS_SSPP0, 21, 3 },
- { DBGBUS_SSPP0, 21, 4 },
- { DBGBUS_SSPP0, 21, 5 },
- { DBGBUS_SSPP0, 21, 6 },
- { DBGBUS_SSPP0, 21, 7 },
-
- { DBGBUS_SSPP0, 22, 0 },
- { DBGBUS_SSPP0, 22, 1 },
- { DBGBUS_SSPP0, 22, 2 },
- { DBGBUS_SSPP0, 22, 3 },
- { DBGBUS_SSPP0, 22, 4 },
- { DBGBUS_SSPP0, 22, 5 },
- { DBGBUS_SSPP0, 22, 6 },
- { DBGBUS_SSPP0, 22, 7 },
-
- { DBGBUS_SSPP0, 24, 0 },
- { DBGBUS_SSPP0, 24, 1 },
- { DBGBUS_SSPP0, 24, 2 },
- { DBGBUS_SSPP0, 24, 3 },
- { DBGBUS_SSPP0, 24, 4 },
- { DBGBUS_SSPP0, 24, 5 },
- { DBGBUS_SSPP0, 24, 6 },
- { DBGBUS_SSPP0, 24, 7 },
-
- { DBGBUS_SSPP0, 25, 0 },
- { DBGBUS_SSPP0, 25, 1 },
- { DBGBUS_SSPP0, 25, 2 },
- { DBGBUS_SSPP0, 25, 3 },
- { DBGBUS_SSPP0, 25, 4 },
- { DBGBUS_SSPP0, 25, 5 },
- { DBGBUS_SSPP0, 25, 6 },
- { DBGBUS_SSPP0, 25, 7 },
-
- /* dma 2 */
- { DBGBUS_SSPP0, 30, 0 },
- { DBGBUS_SSPP0, 30, 1 },
- { DBGBUS_SSPP0, 30, 2 },
- { DBGBUS_SSPP0, 30, 3 },
- { DBGBUS_SSPP0, 30, 4 },
- { DBGBUS_SSPP0, 30, 5 },
- { DBGBUS_SSPP0, 30, 6 },
- { DBGBUS_SSPP0, 30, 7 },
-
- { DBGBUS_SSPP0, 31, 0 },
- { DBGBUS_SSPP0, 31, 1 },
- { DBGBUS_SSPP0, 31, 2 },
- { DBGBUS_SSPP0, 31, 3 },
- { DBGBUS_SSPP0, 31, 4 },
- { DBGBUS_SSPP0, 31, 5 },
- { DBGBUS_SSPP0, 31, 6 },
- { DBGBUS_SSPP0, 31, 7 },
-
- { DBGBUS_SSPP0, 32, 0 },
- { DBGBUS_SSPP0, 32, 1 },
- { DBGBUS_SSPP0, 32, 2 },
- { DBGBUS_SSPP0, 32, 3 },
- { DBGBUS_SSPP0, 32, 4 },
- { DBGBUS_SSPP0, 32, 5 },
- { DBGBUS_SSPP0, 32, 6 },
- { DBGBUS_SSPP0, 32, 7 },
-
- { DBGBUS_SSPP0, 33, 0 },
- { DBGBUS_SSPP0, 33, 1 },
- { DBGBUS_SSPP0, 33, 2 },
- { DBGBUS_SSPP0, 33, 3 },
- { DBGBUS_SSPP0, 33, 4 },
- { DBGBUS_SSPP0, 33, 5 },
- { DBGBUS_SSPP0, 33, 6 },
- { DBGBUS_SSPP0, 33, 7 },
-
- { DBGBUS_SSPP0, 34, 0 },
- { DBGBUS_SSPP0, 34, 1 },
- { DBGBUS_SSPP0, 34, 2 },
- { DBGBUS_SSPP0, 34, 3 },
- { DBGBUS_SSPP0, 34, 4 },
- { DBGBUS_SSPP0, 34, 5 },
- { DBGBUS_SSPP0, 34, 6 },
- { DBGBUS_SSPP0, 34, 7 },
-
- { DBGBUS_SSPP0, 35, 0 },
- { DBGBUS_SSPP0, 35, 1 },
- { DBGBUS_SSPP0, 35, 2 },
- { DBGBUS_SSPP0, 35, 3 },
-
- /* dma 0 */
- { DBGBUS_SSPP0, 40, 0 },
- { DBGBUS_SSPP0, 40, 1 },
- { DBGBUS_SSPP0, 40, 2 },
- { DBGBUS_SSPP0, 40, 3 },
- { DBGBUS_SSPP0, 40, 4 },
- { DBGBUS_SSPP0, 40, 5 },
- { DBGBUS_SSPP0, 40, 6 },
- { DBGBUS_SSPP0, 40, 7 },
-
- { DBGBUS_SSPP0, 41, 0 },
- { DBGBUS_SSPP0, 41, 1 },
- { DBGBUS_SSPP0, 41, 2 },
- { DBGBUS_SSPP0, 41, 3 },
- { DBGBUS_SSPP0, 41, 4 },
- { DBGBUS_SSPP0, 41, 5 },
- { DBGBUS_SSPP0, 41, 6 },
- { DBGBUS_SSPP0, 41, 7 },
-
- { DBGBUS_SSPP0, 42, 0 },
- { DBGBUS_SSPP0, 42, 1 },
- { DBGBUS_SSPP0, 42, 2 },
- { DBGBUS_SSPP0, 42, 3 },
- { DBGBUS_SSPP0, 42, 4 },
- { DBGBUS_SSPP0, 42, 5 },
- { DBGBUS_SSPP0, 42, 6 },
- { DBGBUS_SSPP0, 42, 7 },
-
- { DBGBUS_SSPP0, 44, 0 },
- { DBGBUS_SSPP0, 44, 1 },
- { DBGBUS_SSPP0, 44, 2 },
- { DBGBUS_SSPP0, 44, 3 },
- { DBGBUS_SSPP0, 44, 4 },
- { DBGBUS_SSPP0, 44, 5 },
- { DBGBUS_SSPP0, 44, 6 },
- { DBGBUS_SSPP0, 44, 7 },
-
- { DBGBUS_SSPP0, 45, 0 },
- { DBGBUS_SSPP0, 45, 1 },
- { DBGBUS_SSPP0, 45, 2 },
- { DBGBUS_SSPP0, 45, 3 },
- { DBGBUS_SSPP0, 45, 4 },
- { DBGBUS_SSPP0, 45, 5 },
- { DBGBUS_SSPP0, 45, 6 },
- { DBGBUS_SSPP0, 45, 7 },
-
- /* fetch sspp1 */
- /* vig 1 */
- { DBGBUS_SSPP1, 0, 0 },
- { DBGBUS_SSPP1, 0, 1 },
- { DBGBUS_SSPP1, 0, 2 },
- { DBGBUS_SSPP1, 0, 3 },
- { DBGBUS_SSPP1, 0, 4 },
- { DBGBUS_SSPP1, 0, 5 },
- { DBGBUS_SSPP1, 0, 6 },
- { DBGBUS_SSPP1, 0, 7 },
-
- { DBGBUS_SSPP1, 1, 0 },
- { DBGBUS_SSPP1, 1, 1 },
- { DBGBUS_SSPP1, 1, 2 },
- { DBGBUS_SSPP1, 1, 3 },
- { DBGBUS_SSPP1, 1, 4 },
- { DBGBUS_SSPP1, 1, 5 },
- { DBGBUS_SSPP1, 1, 6 },
- { DBGBUS_SSPP1, 1, 7 },
-
- { DBGBUS_SSPP1, 2, 0 },
- { DBGBUS_SSPP1, 2, 1 },
- { DBGBUS_SSPP1, 2, 2 },
- { DBGBUS_SSPP1, 2, 3 },
- { DBGBUS_SSPP1, 2, 4 },
- { DBGBUS_SSPP1, 2, 5 },
- { DBGBUS_SSPP1, 2, 6 },
- { DBGBUS_SSPP1, 2, 7 },
-
- { DBGBUS_SSPP1, 4, 0 },
- { DBGBUS_SSPP1, 4, 1 },
- { DBGBUS_SSPP1, 4, 2 },
- { DBGBUS_SSPP1, 4, 3 },
- { DBGBUS_SSPP1, 4, 4 },
- { DBGBUS_SSPP1, 4, 5 },
- { DBGBUS_SSPP1, 4, 6 },
- { DBGBUS_SSPP1, 4, 7 },
-
- { DBGBUS_SSPP1, 5, 0 },
- { DBGBUS_SSPP1, 5, 1 },
- { DBGBUS_SSPP1, 5, 2 },
- { DBGBUS_SSPP1, 5, 3 },
- { DBGBUS_SSPP1, 5, 4 },
- { DBGBUS_SSPP1, 5, 5 },
- { DBGBUS_SSPP1, 5, 6 },
- { DBGBUS_SSPP1, 5, 7 },
-
- /* vig 3 */
- { DBGBUS_SSPP1, 20, 0 },
- { DBGBUS_SSPP1, 20, 1 },
- { DBGBUS_SSPP1, 20, 2 },
- { DBGBUS_SSPP1, 20, 3 },
- { DBGBUS_SSPP1, 20, 4 },
- { DBGBUS_SSPP1, 20, 5 },
- { DBGBUS_SSPP1, 20, 6 },
- { DBGBUS_SSPP1, 20, 7 },
-
- { DBGBUS_SSPP1, 21, 0 },
- { DBGBUS_SSPP1, 21, 1 },
- { DBGBUS_SSPP1, 21, 2 },
- { DBGBUS_SSPP1, 21, 3 },
- { DBGBUS_SSPP1, 21, 4 },
- { DBGBUS_SSPP1, 21, 5 },
- { DBGBUS_SSPP1, 21, 6 },
- { DBGBUS_SSPP1, 21, 7 },
-
- { DBGBUS_SSPP1, 22, 0 },
- { DBGBUS_SSPP1, 22, 1 },
- { DBGBUS_SSPP1, 22, 2 },
- { DBGBUS_SSPP1, 22, 3 },
- { DBGBUS_SSPP1, 22, 4 },
- { DBGBUS_SSPP1, 22, 5 },
- { DBGBUS_SSPP1, 22, 6 },
- { DBGBUS_SSPP1, 22, 7 },
-
- { DBGBUS_SSPP1, 24, 0 },
- { DBGBUS_SSPP1, 24, 1 },
- { DBGBUS_SSPP1, 24, 2 },
- { DBGBUS_SSPP1, 24, 3 },
- { DBGBUS_SSPP1, 24, 4 },
- { DBGBUS_SSPP1, 24, 5 },
- { DBGBUS_SSPP1, 24, 6 },
- { DBGBUS_SSPP1, 24, 7 },
-
- { DBGBUS_SSPP1, 25, 0 },
- { DBGBUS_SSPP1, 25, 1 },
- { DBGBUS_SSPP1, 25, 2 },
- { DBGBUS_SSPP1, 25, 3 },
- { DBGBUS_SSPP1, 25, 4 },
- { DBGBUS_SSPP1, 25, 5 },
- { DBGBUS_SSPP1, 25, 6 },
- { DBGBUS_SSPP1, 25, 7 },
-
- /* dma 3 */
- { DBGBUS_SSPP1, 30, 0 },
- { DBGBUS_SSPP1, 30, 1 },
- { DBGBUS_SSPP1, 30, 2 },
- { DBGBUS_SSPP1, 30, 3 },
- { DBGBUS_SSPP1, 30, 4 },
- { DBGBUS_SSPP1, 30, 5 },
- { DBGBUS_SSPP1, 30, 6 },
- { DBGBUS_SSPP1, 30, 7 },
-
- { DBGBUS_SSPP1, 31, 0 },
- { DBGBUS_SSPP1, 31, 1 },
- { DBGBUS_SSPP1, 31, 2 },
- { DBGBUS_SSPP1, 31, 3 },
- { DBGBUS_SSPP1, 31, 4 },
- { DBGBUS_SSPP1, 31, 5 },
- { DBGBUS_SSPP1, 31, 6 },
- { DBGBUS_SSPP1, 31, 7 },
-
- { DBGBUS_SSPP1, 32, 0 },
- { DBGBUS_SSPP1, 32, 1 },
- { DBGBUS_SSPP1, 32, 2 },
- { DBGBUS_SSPP1, 32, 3 },
- { DBGBUS_SSPP1, 32, 4 },
- { DBGBUS_SSPP1, 32, 5 },
- { DBGBUS_SSPP1, 32, 6 },
- { DBGBUS_SSPP1, 32, 7 },
-
- { DBGBUS_SSPP1, 33, 0 },
- { DBGBUS_SSPP1, 33, 1 },
- { DBGBUS_SSPP1, 33, 2 },
- { DBGBUS_SSPP1, 33, 3 },
- { DBGBUS_SSPP1, 33, 4 },
- { DBGBUS_SSPP1, 33, 5 },
- { DBGBUS_SSPP1, 33, 6 },
- { DBGBUS_SSPP1, 33, 7 },
-
- { DBGBUS_SSPP1, 34, 0 },
- { DBGBUS_SSPP1, 34, 1 },
- { DBGBUS_SSPP1, 34, 2 },
- { DBGBUS_SSPP1, 34, 3 },
- { DBGBUS_SSPP1, 34, 4 },
- { DBGBUS_SSPP1, 34, 5 },
- { DBGBUS_SSPP1, 34, 6 },
- { DBGBUS_SSPP1, 34, 7 },
-
- { DBGBUS_SSPP1, 35, 0 },
- { DBGBUS_SSPP1, 35, 1 },
- { DBGBUS_SSPP1, 35, 2 },
-
- /* dma 1 */
- { DBGBUS_SSPP1, 40, 0 },
- { DBGBUS_SSPP1, 40, 1 },
- { DBGBUS_SSPP1, 40, 2 },
- { DBGBUS_SSPP1, 40, 3 },
- { DBGBUS_SSPP1, 40, 4 },
- { DBGBUS_SSPP1, 40, 5 },
- { DBGBUS_SSPP1, 40, 6 },
- { DBGBUS_SSPP1, 40, 7 },
-
- { DBGBUS_SSPP1, 41, 0 },
- { DBGBUS_SSPP1, 41, 1 },
- { DBGBUS_SSPP1, 41, 2 },
- { DBGBUS_SSPP1, 41, 3 },
- { DBGBUS_SSPP1, 41, 4 },
- { DBGBUS_SSPP1, 41, 5 },
- { DBGBUS_SSPP1, 41, 6 },
- { DBGBUS_SSPP1, 41, 7 },
-
- { DBGBUS_SSPP1, 42, 0 },
- { DBGBUS_SSPP1, 42, 1 },
- { DBGBUS_SSPP1, 42, 2 },
- { DBGBUS_SSPP1, 42, 3 },
- { DBGBUS_SSPP1, 42, 4 },
- { DBGBUS_SSPP1, 42, 5 },
- { DBGBUS_SSPP1, 42, 6 },
- { DBGBUS_SSPP1, 42, 7 },
-
- { DBGBUS_SSPP1, 44, 0 },
- { DBGBUS_SSPP1, 44, 1 },
- { DBGBUS_SSPP1, 44, 2 },
- { DBGBUS_SSPP1, 44, 3 },
- { DBGBUS_SSPP1, 44, 4 },
- { DBGBUS_SSPP1, 44, 5 },
- { DBGBUS_SSPP1, 44, 6 },
- { DBGBUS_SSPP1, 44, 7 },
-
- { DBGBUS_SSPP1, 45, 0 },
- { DBGBUS_SSPP1, 45, 1 },
- { DBGBUS_SSPP1, 45, 2 },
- { DBGBUS_SSPP1, 45, 3 },
- { DBGBUS_SSPP1, 45, 4 },
- { DBGBUS_SSPP1, 45, 5 },
- { DBGBUS_SSPP1, 45, 6 },
- { DBGBUS_SSPP1, 45, 7 },
-
- /* cursor 1 */
- { DBGBUS_SSPP1, 80, 0 },
- { DBGBUS_SSPP1, 80, 1 },
- { DBGBUS_SSPP1, 80, 2 },
- { DBGBUS_SSPP1, 80, 3 },
- { DBGBUS_SSPP1, 80, 4 },
- { DBGBUS_SSPP1, 80, 5 },
- { DBGBUS_SSPP1, 80, 6 },
- { DBGBUS_SSPP1, 80, 7 },
-
- { DBGBUS_SSPP1, 81, 0 },
- { DBGBUS_SSPP1, 81, 1 },
- { DBGBUS_SSPP1, 81, 2 },
- { DBGBUS_SSPP1, 81, 3 },
- { DBGBUS_SSPP1, 81, 4 },
- { DBGBUS_SSPP1, 81, 5 },
- { DBGBUS_SSPP1, 81, 6 },
- { DBGBUS_SSPP1, 81, 7 },
-
- { DBGBUS_SSPP1, 82, 0 },
- { DBGBUS_SSPP1, 82, 1 },
- { DBGBUS_SSPP1, 82, 2 },
- { DBGBUS_SSPP1, 82, 3 },
- { DBGBUS_SSPP1, 82, 4 },
- { DBGBUS_SSPP1, 82, 5 },
- { DBGBUS_SSPP1, 82, 6 },
- { DBGBUS_SSPP1, 82, 7 },
-
- { DBGBUS_SSPP1, 83, 0 },
- { DBGBUS_SSPP1, 83, 1 },
- { DBGBUS_SSPP1, 83, 2 },
- { DBGBUS_SSPP1, 83, 3 },
- { DBGBUS_SSPP1, 83, 4 },
- { DBGBUS_SSPP1, 83, 5 },
- { DBGBUS_SSPP1, 83, 6 },
- { DBGBUS_SSPP1, 83, 7 },
-
- { DBGBUS_SSPP1, 84, 0 },
- { DBGBUS_SSPP1, 84, 1 },
- { DBGBUS_SSPP1, 84, 2 },
- { DBGBUS_SSPP1, 84, 3 },
- { DBGBUS_SSPP1, 84, 4 },
- { DBGBUS_SSPP1, 84, 5 },
- { DBGBUS_SSPP1, 84, 6 },
- { DBGBUS_SSPP1, 84, 7 },
-
- /* dspp */
- { DBGBUS_DSPP, 13, 0 },
- { DBGBUS_DSPP, 19, 0 },
- { DBGBUS_DSPP, 14, 0 },
- { DBGBUS_DSPP, 14, 1 },
- { DBGBUS_DSPP, 14, 3 },
- { DBGBUS_DSPP, 20, 0 },
- { DBGBUS_DSPP, 20, 1 },
- { DBGBUS_DSPP, 20, 3 },
-
- /* ppb_0 */
- { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
-
- /* ppb_1 */
- { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
-
- /* lm_lut */
- { DBGBUS_DSPP, 109, 0 },
- { DBGBUS_DSPP, 105, 0 },
- { DBGBUS_DSPP, 103, 0 },
-
- /* tear-check */
- { DBGBUS_PERIPH, 63, 0 },
- { DBGBUS_PERIPH, 64, 0 },
- { DBGBUS_PERIPH, 65, 0 },
- { DBGBUS_PERIPH, 73, 0 },
- { DBGBUS_PERIPH, 74, 0 },
-
- /* crossbar */
- { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
-
- /* rotator */
- { DBGBUS_DSPP, 9, 0},
-
- /* blend */
- /* LM0 */
- { DBGBUS_DSPP, 63, 0},
- { DBGBUS_DSPP, 63, 1},
- { DBGBUS_DSPP, 63, 2},
- { DBGBUS_DSPP, 63, 3},
- { DBGBUS_DSPP, 63, 4},
- { DBGBUS_DSPP, 63, 5},
- { DBGBUS_DSPP, 63, 6},
- { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 64, 0},
- { DBGBUS_DSPP, 64, 1},
- { DBGBUS_DSPP, 64, 2},
- { DBGBUS_DSPP, 64, 3},
- { DBGBUS_DSPP, 64, 4},
- { DBGBUS_DSPP, 64, 5},
- { DBGBUS_DSPP, 64, 6},
- { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 65, 0},
- { DBGBUS_DSPP, 65, 1},
- { DBGBUS_DSPP, 65, 2},
- { DBGBUS_DSPP, 65, 3},
- { DBGBUS_DSPP, 65, 4},
- { DBGBUS_DSPP, 65, 5},
- { DBGBUS_DSPP, 65, 6},
- { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 66, 0},
- { DBGBUS_DSPP, 66, 1},
- { DBGBUS_DSPP, 66, 2},
- { DBGBUS_DSPP, 66, 3},
- { DBGBUS_DSPP, 66, 4},
- { DBGBUS_DSPP, 66, 5},
- { DBGBUS_DSPP, 66, 6},
- { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 67, 0},
- { DBGBUS_DSPP, 67, 1},
- { DBGBUS_DSPP, 67, 2},
- { DBGBUS_DSPP, 67, 3},
- { DBGBUS_DSPP, 67, 4},
- { DBGBUS_DSPP, 67, 5},
- { DBGBUS_DSPP, 67, 6},
- { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 68, 0},
- { DBGBUS_DSPP, 68, 1},
- { DBGBUS_DSPP, 68, 2},
- { DBGBUS_DSPP, 68, 3},
- { DBGBUS_DSPP, 68, 4},
- { DBGBUS_DSPP, 68, 5},
- { DBGBUS_DSPP, 68, 6},
- { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 69, 0},
- { DBGBUS_DSPP, 69, 1},
- { DBGBUS_DSPP, 69, 2},
- { DBGBUS_DSPP, 69, 3},
- { DBGBUS_DSPP, 69, 4},
- { DBGBUS_DSPP, 69, 5},
- { DBGBUS_DSPP, 69, 6},
- { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
-
- /* LM1 */
- { DBGBUS_DSPP, 70, 0},
- { DBGBUS_DSPP, 70, 1},
- { DBGBUS_DSPP, 70, 2},
- { DBGBUS_DSPP, 70, 3},
- { DBGBUS_DSPP, 70, 4},
- { DBGBUS_DSPP, 70, 5},
- { DBGBUS_DSPP, 70, 6},
- { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 71, 0},
- { DBGBUS_DSPP, 71, 1},
- { DBGBUS_DSPP, 71, 2},
- { DBGBUS_DSPP, 71, 3},
- { DBGBUS_DSPP, 71, 4},
- { DBGBUS_DSPP, 71, 5},
- { DBGBUS_DSPP, 71, 6},
- { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 72, 0},
- { DBGBUS_DSPP, 72, 1},
- { DBGBUS_DSPP, 72, 2},
- { DBGBUS_DSPP, 72, 3},
- { DBGBUS_DSPP, 72, 4},
- { DBGBUS_DSPP, 72, 5},
- { DBGBUS_DSPP, 72, 6},
- { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 73, 0},
- { DBGBUS_DSPP, 73, 1},
- { DBGBUS_DSPP, 73, 2},
- { DBGBUS_DSPP, 73, 3},
- { DBGBUS_DSPP, 73, 4},
- { DBGBUS_DSPP, 73, 5},
- { DBGBUS_DSPP, 73, 6},
- { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 74, 0},
- { DBGBUS_DSPP, 74, 1},
- { DBGBUS_DSPP, 74, 2},
- { DBGBUS_DSPP, 74, 3},
- { DBGBUS_DSPP, 74, 4},
- { DBGBUS_DSPP, 74, 5},
- { DBGBUS_DSPP, 74, 6},
- { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 75, 0},
- { DBGBUS_DSPP, 75, 1},
- { DBGBUS_DSPP, 75, 2},
- { DBGBUS_DSPP, 75, 3},
- { DBGBUS_DSPP, 75, 4},
- { DBGBUS_DSPP, 75, 5},
- { DBGBUS_DSPP, 75, 6},
- { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 76, 0},
- { DBGBUS_DSPP, 76, 1},
- { DBGBUS_DSPP, 76, 2},
- { DBGBUS_DSPP, 76, 3},
- { DBGBUS_DSPP, 76, 4},
- { DBGBUS_DSPP, 76, 5},
- { DBGBUS_DSPP, 76, 6},
- { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
-
- /* LM2 */
- { DBGBUS_DSPP, 77, 0},
- { DBGBUS_DSPP, 77, 1},
- { DBGBUS_DSPP, 77, 2},
- { DBGBUS_DSPP, 77, 3},
- { DBGBUS_DSPP, 77, 4},
- { DBGBUS_DSPP, 77, 5},
- { DBGBUS_DSPP, 77, 6},
- { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 78, 0},
- { DBGBUS_DSPP, 78, 1},
- { DBGBUS_DSPP, 78, 2},
- { DBGBUS_DSPP, 78, 3},
- { DBGBUS_DSPP, 78, 4},
- { DBGBUS_DSPP, 78, 5},
- { DBGBUS_DSPP, 78, 6},
- { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 79, 0},
- { DBGBUS_DSPP, 79, 1},
- { DBGBUS_DSPP, 79, 2},
- { DBGBUS_DSPP, 79, 3},
- { DBGBUS_DSPP, 79, 4},
- { DBGBUS_DSPP, 79, 5},
- { DBGBUS_DSPP, 79, 6},
- { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 80, 0},
- { DBGBUS_DSPP, 80, 1},
- { DBGBUS_DSPP, 80, 2},
- { DBGBUS_DSPP, 80, 3},
- { DBGBUS_DSPP, 80, 4},
- { DBGBUS_DSPP, 80, 5},
- { DBGBUS_DSPP, 80, 6},
- { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 81, 0},
- { DBGBUS_DSPP, 81, 1},
- { DBGBUS_DSPP, 81, 2},
- { DBGBUS_DSPP, 81, 3},
- { DBGBUS_DSPP, 81, 4},
- { DBGBUS_DSPP, 81, 5},
- { DBGBUS_DSPP, 81, 6},
- { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 82, 0},
- { DBGBUS_DSPP, 82, 1},
- { DBGBUS_DSPP, 82, 2},
- { DBGBUS_DSPP, 82, 3},
- { DBGBUS_DSPP, 82, 4},
- { DBGBUS_DSPP, 82, 5},
- { DBGBUS_DSPP, 82, 6},
- { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 83, 0},
- { DBGBUS_DSPP, 83, 1},
- { DBGBUS_DSPP, 83, 2},
- { DBGBUS_DSPP, 83, 3},
- { DBGBUS_DSPP, 83, 4},
- { DBGBUS_DSPP, 83, 5},
- { DBGBUS_DSPP, 83, 6},
- { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
-
- /* csc */
- { DBGBUS_SSPP0, 7, 0},
- { DBGBUS_SSPP0, 7, 1},
- { DBGBUS_SSPP0, 27, 0},
- { DBGBUS_SSPP0, 27, 1},
- { DBGBUS_SSPP1, 7, 0},
- { DBGBUS_SSPP1, 7, 1},
- { DBGBUS_SSPP1, 27, 0},
- { DBGBUS_SSPP1, 27, 1},
-
- /* pcc */
- { DBGBUS_SSPP0, 3, 3},
- { DBGBUS_SSPP0, 23, 3},
- { DBGBUS_SSPP0, 33, 3},
- { DBGBUS_SSPP0, 43, 3},
- { DBGBUS_SSPP1, 3, 3},
- { DBGBUS_SSPP1, 23, 3},
- { DBGBUS_SSPP1, 33, 3},
- { DBGBUS_SSPP1, 43, 3},
-
- /* spa */
- { DBGBUS_SSPP0, 8, 0},
- { DBGBUS_SSPP0, 28, 0},
- { DBGBUS_SSPP1, 8, 0},
- { DBGBUS_SSPP1, 28, 0},
- { DBGBUS_DSPP, 13, 0},
- { DBGBUS_DSPP, 19, 0},
-
- /* igc */
- { DBGBUS_SSPP0, 9, 0},
- { DBGBUS_SSPP0, 9, 1},
- { DBGBUS_SSPP0, 9, 3},
- { DBGBUS_SSPP0, 29, 0},
- { DBGBUS_SSPP0, 29, 1},
- { DBGBUS_SSPP0, 29, 3},
- { DBGBUS_SSPP0, 17, 0},
- { DBGBUS_SSPP0, 17, 1},
- { DBGBUS_SSPP0, 17, 3},
- { DBGBUS_SSPP0, 37, 0},
- { DBGBUS_SSPP0, 37, 1},
- { DBGBUS_SSPP0, 37, 3},
- { DBGBUS_SSPP0, 46, 0},
- { DBGBUS_SSPP0, 46, 1},
- { DBGBUS_SSPP0, 46, 3},
-
- { DBGBUS_SSPP1, 9, 0},
- { DBGBUS_SSPP1, 9, 1},
- { DBGBUS_SSPP1, 9, 3},
- { DBGBUS_SSPP1, 29, 0},
- { DBGBUS_SSPP1, 29, 1},
- { DBGBUS_SSPP1, 29, 3},
- { DBGBUS_SSPP1, 17, 0},
- { DBGBUS_SSPP1, 17, 1},
- { DBGBUS_SSPP1, 17, 3},
- { DBGBUS_SSPP1, 37, 0},
- { DBGBUS_SSPP1, 37, 1},
- { DBGBUS_SSPP1, 37, 3},
- { DBGBUS_SSPP1, 46, 0},
- { DBGBUS_SSPP1, 46, 1},
- { DBGBUS_SSPP1, 46, 3},
-
- { DBGBUS_DSPP, 14, 0},
- { DBGBUS_DSPP, 14, 1},
- { DBGBUS_DSPP, 14, 3},
- { DBGBUS_DSPP, 20, 0},
- { DBGBUS_DSPP, 20, 1},
- { DBGBUS_DSPP, 20, 3},
-
- { DBGBUS_PERIPH, 60, 0},
-};
-
-static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
-
- /* Unpack 0 sspp 0*/
- { DBGBUS_SSPP0, 50, 2 },
- { DBGBUS_SSPP0, 60, 2 },
- { DBGBUS_SSPP0, 70, 2 },
-
- /* Unpack 0 sspp 1*/
- { DBGBUS_SSPP1, 50, 2 },
- { DBGBUS_SSPP1, 60, 2 },
- { DBGBUS_SSPP1, 70, 2 },
-
- /* scheduler */
- { DBGBUS_DSPP, 130, 0 },
- { DBGBUS_DSPP, 130, 1 },
- { DBGBUS_DSPP, 130, 2 },
- { DBGBUS_DSPP, 130, 3 },
- { DBGBUS_DSPP, 130, 4 },
- { DBGBUS_DSPP, 130, 5 },
-
- /* qseed */
- { DBGBUS_SSPP0, 6, 0},
- { DBGBUS_SSPP0, 6, 1},
- { DBGBUS_SSPP0, 26, 0},
- { DBGBUS_SSPP0, 26, 1},
- { DBGBUS_SSPP1, 6, 0},
- { DBGBUS_SSPP1, 6, 1},
- { DBGBUS_SSPP1, 26, 0},
- { DBGBUS_SSPP1, 26, 1},
-
- /* scale */
- { DBGBUS_SSPP0, 16, 0},
- { DBGBUS_SSPP0, 16, 1},
- { DBGBUS_SSPP0, 36, 0},
- { DBGBUS_SSPP0, 36, 1},
- { DBGBUS_SSPP1, 16, 0},
- { DBGBUS_SSPP1, 16, 1},
- { DBGBUS_SSPP1, 36, 0},
- { DBGBUS_SSPP1, 36, 1},
-
- /* fetch sspp0 */
-
- /* vig 0 */
- { DBGBUS_SSPP0, 0, 0 },
- { DBGBUS_SSPP0, 0, 1 },
- { DBGBUS_SSPP0, 0, 2 },
- { DBGBUS_SSPP0, 0, 3 },
- { DBGBUS_SSPP0, 0, 4 },
- { DBGBUS_SSPP0, 0, 5 },
- { DBGBUS_SSPP0, 0, 6 },
- { DBGBUS_SSPP0, 0, 7 },
-
- { DBGBUS_SSPP0, 1, 0 },
- { DBGBUS_SSPP0, 1, 1 },
- { DBGBUS_SSPP0, 1, 2 },
- { DBGBUS_SSPP0, 1, 3 },
- { DBGBUS_SSPP0, 1, 4 },
- { DBGBUS_SSPP0, 1, 5 },
- { DBGBUS_SSPP0, 1, 6 },
- { DBGBUS_SSPP0, 1, 7 },
-
- { DBGBUS_SSPP0, 2, 0 },
- { DBGBUS_SSPP0, 2, 1 },
- { DBGBUS_SSPP0, 2, 2 },
- { DBGBUS_SSPP0, 2, 3 },
- { DBGBUS_SSPP0, 2, 4 },
- { DBGBUS_SSPP0, 2, 5 },
- { DBGBUS_SSPP0, 2, 6 },
- { DBGBUS_SSPP0, 2, 7 },
-
- { DBGBUS_SSPP0, 4, 0 },
- { DBGBUS_SSPP0, 4, 1 },
- { DBGBUS_SSPP0, 4, 2 },
- { DBGBUS_SSPP0, 4, 3 },
- { DBGBUS_SSPP0, 4, 4 },
- { DBGBUS_SSPP0, 4, 5 },
- { DBGBUS_SSPP0, 4, 6 },
- { DBGBUS_SSPP0, 4, 7 },
-
- { DBGBUS_SSPP0, 5, 0 },
- { DBGBUS_SSPP0, 5, 1 },
- { DBGBUS_SSPP0, 5, 2 },
- { DBGBUS_SSPP0, 5, 3 },
- { DBGBUS_SSPP0, 5, 4 },
- { DBGBUS_SSPP0, 5, 5 },
- { DBGBUS_SSPP0, 5, 6 },
- { DBGBUS_SSPP0, 5, 7 },
-
- /* vig 2 */
- { DBGBUS_SSPP0, 20, 0 },
- { DBGBUS_SSPP0, 20, 1 },
- { DBGBUS_SSPP0, 20, 2 },
- { DBGBUS_SSPP0, 20, 3 },
- { DBGBUS_SSPP0, 20, 4 },
- { DBGBUS_SSPP0, 20, 5 },
- { DBGBUS_SSPP0, 20, 6 },
- { DBGBUS_SSPP0, 20, 7 },
-
- { DBGBUS_SSPP0, 21, 0 },
- { DBGBUS_SSPP0, 21, 1 },
- { DBGBUS_SSPP0, 21, 2 },
- { DBGBUS_SSPP0, 21, 3 },
- { DBGBUS_SSPP0, 21, 4 },
- { DBGBUS_SSPP0, 21, 5 },
- { DBGBUS_SSPP0, 21, 6 },
- { DBGBUS_SSPP0, 21, 7 },
-
- { DBGBUS_SSPP0, 22, 0 },
- { DBGBUS_SSPP0, 22, 1 },
- { DBGBUS_SSPP0, 22, 2 },
- { DBGBUS_SSPP0, 22, 3 },
- { DBGBUS_SSPP0, 22, 4 },
- { DBGBUS_SSPP0, 22, 5 },
- { DBGBUS_SSPP0, 22, 6 },
- { DBGBUS_SSPP0, 22, 7 },
-
- { DBGBUS_SSPP0, 24, 0 },
- { DBGBUS_SSPP0, 24, 1 },
- { DBGBUS_SSPP0, 24, 2 },
- { DBGBUS_SSPP0, 24, 3 },
- { DBGBUS_SSPP0, 24, 4 },
- { DBGBUS_SSPP0, 24, 5 },
- { DBGBUS_SSPP0, 24, 6 },
- { DBGBUS_SSPP0, 24, 7 },
-
- { DBGBUS_SSPP0, 25, 0 },
- { DBGBUS_SSPP0, 25, 1 },
- { DBGBUS_SSPP0, 25, 2 },
- { DBGBUS_SSPP0, 25, 3 },
- { DBGBUS_SSPP0, 25, 4 },
- { DBGBUS_SSPP0, 25, 5 },
- { DBGBUS_SSPP0, 25, 6 },
- { DBGBUS_SSPP0, 25, 7 },
-
- /* dma 2 */
- { DBGBUS_SSPP0, 30, 0 },
- { DBGBUS_SSPP0, 30, 1 },
- { DBGBUS_SSPP0, 30, 2 },
- { DBGBUS_SSPP0, 30, 3 },
- { DBGBUS_SSPP0, 30, 4 },
- { DBGBUS_SSPP0, 30, 5 },
- { DBGBUS_SSPP0, 30, 6 },
- { DBGBUS_SSPP0, 30, 7 },
-
- { DBGBUS_SSPP0, 31, 0 },
- { DBGBUS_SSPP0, 31, 1 },
- { DBGBUS_SSPP0, 31, 2 },
- { DBGBUS_SSPP0, 31, 3 },
- { DBGBUS_SSPP0, 31, 4 },
- { DBGBUS_SSPP0, 31, 5 },
- { DBGBUS_SSPP0, 31, 6 },
- { DBGBUS_SSPP0, 31, 7 },
-
- { DBGBUS_SSPP0, 32, 0 },
- { DBGBUS_SSPP0, 32, 1 },
- { DBGBUS_SSPP0, 32, 2 },
- { DBGBUS_SSPP0, 32, 3 },
- { DBGBUS_SSPP0, 32, 4 },
- { DBGBUS_SSPP0, 32, 5 },
- { DBGBUS_SSPP0, 32, 6 },
- { DBGBUS_SSPP0, 32, 7 },
-
- { DBGBUS_SSPP0, 33, 0 },
- { DBGBUS_SSPP0, 33, 1 },
- { DBGBUS_SSPP0, 33, 2 },
- { DBGBUS_SSPP0, 33, 3 },
- { DBGBUS_SSPP0, 33, 4 },
- { DBGBUS_SSPP0, 33, 5 },
- { DBGBUS_SSPP0, 33, 6 },
- { DBGBUS_SSPP0, 33, 7 },
-
- { DBGBUS_SSPP0, 34, 0 },
- { DBGBUS_SSPP0, 34, 1 },
- { DBGBUS_SSPP0, 34, 2 },
- { DBGBUS_SSPP0, 34, 3 },
- { DBGBUS_SSPP0, 34, 4 },
- { DBGBUS_SSPP0, 34, 5 },
- { DBGBUS_SSPP0, 34, 6 },
- { DBGBUS_SSPP0, 34, 7 },
-
- { DBGBUS_SSPP0, 35, 0 },
- { DBGBUS_SSPP0, 35, 1 },
- { DBGBUS_SSPP0, 35, 2 },
- { DBGBUS_SSPP0, 35, 3 },
-
- /* dma 0 */
- { DBGBUS_SSPP0, 40, 0 },
- { DBGBUS_SSPP0, 40, 1 },
- { DBGBUS_SSPP0, 40, 2 },
- { DBGBUS_SSPP0, 40, 3 },
- { DBGBUS_SSPP0, 40, 4 },
- { DBGBUS_SSPP0, 40, 5 },
- { DBGBUS_SSPP0, 40, 6 },
- { DBGBUS_SSPP0, 40, 7 },
-
- { DBGBUS_SSPP0, 41, 0 },
- { DBGBUS_SSPP0, 41, 1 },
- { DBGBUS_SSPP0, 41, 2 },
- { DBGBUS_SSPP0, 41, 3 },
- { DBGBUS_SSPP0, 41, 4 },
- { DBGBUS_SSPP0, 41, 5 },
- { DBGBUS_SSPP0, 41, 6 },
- { DBGBUS_SSPP0, 41, 7 },
-
- { DBGBUS_SSPP0, 42, 0 },
- { DBGBUS_SSPP0, 42, 1 },
- { DBGBUS_SSPP0, 42, 2 },
- { DBGBUS_SSPP0, 42, 3 },
- { DBGBUS_SSPP0, 42, 4 },
- { DBGBUS_SSPP0, 42, 5 },
- { DBGBUS_SSPP0, 42, 6 },
- { DBGBUS_SSPP0, 42, 7 },
-
- { DBGBUS_SSPP0, 44, 0 },
- { DBGBUS_SSPP0, 44, 1 },
- { DBGBUS_SSPP0, 44, 2 },
- { DBGBUS_SSPP0, 44, 3 },
- { DBGBUS_SSPP0, 44, 4 },
- { DBGBUS_SSPP0, 44, 5 },
- { DBGBUS_SSPP0, 44, 6 },
- { DBGBUS_SSPP0, 44, 7 },
-
- { DBGBUS_SSPP0, 45, 0 },
- { DBGBUS_SSPP0, 45, 1 },
- { DBGBUS_SSPP0, 45, 2 },
- { DBGBUS_SSPP0, 45, 3 },
- { DBGBUS_SSPP0, 45, 4 },
- { DBGBUS_SSPP0, 45, 5 },
- { DBGBUS_SSPP0, 45, 6 },
- { DBGBUS_SSPP0, 45, 7 },
-
- /* fetch sspp1 */
- /* vig 1 */
- { DBGBUS_SSPP1, 0, 0 },
- { DBGBUS_SSPP1, 0, 1 },
- { DBGBUS_SSPP1, 0, 2 },
- { DBGBUS_SSPP1, 0, 3 },
- { DBGBUS_SSPP1, 0, 4 },
- { DBGBUS_SSPP1, 0, 5 },
- { DBGBUS_SSPP1, 0, 6 },
- { DBGBUS_SSPP1, 0, 7 },
-
- { DBGBUS_SSPP1, 1, 0 },
- { DBGBUS_SSPP1, 1, 1 },
- { DBGBUS_SSPP1, 1, 2 },
- { DBGBUS_SSPP1, 1, 3 },
- { DBGBUS_SSPP1, 1, 4 },
- { DBGBUS_SSPP1, 1, 5 },
- { DBGBUS_SSPP1, 1, 6 },
- { DBGBUS_SSPP1, 1, 7 },
-
- { DBGBUS_SSPP1, 2, 0 },
- { DBGBUS_SSPP1, 2, 1 },
- { DBGBUS_SSPP1, 2, 2 },
- { DBGBUS_SSPP1, 2, 3 },
- { DBGBUS_SSPP1, 2, 4 },
- { DBGBUS_SSPP1, 2, 5 },
- { DBGBUS_SSPP1, 2, 6 },
- { DBGBUS_SSPP1, 2, 7 },
-
- { DBGBUS_SSPP1, 4, 0 },
- { DBGBUS_SSPP1, 4, 1 },
- { DBGBUS_SSPP1, 4, 2 },
- { DBGBUS_SSPP1, 4, 3 },
- { DBGBUS_SSPP1, 4, 4 },
- { DBGBUS_SSPP1, 4, 5 },
- { DBGBUS_SSPP1, 4, 6 },
- { DBGBUS_SSPP1, 4, 7 },
-
- { DBGBUS_SSPP1, 5, 0 },
- { DBGBUS_SSPP1, 5, 1 },
- { DBGBUS_SSPP1, 5, 2 },
- { DBGBUS_SSPP1, 5, 3 },
- { DBGBUS_SSPP1, 5, 4 },
- { DBGBUS_SSPP1, 5, 5 },
- { DBGBUS_SSPP1, 5, 6 },
- { DBGBUS_SSPP1, 5, 7 },
-
- /* vig 3 */
- { DBGBUS_SSPP1, 20, 0 },
- { DBGBUS_SSPP1, 20, 1 },
- { DBGBUS_SSPP1, 20, 2 },
- { DBGBUS_SSPP1, 20, 3 },
- { DBGBUS_SSPP1, 20, 4 },
- { DBGBUS_SSPP1, 20, 5 },
- { DBGBUS_SSPP1, 20, 6 },
- { DBGBUS_SSPP1, 20, 7 },
-
- { DBGBUS_SSPP1, 21, 0 },
- { DBGBUS_SSPP1, 21, 1 },
- { DBGBUS_SSPP1, 21, 2 },
- { DBGBUS_SSPP1, 21, 3 },
- { DBGBUS_SSPP1, 21, 4 },
- { DBGBUS_SSPP1, 21, 5 },
- { DBGBUS_SSPP1, 21, 6 },
- { DBGBUS_SSPP1, 21, 7 },
-
- { DBGBUS_SSPP1, 22, 0 },
- { DBGBUS_SSPP1, 22, 1 },
- { DBGBUS_SSPP1, 22, 2 },
- { DBGBUS_SSPP1, 22, 3 },
- { DBGBUS_SSPP1, 22, 4 },
- { DBGBUS_SSPP1, 22, 5 },
- { DBGBUS_SSPP1, 22, 6 },
- { DBGBUS_SSPP1, 22, 7 },
-
- { DBGBUS_SSPP1, 24, 0 },
- { DBGBUS_SSPP1, 24, 1 },
- { DBGBUS_SSPP1, 24, 2 },
- { DBGBUS_SSPP1, 24, 3 },
- { DBGBUS_SSPP1, 24, 4 },
- { DBGBUS_SSPP1, 24, 5 },
- { DBGBUS_SSPP1, 24, 6 },
- { DBGBUS_SSPP1, 24, 7 },
-
- { DBGBUS_SSPP1, 25, 0 },
- { DBGBUS_SSPP1, 25, 1 },
- { DBGBUS_SSPP1, 25, 2 },
- { DBGBUS_SSPP1, 25, 3 },
- { DBGBUS_SSPP1, 25, 4 },
- { DBGBUS_SSPP1, 25, 5 },
- { DBGBUS_SSPP1, 25, 6 },
- { DBGBUS_SSPP1, 25, 7 },
-
- /* dma 3 */
- { DBGBUS_SSPP1, 30, 0 },
- { DBGBUS_SSPP1, 30, 1 },
- { DBGBUS_SSPP1, 30, 2 },
- { DBGBUS_SSPP1, 30, 3 },
- { DBGBUS_SSPP1, 30, 4 },
- { DBGBUS_SSPP1, 30, 5 },
- { DBGBUS_SSPP1, 30, 6 },
- { DBGBUS_SSPP1, 30, 7 },
-
- { DBGBUS_SSPP1, 31, 0 },
- { DBGBUS_SSPP1, 31, 1 },
- { DBGBUS_SSPP1, 31, 2 },
- { DBGBUS_SSPP1, 31, 3 },
- { DBGBUS_SSPP1, 31, 4 },
- { DBGBUS_SSPP1, 31, 5 },
- { DBGBUS_SSPP1, 31, 6 },
- { DBGBUS_SSPP1, 31, 7 },
-
- { DBGBUS_SSPP1, 32, 0 },
- { DBGBUS_SSPP1, 32, 1 },
- { DBGBUS_SSPP1, 32, 2 },
- { DBGBUS_SSPP1, 32, 3 },
- { DBGBUS_SSPP1, 32, 4 },
- { DBGBUS_SSPP1, 32, 5 },
- { DBGBUS_SSPP1, 32, 6 },
- { DBGBUS_SSPP1, 32, 7 },
-
- { DBGBUS_SSPP1, 33, 0 },
- { DBGBUS_SSPP1, 33, 1 },
- { DBGBUS_SSPP1, 33, 2 },
- { DBGBUS_SSPP1, 33, 3 },
- { DBGBUS_SSPP1, 33, 4 },
- { DBGBUS_SSPP1, 33, 5 },
- { DBGBUS_SSPP1, 33, 6 },
- { DBGBUS_SSPP1, 33, 7 },
-
- { DBGBUS_SSPP1, 34, 0 },
- { DBGBUS_SSPP1, 34, 1 },
- { DBGBUS_SSPP1, 34, 2 },
- { DBGBUS_SSPP1, 34, 3 },
- { DBGBUS_SSPP1, 34, 4 },
- { DBGBUS_SSPP1, 34, 5 },
- { DBGBUS_SSPP1, 34, 6 },
- { DBGBUS_SSPP1, 34, 7 },
-
- { DBGBUS_SSPP1, 35, 0 },
- { DBGBUS_SSPP1, 35, 1 },
- { DBGBUS_SSPP1, 35, 2 },
-
- /* dma 1 */
- { DBGBUS_SSPP1, 40, 0 },
- { DBGBUS_SSPP1, 40, 1 },
- { DBGBUS_SSPP1, 40, 2 },
- { DBGBUS_SSPP1, 40, 3 },
- { DBGBUS_SSPP1, 40, 4 },
- { DBGBUS_SSPP1, 40, 5 },
- { DBGBUS_SSPP1, 40, 6 },
- { DBGBUS_SSPP1, 40, 7 },
-
- { DBGBUS_SSPP1, 41, 0 },
- { DBGBUS_SSPP1, 41, 1 },
- { DBGBUS_SSPP1, 41, 2 },
- { DBGBUS_SSPP1, 41, 3 },
- { DBGBUS_SSPP1, 41, 4 },
- { DBGBUS_SSPP1, 41, 5 },
- { DBGBUS_SSPP1, 41, 6 },
- { DBGBUS_SSPP1, 41, 7 },
-
- { DBGBUS_SSPP1, 42, 0 },
- { DBGBUS_SSPP1, 42, 1 },
- { DBGBUS_SSPP1, 42, 2 },
- { DBGBUS_SSPP1, 42, 3 },
- { DBGBUS_SSPP1, 42, 4 },
- { DBGBUS_SSPP1, 42, 5 },
- { DBGBUS_SSPP1, 42, 6 },
- { DBGBUS_SSPP1, 42, 7 },
-
- { DBGBUS_SSPP1, 44, 0 },
- { DBGBUS_SSPP1, 44, 1 },
- { DBGBUS_SSPP1, 44, 2 },
- { DBGBUS_SSPP1, 44, 3 },
- { DBGBUS_SSPP1, 44, 4 },
- { DBGBUS_SSPP1, 44, 5 },
- { DBGBUS_SSPP1, 44, 6 },
- { DBGBUS_SSPP1, 44, 7 },
-
- { DBGBUS_SSPP1, 45, 0 },
- { DBGBUS_SSPP1, 45, 1 },
- { DBGBUS_SSPP1, 45, 2 },
- { DBGBUS_SSPP1, 45, 3 },
- { DBGBUS_SSPP1, 45, 4 },
- { DBGBUS_SSPP1, 45, 5 },
- { DBGBUS_SSPP1, 45, 6 },
- { DBGBUS_SSPP1, 45, 7 },
-
- /* dspp */
- { DBGBUS_DSPP, 13, 0 },
- { DBGBUS_DSPP, 19, 0 },
- { DBGBUS_DSPP, 14, 0 },
- { DBGBUS_DSPP, 14, 1 },
- { DBGBUS_DSPP, 14, 3 },
- { DBGBUS_DSPP, 20, 0 },
- { DBGBUS_DSPP, 20, 1 },
- { DBGBUS_DSPP, 20, 3 },
-
- /* ppb_0 */
- { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
-
- /* ppb_1 */
- { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
-
- /* lm_lut */
- { DBGBUS_DSPP, 109, 0 },
- { DBGBUS_DSPP, 105, 0 },
- { DBGBUS_DSPP, 103, 0 },
-
- /* crossbar */
- { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
-
- /* rotator */
- { DBGBUS_DSPP, 9, 0},
-
- /* blend */
- /* LM0 */
- { DBGBUS_DSPP, 63, 1},
- { DBGBUS_DSPP, 63, 2},
- { DBGBUS_DSPP, 63, 3},
- { DBGBUS_DSPP, 63, 4},
- { DBGBUS_DSPP, 63, 5},
- { DBGBUS_DSPP, 63, 6},
- { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 64, 1},
- { DBGBUS_DSPP, 64, 2},
- { DBGBUS_DSPP, 64, 3},
- { DBGBUS_DSPP, 64, 4},
- { DBGBUS_DSPP, 64, 5},
- { DBGBUS_DSPP, 64, 6},
- { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 65, 1},
- { DBGBUS_DSPP, 65, 2},
- { DBGBUS_DSPP, 65, 3},
- { DBGBUS_DSPP, 65, 4},
- { DBGBUS_DSPP, 65, 5},
- { DBGBUS_DSPP, 65, 6},
- { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 66, 1},
- { DBGBUS_DSPP, 66, 2},
- { DBGBUS_DSPP, 66, 3},
- { DBGBUS_DSPP, 66, 4},
- { DBGBUS_DSPP, 66, 5},
- { DBGBUS_DSPP, 66, 6},
- { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 67, 1},
- { DBGBUS_DSPP, 67, 2},
- { DBGBUS_DSPP, 67, 3},
- { DBGBUS_DSPP, 67, 4},
- { DBGBUS_DSPP, 67, 5},
- { DBGBUS_DSPP, 67, 6},
- { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 68, 1},
- { DBGBUS_DSPP, 68, 2},
- { DBGBUS_DSPP, 68, 3},
- { DBGBUS_DSPP, 68, 4},
- { DBGBUS_DSPP, 68, 5},
- { DBGBUS_DSPP, 68, 6},
- { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 69, 1},
- { DBGBUS_DSPP, 69, 2},
- { DBGBUS_DSPP, 69, 3},
- { DBGBUS_DSPP, 69, 4},
- { DBGBUS_DSPP, 69, 5},
- { DBGBUS_DSPP, 69, 6},
- { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 84, 1},
- { DBGBUS_DSPP, 84, 2},
- { DBGBUS_DSPP, 84, 3},
- { DBGBUS_DSPP, 84, 4},
- { DBGBUS_DSPP, 84, 5},
- { DBGBUS_DSPP, 84, 6},
- { DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
-
-
- { DBGBUS_DSPP, 85, 1},
- { DBGBUS_DSPP, 85, 2},
- { DBGBUS_DSPP, 85, 3},
- { DBGBUS_DSPP, 85, 4},
- { DBGBUS_DSPP, 85, 5},
- { DBGBUS_DSPP, 85, 6},
- { DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
-
-
- { DBGBUS_DSPP, 86, 1},
- { DBGBUS_DSPP, 86, 2},
- { DBGBUS_DSPP, 86, 3},
- { DBGBUS_DSPP, 86, 4},
- { DBGBUS_DSPP, 86, 5},
- { DBGBUS_DSPP, 86, 6},
- { DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
-
-
- { DBGBUS_DSPP, 87, 1},
- { DBGBUS_DSPP, 87, 2},
- { DBGBUS_DSPP, 87, 3},
- { DBGBUS_DSPP, 87, 4},
- { DBGBUS_DSPP, 87, 5},
- { DBGBUS_DSPP, 87, 6},
- { DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
-
- /* LM1 */
- { DBGBUS_DSPP, 70, 1},
- { DBGBUS_DSPP, 70, 2},
- { DBGBUS_DSPP, 70, 3},
- { DBGBUS_DSPP, 70, 4},
- { DBGBUS_DSPP, 70, 5},
- { DBGBUS_DSPP, 70, 6},
- { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 71, 1},
- { DBGBUS_DSPP, 71, 2},
- { DBGBUS_DSPP, 71, 3},
- { DBGBUS_DSPP, 71, 4},
- { DBGBUS_DSPP, 71, 5},
- { DBGBUS_DSPP, 71, 6},
- { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 72, 1},
- { DBGBUS_DSPP, 72, 2},
- { DBGBUS_DSPP, 72, 3},
- { DBGBUS_DSPP, 72, 4},
- { DBGBUS_DSPP, 72, 5},
- { DBGBUS_DSPP, 72, 6},
- { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 73, 1},
- { DBGBUS_DSPP, 73, 2},
- { DBGBUS_DSPP, 73, 3},
- { DBGBUS_DSPP, 73, 4},
- { DBGBUS_DSPP, 73, 5},
- { DBGBUS_DSPP, 73, 6},
- { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 74, 1},
- { DBGBUS_DSPP, 74, 2},
- { DBGBUS_DSPP, 74, 3},
- { DBGBUS_DSPP, 74, 4},
- { DBGBUS_DSPP, 74, 5},
- { DBGBUS_DSPP, 74, 6},
- { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 75, 1},
- { DBGBUS_DSPP, 75, 2},
- { DBGBUS_DSPP, 75, 3},
- { DBGBUS_DSPP, 75, 4},
- { DBGBUS_DSPP, 75, 5},
- { DBGBUS_DSPP, 75, 6},
- { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 76, 1},
- { DBGBUS_DSPP, 76, 2},
- { DBGBUS_DSPP, 76, 3},
- { DBGBUS_DSPP, 76, 4},
- { DBGBUS_DSPP, 76, 5},
- { DBGBUS_DSPP, 76, 6},
- { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 88, 1},
- { DBGBUS_DSPP, 88, 2},
- { DBGBUS_DSPP, 88, 3},
- { DBGBUS_DSPP, 88, 4},
- { DBGBUS_DSPP, 88, 5},
- { DBGBUS_DSPP, 88, 6},
- { DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 89, 1},
- { DBGBUS_DSPP, 89, 2},
- { DBGBUS_DSPP, 89, 3},
- { DBGBUS_DSPP, 89, 4},
- { DBGBUS_DSPP, 89, 5},
- { DBGBUS_DSPP, 89, 6},
- { DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 90, 1},
- { DBGBUS_DSPP, 90, 2},
- { DBGBUS_DSPP, 90, 3},
- { DBGBUS_DSPP, 90, 4},
- { DBGBUS_DSPP, 90, 5},
- { DBGBUS_DSPP, 90, 6},
- { DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 91, 1},
- { DBGBUS_DSPP, 91, 2},
- { DBGBUS_DSPP, 91, 3},
- { DBGBUS_DSPP, 91, 4},
- { DBGBUS_DSPP, 91, 5},
- { DBGBUS_DSPP, 91, 6},
- { DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
-
- /* LM2 */
- { DBGBUS_DSPP, 77, 0},
- { DBGBUS_DSPP, 77, 1},
- { DBGBUS_DSPP, 77, 2},
- { DBGBUS_DSPP, 77, 3},
- { DBGBUS_DSPP, 77, 4},
- { DBGBUS_DSPP, 77, 5},
- { DBGBUS_DSPP, 77, 6},
- { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 78, 0},
- { DBGBUS_DSPP, 78, 1},
- { DBGBUS_DSPP, 78, 2},
- { DBGBUS_DSPP, 78, 3},
- { DBGBUS_DSPP, 78, 4},
- { DBGBUS_DSPP, 78, 5},
- { DBGBUS_DSPP, 78, 6},
- { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 79, 0},
- { DBGBUS_DSPP, 79, 1},
- { DBGBUS_DSPP, 79, 2},
- { DBGBUS_DSPP, 79, 3},
- { DBGBUS_DSPP, 79, 4},
- { DBGBUS_DSPP, 79, 5},
- { DBGBUS_DSPP, 79, 6},
- { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 80, 0},
- { DBGBUS_DSPP, 80, 1},
- { DBGBUS_DSPP, 80, 2},
- { DBGBUS_DSPP, 80, 3},
- { DBGBUS_DSPP, 80, 4},
- { DBGBUS_DSPP, 80, 5},
- { DBGBUS_DSPP, 80, 6},
- { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 81, 0},
- { DBGBUS_DSPP, 81, 1},
- { DBGBUS_DSPP, 81, 2},
- { DBGBUS_DSPP, 81, 3},
- { DBGBUS_DSPP, 81, 4},
- { DBGBUS_DSPP, 81, 5},
- { DBGBUS_DSPP, 81, 6},
- { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 82, 0},
- { DBGBUS_DSPP, 82, 1},
- { DBGBUS_DSPP, 82, 2},
- { DBGBUS_DSPP, 82, 3},
- { DBGBUS_DSPP, 82, 4},
- { DBGBUS_DSPP, 82, 5},
- { DBGBUS_DSPP, 82, 6},
- { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 83, 0},
- { DBGBUS_DSPP, 83, 1},
- { DBGBUS_DSPP, 83, 2},
- { DBGBUS_DSPP, 83, 3},
- { DBGBUS_DSPP, 83, 4},
- { DBGBUS_DSPP, 83, 5},
- { DBGBUS_DSPP, 83, 6},
- { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 92, 1},
- { DBGBUS_DSPP, 92, 2},
- { DBGBUS_DSPP, 92, 3},
- { DBGBUS_DSPP, 92, 4},
- { DBGBUS_DSPP, 92, 5},
- { DBGBUS_DSPP, 92, 6},
- { DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 93, 1},
- { DBGBUS_DSPP, 93, 2},
- { DBGBUS_DSPP, 93, 3},
- { DBGBUS_DSPP, 93, 4},
- { DBGBUS_DSPP, 93, 5},
- { DBGBUS_DSPP, 93, 6},
- { DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 94, 1},
- { DBGBUS_DSPP, 94, 2},
- { DBGBUS_DSPP, 94, 3},
- { DBGBUS_DSPP, 94, 4},
- { DBGBUS_DSPP, 94, 5},
- { DBGBUS_DSPP, 94, 6},
- { DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 95, 1},
- { DBGBUS_DSPP, 95, 2},
- { DBGBUS_DSPP, 95, 3},
- { DBGBUS_DSPP, 95, 4},
- { DBGBUS_DSPP, 95, 5},
- { DBGBUS_DSPP, 95, 6},
- { DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
-
- /* LM5 */
- { DBGBUS_DSPP, 110, 1},
- { DBGBUS_DSPP, 110, 2},
- { DBGBUS_DSPP, 110, 3},
- { DBGBUS_DSPP, 110, 4},
- { DBGBUS_DSPP, 110, 5},
- { DBGBUS_DSPP, 110, 6},
- { DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 111, 1},
- { DBGBUS_DSPP, 111, 2},
- { DBGBUS_DSPP, 111, 3},
- { DBGBUS_DSPP, 111, 4},
- { DBGBUS_DSPP, 111, 5},
- { DBGBUS_DSPP, 111, 6},
- { DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 112, 1},
- { DBGBUS_DSPP, 112, 2},
- { DBGBUS_DSPP, 112, 3},
- { DBGBUS_DSPP, 112, 4},
- { DBGBUS_DSPP, 112, 5},
- { DBGBUS_DSPP, 112, 6},
- { DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 113, 1},
- { DBGBUS_DSPP, 113, 2},
- { DBGBUS_DSPP, 113, 3},
- { DBGBUS_DSPP, 113, 4},
- { DBGBUS_DSPP, 113, 5},
- { DBGBUS_DSPP, 113, 6},
- { DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 114, 1},
- { DBGBUS_DSPP, 114, 2},
- { DBGBUS_DSPP, 114, 3},
- { DBGBUS_DSPP, 114, 4},
- { DBGBUS_DSPP, 114, 5},
- { DBGBUS_DSPP, 114, 6},
- { DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 115, 1},
- { DBGBUS_DSPP, 115, 2},
- { DBGBUS_DSPP, 115, 3},
- { DBGBUS_DSPP, 115, 4},
- { DBGBUS_DSPP, 115, 5},
- { DBGBUS_DSPP, 115, 6},
- { DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 116, 1},
- { DBGBUS_DSPP, 116, 2},
- { DBGBUS_DSPP, 116, 3},
- { DBGBUS_DSPP, 116, 4},
- { DBGBUS_DSPP, 116, 5},
- { DBGBUS_DSPP, 116, 6},
- { DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 117, 1},
- { DBGBUS_DSPP, 117, 2},
- { DBGBUS_DSPP, 117, 3},
- { DBGBUS_DSPP, 117, 4},
- { DBGBUS_DSPP, 117, 5},
- { DBGBUS_DSPP, 117, 6},
- { DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 118, 1},
- { DBGBUS_DSPP, 118, 2},
- { DBGBUS_DSPP, 118, 3},
- { DBGBUS_DSPP, 118, 4},
- { DBGBUS_DSPP, 118, 5},
- { DBGBUS_DSPP, 118, 6},
- { DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 119, 1},
- { DBGBUS_DSPP, 119, 2},
- { DBGBUS_DSPP, 119, 3},
- { DBGBUS_DSPP, 119, 4},
- { DBGBUS_DSPP, 119, 5},
- { DBGBUS_DSPP, 119, 6},
- { DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 120, 1},
- { DBGBUS_DSPP, 120, 2},
- { DBGBUS_DSPP, 120, 3},
- { DBGBUS_DSPP, 120, 4},
- { DBGBUS_DSPP, 120, 5},
- { DBGBUS_DSPP, 120, 6},
- { DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
-
- /* csc */
- { DBGBUS_SSPP0, 7, 0},
- { DBGBUS_SSPP0, 7, 1},
- { DBGBUS_SSPP0, 27, 0},
- { DBGBUS_SSPP0, 27, 1},
- { DBGBUS_SSPP1, 7, 0},
- { DBGBUS_SSPP1, 7, 1},
- { DBGBUS_SSPP1, 27, 0},
- { DBGBUS_SSPP1, 27, 1},
-
- /* pcc */
- { DBGBUS_SSPP0, 3, 3},
- { DBGBUS_SSPP0, 23, 3},
- { DBGBUS_SSPP0, 33, 3},
- { DBGBUS_SSPP0, 43, 3},
- { DBGBUS_SSPP1, 3, 3},
- { DBGBUS_SSPP1, 23, 3},
- { DBGBUS_SSPP1, 33, 3},
- { DBGBUS_SSPP1, 43, 3},
-
- /* spa */
- { DBGBUS_SSPP0, 8, 0},
- { DBGBUS_SSPP0, 28, 0},
- { DBGBUS_SSPP1, 8, 0},
- { DBGBUS_SSPP1, 28, 0},
- { DBGBUS_DSPP, 13, 0},
- { DBGBUS_DSPP, 19, 0},
-
- /* igc */
- { DBGBUS_SSPP0, 17, 0},
- { DBGBUS_SSPP0, 17, 1},
- { DBGBUS_SSPP0, 17, 3},
- { DBGBUS_SSPP0, 37, 0},
- { DBGBUS_SSPP0, 37, 1},
- { DBGBUS_SSPP0, 37, 3},
- { DBGBUS_SSPP0, 46, 0},
- { DBGBUS_SSPP0, 46, 1},
- { DBGBUS_SSPP0, 46, 3},
-
- { DBGBUS_SSPP1, 17, 0},
- { DBGBUS_SSPP1, 17, 1},
- { DBGBUS_SSPP1, 17, 3},
- { DBGBUS_SSPP1, 37, 0},
- { DBGBUS_SSPP1, 37, 1},
- { DBGBUS_SSPP1, 37, 3},
- { DBGBUS_SSPP1, 46, 0},
- { DBGBUS_SSPP1, 46, 1},
- { DBGBUS_SSPP1, 46, 3},
-
- { DBGBUS_DSPP, 14, 0},
- { DBGBUS_DSPP, 14, 1},
- { DBGBUS_DSPP, 14, 3},
- { DBGBUS_DSPP, 20, 0},
- { DBGBUS_DSPP, 20, 1},
- { DBGBUS_DSPP, 20, 3},
-
- /* intf0-3 */
- { DBGBUS_PERIPH, 0, 0},
- { DBGBUS_PERIPH, 1, 0},
- { DBGBUS_PERIPH, 2, 0},
- { DBGBUS_PERIPH, 3, 0},
-
- /* te counter wrapper */
- { DBGBUS_PERIPH, 60, 0},
-
- /* dsc0 */
- { DBGBUS_PERIPH, 47, 0},
- { DBGBUS_PERIPH, 47, 1},
- { DBGBUS_PERIPH, 47, 2},
- { DBGBUS_PERIPH, 47, 3},
- { DBGBUS_PERIPH, 47, 4},
- { DBGBUS_PERIPH, 47, 5},
- { DBGBUS_PERIPH, 47, 6},
- { DBGBUS_PERIPH, 47, 7},
-
- /* dsc1 */
- { DBGBUS_PERIPH, 48, 0},
- { DBGBUS_PERIPH, 48, 1},
- { DBGBUS_PERIPH, 48, 2},
- { DBGBUS_PERIPH, 48, 3},
- { DBGBUS_PERIPH, 48, 4},
- { DBGBUS_PERIPH, 48, 5},
- { DBGBUS_PERIPH, 48, 6},
- { DBGBUS_PERIPH, 48, 7},
-
- /* dsc2 */
- { DBGBUS_PERIPH, 51, 0},
- { DBGBUS_PERIPH, 51, 1},
- { DBGBUS_PERIPH, 51, 2},
- { DBGBUS_PERIPH, 51, 3},
- { DBGBUS_PERIPH, 51, 4},
- { DBGBUS_PERIPH, 51, 5},
- { DBGBUS_PERIPH, 51, 6},
- { DBGBUS_PERIPH, 51, 7},
-
- /* dsc3 */
- { DBGBUS_PERIPH, 52, 0},
- { DBGBUS_PERIPH, 52, 1},
- { DBGBUS_PERIPH, 52, 2},
- { DBGBUS_PERIPH, 52, 3},
- { DBGBUS_PERIPH, 52, 4},
- { DBGBUS_PERIPH, 52, 5},
- { DBGBUS_PERIPH, 52, 6},
- { DBGBUS_PERIPH, 52, 7},
-
- /* tear-check */
- { DBGBUS_PERIPH, 63, 0 },
- { DBGBUS_PERIPH, 64, 0 },
- { DBGBUS_PERIPH, 65, 0 },
- { DBGBUS_PERIPH, 73, 0 },
- { DBGBUS_PERIPH, 74, 0 },
-
- /* cdwn */
- { DBGBUS_PERIPH, 80, 0},
- { DBGBUS_PERIPH, 80, 1},
- { DBGBUS_PERIPH, 80, 2},
-
- { DBGBUS_PERIPH, 81, 0},
- { DBGBUS_PERIPH, 81, 1},
- { DBGBUS_PERIPH, 81, 2},
-
- { DBGBUS_PERIPH, 82, 0},
- { DBGBUS_PERIPH, 82, 1},
- { DBGBUS_PERIPH, 82, 2},
- { DBGBUS_PERIPH, 82, 3},
- { DBGBUS_PERIPH, 82, 4},
- { DBGBUS_PERIPH, 82, 5},
- { DBGBUS_PERIPH, 82, 6},
- { DBGBUS_PERIPH, 82, 7},
-
- /* hdmi */
- { DBGBUS_PERIPH, 68, 0},
- { DBGBUS_PERIPH, 68, 1},
- { DBGBUS_PERIPH, 68, 2},
- { DBGBUS_PERIPH, 68, 3},
- { DBGBUS_PERIPH, 68, 4},
- { DBGBUS_PERIPH, 68, 5},
-
- /* edp */
- { DBGBUS_PERIPH, 69, 0},
- { DBGBUS_PERIPH, 69, 1},
- { DBGBUS_PERIPH, 69, 2},
- { DBGBUS_PERIPH, 69, 3},
- { DBGBUS_PERIPH, 69, 4},
- { DBGBUS_PERIPH, 69, 5},
-
- /* dsi0 */
- { DBGBUS_PERIPH, 70, 0},
- { DBGBUS_PERIPH, 70, 1},
- { DBGBUS_PERIPH, 70, 2},
- { DBGBUS_PERIPH, 70, 3},
- { DBGBUS_PERIPH, 70, 4},
- { DBGBUS_PERIPH, 70, 5},
-
- /* dsi1 */
- { DBGBUS_PERIPH, 71, 0},
- { DBGBUS_PERIPH, 71, 1},
- { DBGBUS_PERIPH, 71, 2},
- { DBGBUS_PERIPH, 71, 3},
- { DBGBUS_PERIPH, 71, 4},
- { DBGBUS_PERIPH, 71, 5},
-};
-
-static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
- {0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */
- {0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */
- {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
- {0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */
- {0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */
- {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
- {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
-};
-
-/**
- * _dpu_dbg_enable_power - use callback to turn power on for hw register access
- * @enable: whether to turn power on or off
- */
-static inline void _dpu_dbg_enable_power(int enable)
-{
- if (enable)
- pm_runtime_get_sync(dpu_dbg_base.dev);
- else
- pm_runtime_put_sync(dpu_dbg_base.dev);
-}
-
-static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
-{
- bool in_log, in_mem;
- u32 **dump_mem = NULL;
- u32 *dump_addr = NULL;
- u32 status = 0;
- struct dpu_debug_bus_entry *head;
- phys_addr_t phys = 0;
- int list_size;
- int i;
- u32 offset;
- void __iomem *mem_base = NULL;
- struct dpu_dbg_reg_base *reg_base;
-
- if (!bus || !bus->cmn.entries_size)
- return;
-
- list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
- reg_base_head)
- if (strlen(reg_base->name) &&
- !strcmp(reg_base->name, bus->cmn.name))
- mem_base = reg_base->base + bus->top_blk_off;
-
- if (!mem_base) {
- pr_err("unable to find mem_base for %s\n", bus->cmn.name);
- return;
- }
-
- dump_mem = &bus->cmn.dumped_content;
-
- /* will keep in memory 4 entries of 4 bytes each */
- list_size = (bus->cmn.entries_size * 4 * 4);
-
- in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
- in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
-
- if (!in_log && !in_mem)
- return;
-
- dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
- bus->cmn.name);
-
- if (in_mem) {
- if (!(*dump_mem))
- *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
- list_size, &phys, GFP_KERNEL);
-
- if (*dump_mem) {
- dump_addr = *dump_mem;
- dev_info(dpu_dbg_base.dev,
- "%s: start_addr:0x%pK len:0x%x\n",
- __func__, dump_addr, list_size);
- } else {
- in_mem = false;
- pr_err("dump_mem: allocation fails\n");
- }
- }
-
- _dpu_dbg_enable_power(true);
- for (i = 0; i < bus->cmn.entries_size; i++) {
- head = bus->entries + i;
- writel_relaxed(TEST_MASK(head->block_id, head->test_id),
- mem_base + head->wr_addr);
- wmb(); /* make sure test bits were written */
-
- if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
- offset = DBGBUS_DSPP_STATUS;
- /* keep DSPP test point enabled */
- if (head->wr_addr != DBGBUS_DSPP)
- writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
- } else {
- offset = head->wr_addr + 0x4;
- }
-
- status = readl_relaxed(mem_base + offset);
-
- if (in_log)
- dev_info(dpu_dbg_base.dev,
- "waddr=0x%x blk=%d tst=%d val=0x%x\n",
- head->wr_addr, head->block_id,
- head->test_id, status);
-
- if (dump_addr && in_mem) {
- dump_addr[i*4] = head->wr_addr;
- dump_addr[i*4 + 1] = head->block_id;
- dump_addr[i*4 + 2] = head->test_id;
- dump_addr[i*4 + 3] = status;
- }
-
- if (head->analyzer)
- head->analyzer(mem_base, head, status);
-
- /* Disable debug bus once we are done */
- writel_relaxed(0, mem_base + head->wr_addr);
- if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
- head->wr_addr != DBGBUS_DSPP)
- writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
- }
- _dpu_dbg_enable_power(false);
-
- dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
- bus->cmn.name);
-}
-
-static void _dpu_dbg_dump_vbif_debug_bus_entry(
- struct vbif_debug_bus_entry *head, void __iomem *mem_base,
- u32 *dump_addr, bool in_log)
-{
- int i, j;
- u32 val;
-
- if (!dump_addr && !in_log)
- return;
-
- for (i = 0; i < head->block_cnt; i++) {
- writel_relaxed(1 << (i + head->bit_offset),
- mem_base + head->block_bus_addr);
- /* make sure that current bus block is enabled */
- wmb();
- for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
- writel_relaxed(j, mem_base + head->block_bus_addr + 4);
- /* make sure that test point is enabled */
- wmb();
- val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
- if (dump_addr) {
- *dump_addr++ = head->block_bus_addr;
- *dump_addr++ = i;
- *dump_addr++ = j;
- *dump_addr++ = val;
- }
- if (in_log)
- dev_info(dpu_dbg_base.dev,
- "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
- head->block_bus_addr, i, j, val);
- }
- }
-}
-
-static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
-{
- bool in_log, in_mem;
- u32 **dump_mem = NULL;
- u32 *dump_addr = NULL;
- u32 value, d0, d1;
- unsigned long reg, reg1, reg2;
- struct vbif_debug_bus_entry *head;
- phys_addr_t phys = 0;
- int i, list_size = 0;
- void __iomem *mem_base = NULL;
- struct vbif_debug_bus_entry *dbg_bus;
- u32 bus_size;
- struct dpu_dbg_reg_base *reg_base;
-
- if (!bus || !bus->cmn.entries_size)
- return;
-
- list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
- reg_base_head)
- if (strlen(reg_base->name) &&
- !strcmp(reg_base->name, bus->cmn.name))
- mem_base = reg_base->base;
-
- if (!mem_base) {
- pr_err("unable to find mem_base for %s\n", bus->cmn.name);
- return;
- }
-
- dbg_bus = bus->entries;
- bus_size = bus->cmn.entries_size;
- list_size = bus->cmn.entries_size;
- dump_mem = &bus->cmn.dumped_content;
-
- dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
- bus->cmn.name);
-
- if (!dump_mem || !dbg_bus || !bus_size || !list_size)
- return;
-
- /* allocate memory for each test point */
- for (i = 0; i < bus_size; i++) {
- head = dbg_bus + i;
- list_size += (head->block_cnt * head->test_pnt_cnt);
- }
-
- /* 4 bytes * 4 entries for each test point*/
- list_size *= 16;
-
- in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
- in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
-
- if (!in_log && !in_mem)
- return;
-
- if (in_mem) {
- if (!(*dump_mem))
- *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
- list_size, &phys, GFP_KERNEL);
-
- if (*dump_mem) {
- dump_addr = *dump_mem;
- dev_info(dpu_dbg_base.dev,
- "%s: start_addr:0x%pK len:0x%x\n",
- __func__, dump_addr, list_size);
- } else {
- in_mem = false;
- pr_err("dump_mem: allocation fails\n");
- }
- }
-
- _dpu_dbg_enable_power(true);
-
- value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
- writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
-
- /* make sure that vbif core is on */
- wmb();
-
- /**
- * Extract VBIF error info based on XIN halt and error status.
- * If the XIN client is not in HALT state, or an error is detected,
- * then retrieve the VBIF error info for it.
- */
- reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
- reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
- reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
- dev_err(dpu_dbg_base.dev,
- "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
- reg, reg1, reg2);
- reg >>= 16;
- reg &= ~(reg1 | reg2);
- for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
- if (!test_bit(0, &reg)) {
- writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
- /* make sure reg write goes through */
- wmb();
-
- d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
- d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
-
- dev_err(dpu_dbg_base.dev,
- "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
- i, d0, d1);
- }
- reg >>= 1;
- }
-
- for (i = 0; i < bus_size; i++) {
- head = dbg_bus + i;
-
- writel_relaxed(0, mem_base + head->disable_bus_addr);
- writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
- /* make sure that other bus is off */
- wmb();
-
- _dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
- in_log);
- if (dump_addr)
- dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
- }
-
- _dpu_dbg_enable_power(false);
-
- dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
- bus->cmn.name);
-}
-
-/**
- * _dpu_dump_array - dump array of register bases
- * @name: string indicating origin of dump
- * @dump_dbgbus_dpu: whether to dump the dpu debug bus
- * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
- */
-static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
- bool dump_dbgbus_vbif_rt)
-{
- if (dump_dbgbus_dpu)
- _dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
-
- if (dump_dbgbus_vbif_rt)
- _dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
-}
-
-/**
- * _dpu_dump_work - deferred dump work function
- * @work: work structure
- */
-static void _dpu_dump_work(struct work_struct *work)
-{
- _dpu_dump_array("dpudump_workitem",
- dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
- dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
-}
-
-void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
- bool dump_dbgbus_vbif_rt)
-{
- if (queue_work && work_pending(&dpu_dbg_base.dump_work))
- return;
-
- if (!queue_work) {
- _dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
- return;
- }
-
- /* schedule work to dump later */
- dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
- dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
- dump_dbgbus_vbif_rt;
- schedule_work(&dpu_dbg_base.dump_work);
-}
-
-/*
- * dpu_dbg_debugfs_open - debugfs open handler for debug dump
- * @inode: debugfs inode
- * @file: file handle
- */
-static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
-{
- /* non-seekable */
- file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
- file->private_data = inode->i_private;
- return 0;
-}
-
-/**
- * dpu_dbg_dump_write - debugfs write handler for debug dump
- * @file: file handler
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t dpu_dbg_dump_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- _dpu_dump_array("dump_debugfs", true, true);
- return count;
-}
-
-static const struct file_operations dpu_dbg_dump_fops = {
- .open = dpu_dbg_debugfs_open,
- .write = dpu_dbg_dump_write,
-};
-
-int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
-{
- static struct dpu_dbg_base *dbg = &dpu_dbg_base;
- char debug_name[80] = "";
-
- if (!debugfs_root)
- return -EINVAL;
-
- debugfs_create_file("dump", 0600, debugfs_root, NULL,
- &dpu_dbg_dump_fops);
-
- if (dbg->dbgbus_dpu.entries) {
- dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
- snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
- dbg->dbgbus_dpu.cmn.name);
- dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
- debugfs_create_u32(debug_name, 0600, debugfs_root,
- &dbg->dbgbus_dpu.cmn.enable_mask);
- }
-
- if (dbg->dbgbus_vbif_rt.entries) {
- dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
- snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
- dbg->dbgbus_vbif_rt.cmn.name);
- dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
- debugfs_create_u32(debug_name, 0600, debugfs_root,
- &dbg->dbgbus_vbif_rt.cmn.enable_mask);
- }
-
- return 0;
-}
-
-static void _dpu_dbg_debugfs_destroy(void)
-{
-}
-
-void dpu_dbg_init_dbg_buses(u32 hwversion)
-{
- static struct dpu_dbg_base *dbg = &dpu_dbg_base;
-
- memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
- memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
-
- if (IS_MSM8998_TARGET(hwversion)) {
- dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
- dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
- dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
-
- dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
- dbg->dbgbus_vbif_rt.cmn.entries_size =
- ARRAY_SIZE(vbif_dbg_bus_msm8998);
- } else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
- dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
- dbg->dbgbus_dpu.cmn.entries_size =
- ARRAY_SIZE(dbg_bus_dpu_sdm845);
- dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
-
- /* vbif is unchanged vs 8998 */
- dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
- dbg->dbgbus_vbif_rt.cmn.entries_size =
- ARRAY_SIZE(vbif_dbg_bus_msm8998);
- } else {
- pr_err("unsupported chipset id %X\n", hwversion);
- }
-}
-
-int dpu_dbg_init(struct device *dev)
-{
- if (!dev) {
- pr_err("invalid params\n");
- return -EINVAL;
- }
-
- INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
- dpu_dbg_base.dev = dev;
-
- INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
-
- return 0;
-}
-
-/**
- * dpu_dbg_destroy - destroy dpu debug facilities
- */
-void dpu_dbg_destroy(void)
-{
- _dpu_dbg_debugfs_destroy();
-}
-
-void dpu_dbg_set_dpu_top_offset(u32 blk_off)
-{
- dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
deleted file mode 100644
index 1e6fa945f98b..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef DPU_DBG_H_
-#define DPU_DBG_H_
-
-#include <stdarg.h>
-#include <linux/debugfs.h>
-#include <linux/list.h>
-
-enum dpu_dbg_dump_flag {
- DPU_DBG_DUMP_IN_LOG = BIT(0),
- DPU_DBG_DUMP_IN_MEM = BIT(1),
-};
-
-#if defined(CONFIG_DEBUG_FS)
-
-/**
- * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
- * @hwversion: Chipset revision
- */
-void dpu_dbg_init_dbg_buses(u32 hwversion);
-
-/**
- * dpu_dbg_init - initialize global dpu debug facilities: regdump
- * @dev: device handle
- * Returns: 0 or -ERROR
- */
-int dpu_dbg_init(struct device *dev);
-
-/**
- * dpu_dbg_debugfs_register - register entries at the given debugfs dir
- * @debugfs_root: debugfs root in which to create dpu debug entries
- * Returns: 0 or -ERROR
- */
-int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
-
-/**
- * dpu_dbg_destroy - destroy the global dpu debug facilities
- * Returns: none
- */
-void dpu_dbg_destroy(void);
-
-/**
- * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
- * @queue_work: whether to queue the dumping work to the work_struct
- * @name: string indicating origin of dump
- * @dump_dbgbus: dump the dpu debug bus
- * @dump_vbif_rt: dump the vbif rt bus
- * Returns: none
- */
-void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
- bool dump_dbgbus_vbif_rt);
-
-/**
- * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
- * address of the top registers. Used for accessing debug bus controls.
- * @blk_off: offset from mdss base of the top block
- */
-void dpu_dbg_set_dpu_top_offset(u32 blk_off);
-
-#else
-
-static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
-{
-}
-
-static inline int dpu_dbg_init(struct device *dev)
-{
- return 0;
-}
-
-static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
-{
- return 0;
-}
-
-static inline void dpu_dbg_destroy(void)
-{
-}
-
-static inline void dpu_dbg_dump(bool queue_work, const char *name,
- bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
-{
-}
-
-static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
-{
-}
-
-#endif /* defined(CONFIG_DEBUG_FS) */
-
-
-#endif /* DPU_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 96cdf06e7da2..36158b7d99cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -130,8 +130,9 @@ enum dpu_enc_rc_states {
* Virtual encoder defers as much as possible to the physical encoders.
* Virtual encoder registers itself with the DRM Framework as the encoder.
* @base: drm_encoder base class for registration with DRM
- * @enc_spin_lock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @bus_scaling_client: Client handle to the bus scaling interface
+ * @enabled: True if the encoder is active, protected by enc_lock
* @num_phys_encs: Actual number of physical encoders contained.
* @phys_encs: Container of physical encoders managed.
* @cur_master: Pointer to the current master in this mode. Optimization
@@ -141,15 +142,17 @@ enum dpu_enc_rc_states {
* @intfs_swapped Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
- * @crtc_vblank_cb: Callback into the upper layer / CRTC for
- * notification of the VBLANK
- * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
+ * @crtc: Pointer to the currently assigned crtc. Normally you
+ * would use crtc->state->encoder_mask to determine the
+ * link between encoder/crtc. However in this case we need
+ * to track crtc in the disable() hook which is called
+ * _after_ encoder_mask is cleared.
* @crtc_kickoff_cb: Callback into CRTC that will flush & start
* all CTL paths
* @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
* @debugfs_root: Debug file system root file node
- * @enc_lock: Lock around physical encoder create/destroy and
- access.
+ * @enc_lock: Lock around physical encoder
+ * create/destroy/enable/disable
* @frame_busy_mask: Bitmask tracking which phys_enc we are still
* busy processing current command.
* Bit0 = phys_encs[0] etc.
@@ -175,6 +178,8 @@ struct dpu_encoder_virt {
spinlock_t enc_spinlock;
uint32_t bus_scaling_client;
+ bool enabled;
+
unsigned int num_phys_encs;
struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
struct dpu_encoder_phys *cur_master;
@@ -183,8 +188,7 @@ struct dpu_encoder_virt {
bool intfs_swapped;
- void (*crtc_vblank_cb)(void *);
- void *crtc_vblank_cb_data;
+ struct drm_crtc *crtc;
struct dentry *debugfs_root;
struct mutex enc_lock;
@@ -210,39 +214,6 @@ struct dpu_encoder_virt {
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
-static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
- bool enable)
-{
- struct drm_encoder *drm_enc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!dpu_enc) {
- DPU_ERROR("invalid dpu enc\n");
- return -EINVAL;
- }
-
- drm_enc = &dpu_enc->base;
- if (!drm_enc->dev || !drm_enc->dev->dev_private) {
- DPU_ERROR("drm device invalid\n");
- return -EINVAL;
- }
-
- priv = drm_enc->dev->dev_private;
- if (!priv->kms) {
- DPU_ERROR("invalid kms\n");
- return -EINVAL;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
-
- if (enable)
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- else
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
- return 0;
-}
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
@@ -488,8 +459,6 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
drm_encoder_cleanup(drm_enc);
mutex_destroy(&dpu_enc->enc_lock);
-
- kfree(dpu_enc);
}
void dpu_encoder_helper_split_config(
@@ -1119,28 +1088,24 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
}
-void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
- struct dpu_encoder_virt *dpu_enc = NULL;
- int i;
-
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
- dpu_enc = to_dpu_encoder_virt(drm_enc);
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ mutex_lock(&dpu_enc->enc_lock);
- if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
- phys->ops.restore(phys);
- }
+ if (!dpu_enc->enabled)
+ goto out;
+ if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
+ dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
_dpu_encoder_virt_enable_helper(drm_enc);
+
+out:
+ mutex_unlock(&dpu_enc->enc_lock);
}
static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
@@ -1154,6 +1119,8 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ mutex_lock(&dpu_enc->enc_lock);
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
@@ -1170,10 +1137,15 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
if (ret) {
DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
ret);
- return;
+ goto out;
}
_dpu_encoder_virt_enable_helper(drm_enc);
+
+ dpu_enc->enabled = true;
+
+out:
+ mutex_unlock(&dpu_enc->enc_lock);
}
static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -1195,11 +1167,14 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
return;
}
- mode = &drm_enc->crtc->state->adjusted_mode;
-
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
+ mutex_lock(&dpu_enc->enc_lock);
+ dpu_enc->enabled = false;
+
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@@ -1233,6 +1208,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
dpu_rm_release(&dpu_kms->rm, drm_enc);
+
+ mutex_unlock(&dpu_enc->enc_lock);
}
static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
@@ -1263,8 +1240,8 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
dpu_enc = to_dpu_encoder_virt(drm_enc);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
- if (dpu_enc->crtc_vblank_cb)
- dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+ if (dpu_enc->crtc)
+ dpu_crtc_vblank_callback(dpu_enc->crtc);
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
atomic_inc(&phy_enc->vsync_cnt);
@@ -1284,25 +1261,32 @@ static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_END("encoder_underrun_callback");
}
-void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
- void (*vbl_cb)(void *), void *vbl_data)
+void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
unsigned long lock_flags;
- bool enable;
- int i;
- enable = vbl_cb ? true : false;
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ /* crtc should always be cleared before re-assigning */
+ WARN_ON(crtc && dpu_enc->crtc);
+ dpu_enc->crtc = crtc;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
+ struct drm_crtc *crtc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ int i;
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
- dpu_enc->crtc_vblank_cb = vbl_cb;
- dpu_enc->crtc_vblank_cb_data = vbl_data;
+ if (dpu_enc->crtc != crtc) {
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+ return;
+ }
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1407,8 +1391,9 @@ static void dpu_encoder_off_work(struct kthread_work *work)
* phys: Pointer to physical encoder structure
* extra_flush_bits: Additional bit mask to include in flush trigger
*/
-static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
- struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phys, uint32_t extra_flush_bits,
+ bool async)
{
struct dpu_hw_ctl *ctl;
int pending_kickoff_cnt;
@@ -1431,7 +1416,10 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
return;
}
- pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+ if (!async)
+ pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+ else
+ pending_kickoff_cnt = atomic_read(&phys->pending_kickoff_cnt);
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@@ -1450,7 +1438,7 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
* _dpu_encoder_trigger_start - trigger start for a physical encoder
* phys: Pointer to physical encoder structure
*/
-static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
if (!phys) {
DPU_ERROR("invalid argument(s)\n");
@@ -1507,7 +1495,7 @@ static int dpu_encoder_helper_wait_event_timeout(
return rc;
}
-void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_hw_ctl *ctl;
@@ -1527,10 +1515,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
ctl->idx);
rc = ctl->ops.reset(ctl);
- if (rc) {
+ if (rc)
DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
- dpu_dbg_dump(false, __func__, true, true);
- }
phys_enc->enable_state = DPU_ENC_ENABLED;
}
@@ -1544,7 +1530,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
* a time.
* dpu_enc: Pointer to virtual encoder structure
*/
-static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
+ bool async)
{
struct dpu_hw_ctl *ctl;
uint32_t i, pending_flush;
@@ -1575,7 +1562,8 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
set_bit(i, dpu_enc->frame_busy_mask);
if (!phys->ops.needs_single_flush ||
!phys->ops.needs_single_flush(phys))
- _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+ _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
+ async);
else if (ctl->ops.get_pending_flush)
pending_flush |= ctl->ops.get_pending_flush(ctl);
}
@@ -1585,7 +1573,7 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
_dpu_encoder_trigger_flush(
&dpu_enc->base,
dpu_enc->cur_master,
- pending_flush);
+ pending_flush, async);
}
_dpu_encoder_trigger_start(dpu_enc->cur_master);
@@ -1769,7 +1757,7 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
}
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
- struct dpu_encoder_kickoff_params *params)
+ struct dpu_encoder_kickoff_params *params, bool async)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
@@ -1803,14 +1791,12 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
if (needs_hw_reset) {
trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.hw_reset)
- phys->ops.hw_reset(phys);
+ dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
}
}
}
-void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
@@ -1833,7 +1819,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
/* All phys encs are ready to go, trigger the kickoff */
- _dpu_encoder_kickoff_phys(dpu_enc);
+ _dpu_encoder_kickoff_phys(dpu_enc, async);
/* allow phys encs to handle any post-kickoff business */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1875,14 +1861,9 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
- struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_virt *dpu_enc = s->private;
int i;
- if (!s || !s->private)
- return -EINVAL;
-
- dpu_enc = s->private;
-
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@ -1920,7 +1901,7 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
- struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
int i;
@@ -1934,12 +1915,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
char name[DPU_NAME_SIZE];
- if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
DPU_ERROR("invalid encoder or kms\n");
return -EINVAL;
}
- dpu_enc = to_dpu_encoder_virt(drm_enc);
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@@ -1964,26 +1944,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
return 0;
}
-
-static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
- struct dpu_encoder_virt *dpu_enc;
-
- if (!drm_enc)
- return;
-
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- debugfs_remove_recursive(dpu_enc->debugfs_root);
-}
#else
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
return 0;
}
-
-static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
-}
#endif
static int dpu_encoder_late_register(struct drm_encoder *encoder)
@@ -1993,7 +1958,9 @@ static int dpu_encoder_late_register(struct drm_encoder *encoder)
static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
{
- _dpu_encoder_destroy_debugfs(encoder);
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ debugfs_remove_recursive(dpu_enc->debugfs_root);
}
static int dpu_encoder_virt_add_phys_encs(
@@ -2268,6 +2235,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+ dpu_enc->enabled = false;
+
return &dpu_enc->base;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 9dbf38f446d9..3f5dafe00580 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -55,14 +55,22 @@ void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
struct dpu_encoder_hw_resources *hw_res);
/**
- * dpu_encoder_register_vblank_callback - provide callback to encoder that
- * will be called on the next vblank.
+ * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
* @encoder: encoder pointer
- * @cb: callback pointer, provide NULL to deregister and disable IRQs
- * @data: user data provided to callback
+ * @crtc: crtc pointer
+ */
+void dpu_encoder_assign_crtc(struct drm_encoder *encoder,
+ struct drm_crtc *crtc);
+
+/**
+ * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
+ * the encoder is assigned to the given crtc
+ * @encoder: encoder pointer
+ * @crtc: crtc pointer
+ * @enable: true if vblank should be enabled
*/
-void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
- void (*cb)(void *), void *data);
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder,
+ struct drm_crtc *crtc, bool enable);
/**
* dpu_encoder_register_frame_event_callback - provide callback to encoder that
@@ -81,9 +89,10 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
* Delayed: Block until next trigger can be issued.
* @encoder: encoder pointer
* @params: kickoff time parameters
+ * @async: true if this is an asynchronous commit
*/
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
- struct dpu_encoder_kickoff_params *params);
+ struct dpu_encoder_kickoff_params *params, bool async);
/**
* dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
@@ -96,8 +105,9 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
* dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
* (i.e. ctl flush and start) immediately.
* @encoder: encoder pointer
+ * @async: true if this is an asynchronous commit
*/
-void dpu_encoder_kickoff(struct drm_encoder *encoder);
+void dpu_encoder_kickoff(struct drm_encoder *encoder, bool async);
/**
* dpu_encoder_wait_for_event - Waits for encoder events
@@ -126,10 +136,10 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
/**
- * dpu_encoder_virt_restore - restore the encoder configs
+ * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
* @encoder: encoder pointer
*/
-void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
/**
* dpu_encoder_init - initialize virtual encoder object
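
For reference only (not part of the patch): the kerneldoc above replaces the old register-a-vblank-callback scheme with an explicit crtc link plus a per-crtc vblank toggle. A minimal sketch of how a crtc-side caller could use the two new entry points follows; dpu_encoder_assign_crtc(), dpu_encoder_toggle_vblank_for_crtc() and dpu_crtc_vblank_callback() are taken from the hunks above, while the my_crtc_* wrappers are hypothetical names used purely for illustration.

    /* Sketch, assuming "dpu_encoder.h" as shown above is included. */

    /* Link the encoder to the crtc, then enable vblank delivery for it.
     * The encoder stores the crtc pointer under enc_spinlock, so its
     * vblank IRQ path can forward into dpu_crtc_vblank_callback(). */
    static void my_crtc_vblank_on(struct drm_crtc *crtc,
                                  struct drm_encoder *encoder)
    {
            dpu_encoder_assign_crtc(encoder, crtc);
            dpu_encoder_toggle_vblank_for_crtc(encoder, crtc, true);
    }

    /* Disable vblank first (the toggle is a no-op once the crtc link is
     * cleared), then drop the link; assign_crtc() WARNs if a new crtc is
     * set while one is still assigned, so always clear it on teardown. */
    static void my_crtc_vblank_off(struct drm_crtc *crtc,
                                   struct drm_encoder *encoder)
    {
            dpu_encoder_toggle_vblank_for_crtc(encoder, crtc, false);
            dpu_encoder_assign_crtc(encoder, NULL);
    }
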
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 964efcc757a4..44e6f8b68e70 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -114,8 +114,6 @@ struct dpu_encoder_virt_ops {
* @handle_post_kickoff: Do any work necessary post-kickoff work
* @trigger_start: Process start event on physical encoder
* @needs_single_flush: Whether encoder slaves need to be flushed
- * @hw_reset: Issue HW recovery such as CTL reset and clear
- * DPU_ENC_ERR_NEEDS_HW_RESET state
* @irq_control: Handler to enable/disable all the encoder IRQs
* @prepare_idle_pc: phys encoder can update the vsync_enable status
* on idle power collapse prepare
@@ -151,7 +149,6 @@ struct dpu_encoder_phys_ops {
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
- void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
void (*restore)(struct dpu_encoder_phys *phys);
@@ -342,15 +339,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
*/
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
-/**
- * dpu_encoder_helper_hw_reset - issue ctl hw reset
- * This helper function may be optionally specified by physical
- * encoders if they require ctl hw reset. If state is currently
- * DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
- * @phys_enc: Pointer to physical encoder structure
- */
-void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
-
static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
struct dpu_encoder_phys *phys_enc)
{
@@ -362,7 +350,7 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
if (phys_enc->split_role == ENC_ROLE_SOLO &&
- dpu_crtc_state_is_stereo(dpu_cstate))
+ dpu_cstate->num_mixers == CRTC_DUAL_MIXERS)
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index b2d7f0ded24c..99ab5ca9bed3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -44,14 +44,7 @@
#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
-static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
- struct dpu_encoder_phys_cmd *cmd_enc)
-{
- return KICKOFF_TIMEOUT_MS;
-}
-
-static inline bool dpu_encoder_phys_cmd_is_master(
- struct dpu_encoder_phys *phys_enc)
+static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
}
@@ -243,7 +236,6 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
atomic_read(&phys_enc->pending_kickoff_cnt));
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
- dpu_dbg_dump(false, __func__, true, true);
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -496,14 +488,11 @@ static void dpu_encoder_phys_cmd_enable_helper(
_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
- goto skip_flush;
+ return;
ctl = phys_enc->hw_ctl;
ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
-
-skip_flush:
- return;
}
static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
@@ -727,7 +716,7 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
wait_info.wq = &cmd_enc->pending_vblank_wq;
wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
- wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
atomic_inc(&cmd_enc->pending_vblank_cnt);
@@ -776,7 +765,6 @@ static void dpu_encoder_phys_cmd_init_ops(
ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
- ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->irq_control = dpu_encoder_phys_cmd_irq_control;
ops->restore = dpu_encoder_phys_cmd_enable_helper;
ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
@@ -798,7 +786,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
if (!cmd_enc) {
ret = -ENOMEM;
DPU_ERROR("failed to allocate\n");
- goto fail;
+ return ERR_PTR(ret);
}
phys_enc = &cmd_enc->base;
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
@@ -856,6 +844,5 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
return phys_enc;
-fail:
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 84de385a9f62..acdab5b0db18 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -110,7 +110,7 @@ static void drm_mode_to_intf_timing_params(
*/
}
-static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+static u32 get_horizontal_total(const struct intf_timing_params *timing)
{
u32 active = timing->xres;
u32 inactive =
@@ -119,7 +119,7 @@ static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
return active + inactive;
}
-static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+static u32 get_vertical_total(const struct intf_timing_params *timing)
{
u32 active = timing->yres;
u32 inactive =
@@ -331,7 +331,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
if (hw_ctl && hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
- if (flush_register == 0)
+ if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
@@ -613,7 +613,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
- dpu_dbg_dump(false, __func__, true, true);
}
}
@@ -766,7 +765,6 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
- ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index bfcd165e96df..0874f0a53bf9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -216,7 +216,7 @@ static const struct dpu_format dpu_format_map[] = {
INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, 0,
+ false, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA8888,
@@ -921,7 +921,7 @@ static int _dpu_format_populate_addrs_ubwc(
+ layout->plane_size[2] + layout->plane_size[3];
if (!meta)
- goto done;
+ return 0;
/* configure Y metadata plane */
layout->plane_addr[2] = base_addr;
@@ -952,12 +952,11 @@ static int _dpu_format_populate_addrs_ubwc(
layout->plane_addr[1] = 0;
if (!meta)
- goto done;
+ return 0;
layout->plane_addr[2] = base_addr;
layout->plane_addr[3] = 0;
}
-done:
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
index 58d29e43faef..92f1c4241b9a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -30,16 +30,10 @@ static LIST_HEAD(dpu_hw_blk_list);
* @type: hw block type - enum dpu_hw_blk_type
* @id: instance id of the hw block
* @ops: Pointer to block operations
- * return: 0 if success; error code otherwise
*/
-int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
struct dpu_hw_blk_ops *ops)
{
- if (!hw_blk) {
- pr_err("invalid parameters\n");
- return -EINVAL;
- }
-
INIT_LIST_HEAD(&hw_blk->list);
hw_blk->type = type;
hw_blk->id = id;
@@ -51,8 +45,6 @@ int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
mutex_lock(&dpu_hw_blk_lock);
list_add(&hw_blk->list, &dpu_hw_blk_list);
mutex_unlock(&dpu_hw_blk_lock);
-
- return 0;
}
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
index 0f4ca8af1ec5..1934c2f7e8fa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -44,7 +44,7 @@ struct dpu_hw_blk {
struct dpu_hw_blk_ops ops;
};
-int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
struct dpu_hw_blk_ops *ops);
void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index dc060e7358e4..144358a3d0fb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -736,13 +736,4 @@ struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
*/
void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
-/**
- * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
- * @cfg: pointer to sspp cfg
- */
-static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
-{
- return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
- test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
-}
#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index eec1051f2afc..1068b4b7940f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -13,8 +13,8 @@
#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
+#include "dpu_trace.h"
#define CTL_LAYER(lm) \
(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
@@ -72,24 +72,39 @@ static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
return stages;
}
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, CTL_FLUSH);
+}
+
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
+ trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
+ trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
+ trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
ctx->pending_flush_mask = 0x0;
}
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
u32 flushbits)
{
+ trace_dpu_hw_ctl_update_pending_flush(flushbits,
+ ctx->pending_flush_mask);
ctx->pending_flush_mask |= flushbits;
}
@@ -103,18 +118,12 @@ static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
-
+ trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
-static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
-
- return DPU_REG_READ(c, CTL_FLUSH);
-}
-
-static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
enum dpu_sspp sspp)
{
uint32_t flushbits = 0;
@@ -169,7 +178,7 @@ static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
return flushbits;
}
-static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
enum dpu_lm lm)
{
uint32_t flushbits = 0;
@@ -202,7 +211,7 @@ static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
return flushbits;
}
-static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
{
switch (intf) {
@@ -474,10 +483,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
};
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
@@ -485,7 +491,6 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
{
struct dpu_hw_ctl *c;
struct dpu_ctl_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -504,18 +509,9 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
c->mixer_count = m->mixer_count;
c->mixer_hw_caps = m->mixer;
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 9c6bba0ac7c3..f6a83daa385b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -13,7 +13,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define INTF_TIMING_ENGINE_EN 0x000
@@ -265,10 +264,7 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->get_line_count = dpu_hw_intf_get_line_count;
}
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
@@ -276,7 +272,6 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
{
struct dpu_hw_intf *c;
struct dpu_intf_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -297,18 +292,9 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
c->mdss = m;
_setup_intf_ops(&c->ops, c->cap->features);
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 3b77df460dea..a2b0dbc23058 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -92,16 +92,6 @@ struct dpu_hw_intf {
};
/**
- * to_dpu_hw_intf - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_intf, base);
-}
-
-/**
* dpu_hw_intf_init(): Initializes the intf driver for the passed
* interface idx.
* @idx: interface index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index acb8dc8acaa5..018df2c3b7ed 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -15,7 +15,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_mdss.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define LM_OP_MODE 0x00
@@ -64,16 +63,10 @@ static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
{
const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
- int rc;
+ if (stage != DPU_STAGE_BASE && stage <= sblk->maxblendstages)
+ return sblk->blendstage_base[stage - DPU_STAGE_0];
- if (stage == DPU_STAGE_BASE)
- rc = -EINVAL;
- else if (stage <= sblk->maxblendstages)
- rc = sblk->blendstage_base[stage - DPU_STAGE_0];
- else
- rc = -EINVAL;
-
- return rc;
+ return -EINVAL;
}
static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
@@ -163,11 +156,6 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
-static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
- void *cfg)
-{
-}
-
static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
@@ -179,13 +167,9 @@ static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
- ops->setup_gc = dpu_hw_lm_gc;
};
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
@@ -193,7 +177,6 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
{
struct dpu_hw_mixer *c;
struct dpu_lm_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -210,18 +193,9 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
c->cap = cfg;
_setup_mixer_ops(m, &c->ops, c->cap->features);
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 5b036aca8340..6aee839a6a23 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -61,11 +61,6 @@ struct dpu_hw_lm_ops {
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
struct dpu_mdss_color *color,
u8 border_en);
- /**
- * setup_gc : enable/disable gamma correction feature
- */
- void (*setup_gc)(struct dpu_hw_mixer *mixer,
- void *cfg);
};
struct dpu_hw_mixer {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index cc3a623903f4..3bdf47ed1845 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -16,7 +16,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_pingpong.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#include "dpu_trace.h"
@@ -177,7 +176,7 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
if (height < init)
- goto line_count_exit;
+ return line;
line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
@@ -186,7 +185,6 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
else
line -= init;
-line_count_exit:
return line;
}
@@ -201,10 +199,7 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
ops->get_line_count = dpu_hw_pp_get_line_count;
};
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
@@ -212,7 +207,6 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
{
struct dpu_hw_pingpong *c;
struct dpu_pingpong_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -228,18 +222,9 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
c->caps = cfg;
_setup_pingpong_ops(&c->ops, c->caps);
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 3caccd7d6a3e..0e02e43cee14 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -105,16 +105,6 @@ struct dpu_hw_pingpong {
};
/**
- * dpu_hw_pingpong - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_pingpong, base);
-}
-
-/**
* dpu_hw_pingpong_init - initializes the pingpong driver for the passed
* pingpong idx.
* @idx: Pingpong index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index c25b52a6b219..e9132bf5166b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -14,7 +14,6 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_sspp.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
@@ -141,7 +140,7 @@
/* traffic shaper clock in Hz */
#define TS_CLK 19200000
-static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
int s_id,
u32 *idx)
{
@@ -662,7 +661,8 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
test_bit(DPU_SSPP_CSC_10BIT, &features))
c->ops.setup_csc = dpu_hw_sspp_setup_csc;
- if (dpu_hw_sspp_multirect_enabled(c->cap))
+ if (test_bit(DPU_SSPP_SMART_DMA_V1, &c->cap->features) ||
+ test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
@@ -697,10 +697,7 @@ static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
return ERR_PTR(-ENOMEM);
}
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
void __iomem *addr, struct dpu_mdss_cfg *catalog,
@@ -708,7 +705,6 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
{
struct dpu_hw_pipe *hw_pipe;
struct dpu_sspp_cfg *cfg;
- int rc;
if (!addr || !catalog)
return ERR_PTR(-EINVAL);
@@ -730,18 +726,9 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
hw_pipe->cap = cfg;
_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
- rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
return hw_pipe;
-
-blk_init_error:
- kzfree(hw_pipe);
-
- return ERR_PTR(rc);
}
void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 4d81e5f5ce1b..119b4e1c16be 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -392,16 +392,6 @@ struct dpu_hw_pipe {
};
/**
- * dpu_hw_pipe - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_pipe, base);
-}
-
-/**
* dpu_hw_sspp_init - initializes the sspp hw driver object.
* Should be called once before accessing every pipe.
* @idx: Pipe index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index b8781256e21b..a041597bb849 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -13,7 +13,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_top.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define SSPP_SPARE 0x28
@@ -322,10 +321,7 @@ static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
return ERR_PTR(-EINVAL);
}
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
void __iomem *addr,
@@ -333,7 +329,6 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
{
struct dpu_hw_mdp *mdp;
const struct dpu_mdp_cfg *cfg;
- int rc;
if (!addr || !m)
return ERR_PTR(-EINVAL);
@@ -355,20 +350,9 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
mdp->caps = cfg;
_setup_mdp_ops(&mdp->ops, mdp->caps->features);
- rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
-
- dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+ dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
return mdp;
-
-blk_init_error:
- kzfree(mdp);
-
- return ERR_PTR(rc);
}
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 192e338f20bb..aa21fd834398 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -161,16 +161,6 @@ struct dpu_hw_mdp {
};
/**
- * to_dpu_hw_mdp - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_mdp, base);
-}
-
-/**
* dpu_hw_mdptop_init - initializes the top driver for the passed idx
* @idx: Interface index for which driver object is required
* @addr: Mapped register io address of MDP
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index d43905525f92..38bfd222ed72 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -13,7 +13,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_vbif.h"
-#include "dpu_dbg.h"
#define VBIF_VERSION 0x0000
#define VBIF_CLK_FORCE_CTRL0 0x0008
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
index b557687b1964..78833c2c27f8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -16,6 +16,8 @@
#include <linux/err.h>
#include <linux/delay.h>
+#include <drm/drm_print.h>
+
#include "dpu_io_util.h"
void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
@@ -164,7 +166,7 @@ int msm_dss_parse_clock(struct platform_device *pdev,
"clock-names", i,
&clock_name);
if (rc) {
- dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+ DRM_DEV_ERROR(&pdev->dev, "Failed to get clock name for %d\n",
i);
break;
}
@@ -176,13 +178,13 @@ int msm_dss_parse_clock(struct platform_device *pdev,
rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
if (rc) {
- dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+ DRM_DEV_ERROR(&pdev->dev, "Failed to get clock refs %d\n", rc);
goto err;
}
rc = of_clk_set_defaults(pdev->dev.of_node, false);
if (rc) {
- dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+ DRM_DEV_ERROR(&pdev->dev, "Failed to set clock defaults %d\n", rc);
goto err;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
deleted file mode 100644
index d5e6ce0140cf..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/kthread.h>
-
-#include "dpu_irq.h"
-#include "dpu_core_irq.h"
-
-irqreturn_t dpu_irq(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- return dpu_core_irq(dpu_kms);
-}
-
-void dpu_irq_preinstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- if (!dpu_kms->dev || !dpu_kms->dev->dev) {
- pr_err("invalid device handles\n");
- return;
- }
-
- dpu_core_irq_preinstall(dpu_kms);
-}
-
-int dpu_irq_postinstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
- int rc;
-
- if (!kms) {
- DPU_ERROR("invalid parameters\n");
- return -EINVAL;
- }
-
- rc = dpu_core_irq_postinstall(dpu_kms);
-
- return rc;
-}
-
-void dpu_irq_uninstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- if (!kms) {
- DPU_ERROR("invalid parameters\n");
- return;
- }
-
- dpu_core_irq_uninstall(dpu_kms);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
deleted file mode 100644
index 3e147f7176e2..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __DPU_IRQ_H__
-#define __DPU_IRQ_H__
-
-#include <linux/kernel.h>
-#include <linux/irqdomain.h>
-
-#include "msm_kms.h"
-
-/**
- * dpu_irq_controller - define MDSS level interrupt controller context
- * @enabled_mask: enable status of MDSS level interrupt
- * @domain: interrupt domain of this controller
- */
-struct dpu_irq_controller {
- unsigned long enabled_mask;
- struct irq_domain *domain;
-};
-
-/**
- * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
- * @kms: pointer to kms context
- * @return: none
- */
-void dpu_irq_preinstall(struct msm_kms *kms);
-
-/**
- * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
- * @kms: pointer to kms context
- * @return: 0 if success; error code otherwise
- */
-int dpu_irq_postinstall(struct msm_kms *kms);
-
-/**
- * dpu_irq_uninstall - uninstall MDSS IRQ handler
- * @drm_dev: pointer to kms context
- * @return: none
- */
-void dpu_irq_uninstall(struct msm_kms *kms);
-
-/**
- * dpu_irq - MDSS level IRQ handler
- * @kms: pointer to kms context
- * @return: interrupt handling status
- */
-irqreturn_t dpu_irq(struct msm_kms *kms);
-
-#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 0a683e65a9f3..4d67b3c96702 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -81,7 +81,7 @@ static int _dpu_danger_signal_status(struct seq_file *s,
struct dpu_danger_safe_status status;
int i;
- if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+ if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
DPU_ERROR("invalid arg(s)\n");
return 0;
}
@@ -138,46 +138,29 @@ static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
-static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
-{
- debugfs_remove_recursive(dpu_kms->debugfs_danger);
- dpu_kms->debugfs_danger = NULL;
-}
-
-static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
- dpu_kms->debugfs_danger = debugfs_create_dir("danger",
- parent);
- if (!dpu_kms->debugfs_danger) {
- DPU_ERROR("failed to create danger debugfs\n");
- return -EINVAL;
- }
+ struct dentry *entry = debugfs_create_dir("danger", parent);
+ if (IS_ERR_OR_NULL(entry))
+ return;
- debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+ debugfs_create_file("danger_status", 0600, entry,
dpu_kms, &dpu_debugfs_danger_stats_fops);
- debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+ debugfs_create_file("safe_status", 0600, entry,
dpu_kms, &dpu_debugfs_safe_stats_fops);
-
- return 0;
}
static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
- struct dpu_debugfs_regset32 *regset;
- struct dpu_kms *dpu_kms;
+ struct dpu_debugfs_regset32 *regset = s->private;
+ struct dpu_kms *dpu_kms = regset->dpu_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
void __iomem *base;
uint32_t i, addr;
- if (!s || !s->private)
- return 0;
-
- regset = s->private;
-
- dpu_kms = regset->dpu_kms;
- if (!dpu_kms || !dpu_kms->mmio)
+ if (!dpu_kms->mmio)
return 0;
dev = dpu_kms->dev;
@@ -250,57 +233,24 @@ void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
{
- void *p;
- int rc;
-
- p = dpu_hw_util_get_log_mask_ptr();
+ void *p = dpu_hw_util_get_log_mask_ptr();
+ struct dentry *entry;
- if (!dpu_kms || !p)
+ if (!p)
return -EINVAL;
- dpu_kms->debugfs_root = debugfs_create_dir("debug",
- dpu_kms->dev->primary->debugfs_root);
- if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
- DRM_ERROR("debugfs create_dir failed %ld\n",
- PTR_ERR(dpu_kms->debugfs_root));
- return PTR_ERR(dpu_kms->debugfs_root);
- }
-
- rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
- if (rc) {
- DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
- return rc;
- }
+ entry = debugfs_create_dir("debug", dpu_kms->dev->primary->debugfs_root);
+ if (IS_ERR_OR_NULL(entry))
+ return -ENODEV;
/* allow root to be NULL */
- debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
-
- (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
- (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
- (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
-
- rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
- if (rc) {
- DPU_ERROR("failed to init perf %d\n", rc);
- return rc;
- }
+ debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
- return 0;
-}
+ dpu_debugfs_danger_init(dpu_kms, entry);
+ dpu_debugfs_vbif_init(dpu_kms, entry);
+ dpu_debugfs_core_irq_init(dpu_kms, entry);
-static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
-{
- /* don't need to NULL check debugfs_root */
- if (dpu_kms) {
- dpu_debugfs_vbif_destroy(dpu_kms);
- dpu_debugfs_danger_destroy(dpu_kms);
- dpu_debugfs_core_irq_destroy(dpu_kms);
- debugfs_remove_recursive(dpu_kms->debugfs_root);
- }
-}
-#else
-static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
-{
+ return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
@@ -320,7 +270,10 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct dpu_kms *dpu_kms;
struct msm_drm_private *priv;
struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
struct drm_encoder *encoder;
+ int i;
if (!kms)
return;
@@ -332,9 +285,13 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
priv = dev->dev_private;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc != NULL)
+ /* Call prepare_commit for all affected encoders */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc_state->encoder_mask) {
dpu_encoder_prepare_commit(encoder);
+ }
+ }
}
/*
@@ -344,15 +301,20 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
void dpu_kms_encoder_enable(struct drm_encoder *encoder)
{
const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
- struct drm_crtc *crtc = encoder->crtc;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc;
/* Forward this enable call to the commit hook */
if (funcs && funcs->commit)
funcs->commit(encoder);
- if (crtc && crtc->state->active) {
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_for_each_crtc(crtc, dev) {
+ if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
+ continue;
+
trace_dpu_kms_enc_enable(DRMID(crtc));
- dpu_crtc_commit_kickoff(crtc);
+ dpu_crtc_commit_kickoff(crtc, false);
}
}
@@ -369,7 +331,8 @@ static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
if (crtc->state->active) {
trace_dpu_kms_commit(DRMID(crtc));
- dpu_crtc_commit_kickoff(crtc);
+ dpu_crtc_commit_kickoff(crtc,
+ state->legacy_cursor_update);
}
}
}
@@ -613,22 +576,7 @@ fail:
#ifdef CONFIG_DEBUG_FS
static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
- struct drm_device *dev;
- int rc;
-
- if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
- DPU_ERROR("invalid dpu_kms\n");
- return -EINVAL;
- }
-
- dev = dpu_kms->dev;
-
- rc = _dpu_debugfs_init(dpu_kms);
- if (rc)
- DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
-
- return rc;
+ return _dpu_debugfs_init(to_dpu_kms(kms));
}
#endif
@@ -651,12 +599,7 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
- if (dpu_kms->power_event)
- dpu_power_handle_unregister_event(
- &dpu_kms->phandle, dpu_kms->power_event);
-
/* safe to call these more than once during shutdown */
- _dpu_debugfs_destroy(dpu_kms);
_dpu_kms_mmu_destroy(dpu_kms);
if (dpu_kms->catalog) {
@@ -676,11 +619,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_hw_catalog_deinit(dpu_kms->catalog);
dpu_kms->catalog = NULL;
- if (dpu_kms->core_client)
- dpu_power_client_destroy(&dpu_kms->phandle,
- dpu_kms->core_client);
- dpu_kms->core_client = NULL;
-
if (dpu_kms->vbif[VBIF_NRT])
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
dpu_kms->vbif[VBIF_NRT] = NULL;
@@ -705,131 +643,9 @@ static void dpu_kms_destroy(struct msm_kms *kms)
dpu_kms = to_dpu_kms(kms);
- dpu_dbg_destroy();
_dpu_kms_hw_destroy(dpu_kms);
}
-static int dpu_kms_pm_suspend(struct device *dev)
-{
- struct drm_device *ddev;
- struct drm_modeset_acquire_ctx ctx;
- struct drm_atomic_state *state;
- struct dpu_kms *dpu_kms;
- int ret = 0, num_crtcs = 0;
-
- if (!dev)
- return -EINVAL;
-
- ddev = dev_get_drvdata(dev);
- if (!ddev || !ddev_to_msm_kms(ddev))
- return -EINVAL;
-
- dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
-
- /* disable hot-plug polling */
- drm_kms_helper_poll_disable(ddev);
-
- /* acquire modeset lock(s) */
- drm_modeset_acquire_init(&ctx, 0);
-
-retry:
- DPU_ATRACE_BEGIN("kms_pm_suspend");
-
- ret = drm_modeset_lock_all_ctx(ddev, &ctx);
- if (ret)
- goto unlock;
-
- /* save current state for resume */
- if (dpu_kms->suspend_state)
- drm_atomic_state_put(dpu_kms->suspend_state);
- dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
- if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
- DRM_ERROR("failed to back up suspend state\n");
- dpu_kms->suspend_state = NULL;
- goto unlock;
- }
-
- /* create atomic state to disable all CRTCs */
- state = drm_atomic_state_alloc(ddev);
- if (IS_ERR_OR_NULL(state)) {
- DRM_ERROR("failed to allocate crtc disable state\n");
- goto unlock;
- }
-
- state->acquire_ctx = &ctx;
-
- /* check for nothing to do */
- if (num_crtcs == 0) {
- DRM_DEBUG("all crtcs are already in the off state\n");
- drm_atomic_state_put(state);
- goto suspended;
- }
-
- /* commit the "disable all" state */
- ret = drm_atomic_commit(state);
- if (ret < 0) {
- DRM_ERROR("failed to disable crtcs, %d\n", ret);
- drm_atomic_state_put(state);
- goto unlock;
- }
-
-suspended:
- dpu_kms->suspend_block = true;
-
-unlock:
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- DPU_ATRACE_END("kms_pm_suspend");
- return 0;
-}
-
-static int dpu_kms_pm_resume(struct device *dev)
-{
- struct drm_device *ddev;
- struct dpu_kms *dpu_kms;
- int ret;
-
- if (!dev)
- return -EINVAL;
-
- ddev = dev_get_drvdata(dev);
- if (!ddev || !ddev_to_msm_kms(ddev))
- return -EINVAL;
-
- dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
-
- DPU_ATRACE_BEGIN("kms_pm_resume");
-
- drm_mode_config_reset(ddev);
-
- drm_modeset_lock_all(ddev);
-
- dpu_kms->suspend_block = false;
-
- if (dpu_kms->suspend_state) {
- dpu_kms->suspend_state->acquire_ctx =
- ddev->mode_config.acquire_ctx;
- ret = drm_atomic_commit(dpu_kms->suspend_state);
- if (ret < 0) {
- DRM_ERROR("failed to restore state, %d\n", ret);
- drm_atomic_state_put(dpu_kms->suspend_state);
- }
- dpu_kms->suspend_state = NULL;
- }
- drm_modeset_unlock_all(ddev);
-
- /* enable hot-plug polling */
- drm_kms_helper_poll_enable(ddev);
-
- DPU_ATRACE_END("kms_pm_resume");
- return 0;
-}
-
static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode)
@@ -858,10 +674,30 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
encoder->base.id, rc);
}
+static irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ return dpu_core_irq(dpu_kms);
+}
+
+static void dpu_irq_preinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ dpu_core_irq_preinstall(dpu_kms);
+}
+
+static void dpu_irq_uninstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ dpu_core_irq_uninstall(dpu_kms);
+}
+
static const struct msm_kms_funcs kms_funcs = {
.hw_init = dpu_kms_hw_init,
.irq_preinstall = dpu_irq_preinstall,
- .irq_postinstall = dpu_irq_postinstall,
.irq_uninstall = dpu_irq_uninstall,
.irq = dpu_irq,
.prepare_commit = dpu_kms_prepare_commit,
@@ -873,8 +709,6 @@ static const struct msm_kms_funcs kms_funcs = {
.check_modified_format = dpu_format_check_modified_format,
.get_format = dpu_get_msm_format,
.round_pixclk = dpu_kms_round_pixclk,
- .pm_suspend = dpu_kms_pm_suspend,
- .pm_resume = dpu_kms_pm_resume,
.destroy = dpu_kms_destroy,
.set_encoder_mode = _dpu_kms_set_encoder_mode,
#ifdef CONFIG_DEBUG_FS
@@ -882,12 +716,6 @@ static const struct msm_kms_funcs kms_funcs = {
#endif
};
-/* the caller api needs to turn on clock before calling it */
-static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
-{
- dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
-}
-
static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
struct msm_mmu *mmu;
@@ -911,6 +739,9 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
if (!domain)
return 0;
+ domain->geometry.aperture_start = 0x1000;
+ domain->geometry.aperture_end = 0xffffffff;
+
aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
domain, "dpu1");
if (IS_ERR(aspace)) {
@@ -960,16 +791,6 @@ u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
return clk_get_rate(clk->clk);
}
-static void dpu_kms_handle_power_event(u32 event_type, void *usr)
-{
- struct dpu_kms *dpu_kms = usr;
-
- if (!dpu_kms)
- return;
-
- dpu_vbif_init_memtypes(dpu_kms);
-}
-
static int dpu_kms_hw_init(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
@@ -979,26 +800,20 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
if (!kms) {
DPU_ERROR("invalid kms\n");
- goto end;
+ return rc;
}
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
if (!dev) {
DPU_ERROR("invalid device\n");
- goto end;
- }
-
- rc = dpu_dbg_init(&dpu_kms->pdev->dev);
- if (rc) {
- DRM_ERROR("failed to init dpu dbg: %d\n", rc);
- goto end;
+ return rc;
}
priv = dev->dev_private;
if (!priv) {
DPU_ERROR("invalid private data\n");
- goto dbg_destroy;
+ return rc;
}
dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
@@ -1036,20 +851,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
}
- dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
- "core");
- if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
- rc = PTR_ERR(dpu_kms->core_client);
- if (!dpu_kms->core_client)
- rc = -EINVAL;
- DPU_ERROR("dpu power client create failed: %d\n", rc);
- dpu_kms->core_client = NULL;
- goto error;
- }
-
pm_runtime_get_sync(&dpu_kms->pdev->dev);
- _dpu_kms_core_hw_rev_init(dpu_kms);
+ dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
@@ -1063,8 +867,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
- dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
-
/*
* Now we need to read the HW catalog and initialize resources such as
* clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
@@ -1110,7 +912,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
}
rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
- &dpu_kms->phandle,
_dpu_kms_get_clk(dpu_kms, "core"));
if (rc) {
DPU_ERROR("failed to init perf %d\n", rc);
@@ -1151,13 +952,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
- /*
- * Handle (re)initializations during power enable
- */
- dpu_kms_handle_power_event(DPU_POWER_EVENT_ENABLE, dpu_kms);
- dpu_kms->power_event = dpu_power_handle_register_event(
- &dpu_kms->phandle, DPU_POWER_EVENT_ENABLE,
- dpu_kms_handle_power_event, dpu_kms, "kms");
+ dpu_vbif_init_memtypes(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
@@ -1171,9 +966,7 @@ power_error:
pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
_dpu_kms_hw_destroy(dpu_kms);
-dbg_destroy:
- dpu_dbg_destroy();
-end:
+
return rc;
}
@@ -1221,8 +1014,6 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- dpu_power_resource_init(pdev, &dpu_kms->phandle);
-
platform_set_drvdata(pdev, dpu_kms);
msm_kms_init(&dpu_kms->base, &kms_funcs);
@@ -1242,7 +1033,6 @@ static void dpu_unbind(struct device *dev, struct device *master, void *data)
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct dss_module_power *mp = &dpu_kms->mp;
- dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
mp->num_clk = 0;
@@ -1278,19 +1068,13 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
ddev = dpu_kms->dev;
if (!ddev) {
DPU_ERROR("invalid drm_device\n");
- goto exit;
+ return rc;
}
- rc = dpu_power_resource_enable(&dpu_kms->phandle,
- dpu_kms->core_client, false);
- if (rc)
- DPU_ERROR("resource disable failed: %d\n", rc);
-
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
-exit:
return rc;
}
@@ -1299,27 +1083,27 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
int rc = -1;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_encoder *encoder;
struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
if (!ddev) {
DPU_ERROR("invalid drm_device\n");
- goto exit;
+ return rc;
}
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
- goto exit;
+ return rc;
}
- rc = dpu_power_resource_enable(&dpu_kms->phandle,
- dpu_kms->core_client, true);
- if (rc)
- DPU_ERROR("resource enable failed: %d\n", rc);
+ dpu_vbif_init_memtypes(dpu_kms);
+
+ drm_for_each_encoder(encoder, ddev)
+ dpu_encoder_virt_runtime_resume(encoder);
-exit:
return rc;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 66d466628e2b..ac75cfc267f4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -23,15 +23,13 @@
#include "msm_kms.h"
#include "msm_mmu.h"
#include "msm_gem.h"
-#include "dpu_dbg.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_top.h"
+#include "dpu_io_util.h"
#include "dpu_rm.h"
-#include "dpu_power_handle.h"
-#include "dpu_irq.h"
#include "dpu_core_perf.h"
#define DRMID(x) ((x) ? (x)->base.id : -1)
@@ -104,7 +102,6 @@ struct dpu_irq {
atomic_t *enable_counts;
atomic_t *irq_counts;
spinlock_t cb_lock;
- struct dentry *debugfs_file;
};
struct dpu_kms {
@@ -113,15 +110,6 @@ struct dpu_kms {
int core_rev;
struct dpu_mdss_cfg *catalog;
- struct dpu_power_handle phandle;
- struct dpu_power_client *core_client;
- struct dpu_power_event *power_event;
-
- /* directory entry for debugfs */
- struct dentry *debugfs_root;
- struct dentry *debugfs_danger;
- struct dentry *debugfs_vbif;
-
/* io/register spaces: */
void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
@@ -135,10 +123,6 @@ struct dpu_kms {
struct dpu_core_perf perf;
- /* saved atomic state during system suspend */
- struct drm_atomic_state *suspend_state;
- bool suspend_block;
-
struct dpu_rm rm;
bool rm_init;
@@ -164,33 +148,6 @@ struct vsync_info {
((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
/**
- * dpu_kms_is_suspend_state - whether or not the system is pm suspended
- * @dev: Pointer to drm device
- * Return: Suspend status
- */
-static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
-{
- if (!ddev_to_msm_kms(dev))
- return false;
-
- return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
-}
-
-/**
- * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
- * suspend status
- * @dev: Pointer to drm device
- * Return: True if commits should be rejected due to pm suspend
- */
-static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
-{
- if (!dpu_kms_is_suspend_state(dev))
- return false;
-
- return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
-}
-
-/**
* Debugfs functions - extra helper functions for debugfs support
*
* Main debugfs documentation is located at,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 2235ef8129f4..cb307a2abf06 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -9,6 +9,11 @@
#define HW_INTR_STATUS 0x0010
+struct dpu_irq_controller {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+};
+
struct dpu_mdss {
struct msm_mdss base;
void __iomem *mmio;
@@ -115,13 +120,12 @@ static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
return 0;
}
-static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+static void _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
{
if (dpu_mdss->irq_controller.domain) {
irq_domain_remove(dpu_mdss->irq_controller.domain);
dpu_mdss->irq_controller.domain = NULL;
}
- return 0;
}
static int dpu_mdss_enable(struct msm_mdss *mdss)
{
@@ -156,18 +160,16 @@ static void dpu_mdss_destroy(struct drm_device *dev)
struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
+ pm_runtime_suspend(dev->dev);
+ pm_runtime_disable(dev->dev);
_dpu_mdss_irq_domain_fini(dpu_mdss);
-
free_irq(platform_get_irq(pdev, 0), dpu_mdss);
-
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio);
dpu_mdss->mmio = NULL;
-
- pm_runtime_disable(dev->dev);
priv->mdss = NULL;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index d77a8cb15404..fd75870eb17f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -137,7 +137,7 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
 * @src_width: width of source buffer
* Return: fill level corresponding to the source buffer/format or 0 if error
*/
-static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
const struct dpu_format *fmt, u32 src_width)
{
struct dpu_plane *pdpu, *tmp;
@@ -430,24 +430,14 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
}
-/**
- * _dpu_plane_get_aspace: gets the address space
- */
-static inline struct msm_gem_address_space *_dpu_plane_get_aspace(
- struct dpu_plane *pdpu)
-{
- struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
-
- return kms->base.aspace;
-}
-
-static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+static void _dpu_plane_set_scanout(struct drm_plane *plane,
struct dpu_plane_state *pstate,
struct dpu_hw_pipe_cfg *pipe_cfg,
struct drm_framebuffer *fb)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
- struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ struct msm_gem_address_space *aspace = kms->base.aspace;
int ret;
ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
@@ -525,7 +515,7 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->enable = 1;
}
-static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+static void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
{
static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
{
@@ -801,7 +791,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
struct dma_fence *fence;
- struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
if (!new_state->fb)
@@ -810,7 +800,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
/* cache aspace */
- pstate->aspace = aspace;
+ pstate->aspace = kms->base.aspace;
/*
* TODO: Need to sort out the msm_framebuffer_prepare() call below so
@@ -1191,19 +1181,8 @@ static void dpu_plane_destroy(struct drm_plane *plane)
static void dpu_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct dpu_plane_state *pstate;
-
- if (!plane || !state) {
- DPU_ERROR("invalid arg(s), plane %d state %d\n",
- plane != 0, state != 0);
- return;
- }
-
- pstate = to_dpu_plane_state(state);
-
__drm_atomic_helper_plane_destroy_state(state);
-
- kfree(pstate);
+ kfree(to_dpu_plane_state(state));
}
static struct drm_plane_state *
@@ -1273,26 +1252,12 @@ static ssize_t _dpu_plane_danger_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_kms *kms = file->private_data;
- struct dpu_mdss_cfg *cfg = kms->catalog;
- int len = 0;
- char buf[40] = {'\0'};
-
- if (!cfg)
- return -ENODEV;
+ int len;
+ char buf[40];
- if (*ppos)
- return 0; /* the end */
-
- len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
- if (len < 0 || len >= sizeof(buf))
- return 0;
+ len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
- if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
- return -EFAULT;
-
- *ppos += len; /* increase offset */
-
- return len;
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
}
static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
@@ -1322,23 +1287,12 @@ static ssize_t _dpu_plane_danger_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct dpu_kms *kms = file->private_data;
- struct dpu_mdss_cfg *cfg = kms->catalog;
int disable_panic;
- char buf[10];
-
- if (!cfg)
- return -EFAULT;
-
- if (count >= sizeof(buf))
- return -EFAULT;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- buf[count] = 0; /* end of string */
+ int ret;
- if (kstrtoint(buf, 0, &disable_panic))
- return -EFAULT;
+ ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
+ if (ret)
+ return ret;
if (disable_panic) {
/* Disable panic signal for all active pipes */
@@ -1363,33 +1317,10 @@ static const struct file_operations dpu_plane_danger_enable = {
static int _dpu_plane_init_debugfs(struct drm_plane *plane)
{
- struct dpu_plane *pdpu;
- struct dpu_kms *kms;
- struct msm_drm_private *priv;
- const struct dpu_sspp_sub_blks *sblk = 0;
- const struct dpu_sspp_cfg *cfg = 0;
-
- if (!plane || !plane->dev) {
- DPU_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return -EINVAL;
- }
-
- kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
-
- if (pdpu && pdpu->pipe_hw)
- cfg = pdpu->pipe_hw->cap;
- if (cfg)
- sblk = cfg->sblk;
-
- if (!sblk)
- return 0;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *kms = _dpu_plane_get_kms(plane);
+ const struct dpu_sspp_cfg *cfg = pdpu->pipe_hw->cap;
+ const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
/* create overall sub-directory for the pipe */
pdpu->debugfs_root =
@@ -1460,25 +1391,11 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
return 0;
}
-
-static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
-{
- struct dpu_plane *pdpu;
-
- if (!plane)
- return;
- pdpu = to_dpu_plane(plane);
-
- debugfs_remove_recursive(pdpu->debugfs_root);
-}
#else
static int _dpu_plane_init_debugfs(struct drm_plane *plane)
{
return 0;
}
-static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
-{
-}
#endif
static int dpu_plane_late_register(struct drm_plane *plane)
@@ -1488,7 +1405,9 @@ static int dpu_plane_late_register(struct drm_plane *plane)
static void dpu_plane_early_unregister(struct drm_plane *plane)
{
- _dpu_plane_destroy_debugfs(plane);
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+
+ debugfs_remove_recursive(pdpu->debugfs_root);
}
static const struct drm_plane_funcs dpu_plane_funcs = {
@@ -1537,7 +1456,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
if (!pdpu) {
DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
ret = -ENOMEM;
- goto exit;
+ return ERR_PTR(ret);
}
/* cache local stuff for later */
@@ -1623,6 +1542,5 @@ clean_sspp:
dpu_hw_sspp_destroy(pdpu->pipe_hw);
clean_plane:
kfree(pdpu);
-exit:
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
deleted file mode 100644
index fc14116789f2..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/string.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-
-#include "dpu_power_handle.h"
-#include "dpu_trace.h"
-
-static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
- [DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
- [DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
- [DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
-};
-
-const char *dpu_power_handle_get_dbus_name(u32 bus_id)
-{
- if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
- return data_bus_name[bus_id];
-
- return NULL;
-}
-
-static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
- u32 event_type)
-{
- struct dpu_power_event *event;
-
- list_for_each_entry(event, &phandle->event_list, list) {
- if (event->event_type & event_type)
- event->cb_fnc(event_type, event->usr);
- }
-}
-
-struct dpu_power_client *dpu_power_client_create(
- struct dpu_power_handle *phandle, char *client_name)
-{
- struct dpu_power_client *client;
- static u32 id;
-
- if (!client_name || !phandle) {
- pr_err("client name is null or invalid power data\n");
- return ERR_PTR(-EINVAL);
- }
-
- client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
- if (!client)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&phandle->phandle_lock);
- strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
- client->usecase_ndx = VOTE_INDEX_DISABLE;
- client->id = id;
- client->active = true;
- pr_debug("client %s created:%pK id :%d\n", client_name,
- client, id);
- id++;
- list_add(&client->list, &phandle->power_client_clist);
- mutex_unlock(&phandle->phandle_lock);
-
- return client;
-}
-
-void dpu_power_client_destroy(struct dpu_power_handle *phandle,
- struct dpu_power_client *client)
-{
- if (!client || !phandle) {
- pr_err("reg bus vote: invalid client handle\n");
- } else if (!client->active) {
- pr_err("dpu power deinit already done\n");
- kfree(client);
- } else {
- pr_debug("bus vote client %s destroyed:%pK id:%u\n",
- client->name, client, client->id);
- mutex_lock(&phandle->phandle_lock);
- list_del_init(&client->list);
- mutex_unlock(&phandle->phandle_lock);
- kfree(client);
- }
-}
-
-void dpu_power_resource_init(struct platform_device *pdev,
- struct dpu_power_handle *phandle)
-{
- phandle->dev = &pdev->dev;
-
- INIT_LIST_HEAD(&phandle->power_client_clist);
- INIT_LIST_HEAD(&phandle->event_list);
-
- mutex_init(&phandle->phandle_lock);
-}
-
-void dpu_power_resource_deinit(struct platform_device *pdev,
- struct dpu_power_handle *phandle)
-{
- struct dpu_power_client *curr_client, *next_client;
- struct dpu_power_event *curr_event, *next_event;
-
- if (!phandle || !pdev) {
- pr_err("invalid input param\n");
- return;
- }
-
- mutex_lock(&phandle->phandle_lock);
- list_for_each_entry_safe(curr_client, next_client,
- &phandle->power_client_clist, list) {
- pr_err("client:%s-%d still registered with refcount:%d\n",
- curr_client->name, curr_client->id,
- curr_client->refcount);
- curr_client->active = false;
- list_del(&curr_client->list);
- }
-
- list_for_each_entry_safe(curr_event, next_event,
- &phandle->event_list, list) {
- pr_err("event:%d, client:%s still registered\n",
- curr_event->event_type,
- curr_event->client_name);
- curr_event->active = false;
- list_del(&curr_event->list);
- }
- mutex_unlock(&phandle->phandle_lock);
-}
-
-int dpu_power_resource_enable(struct dpu_power_handle *phandle,
- struct dpu_power_client *pclient, bool enable)
-{
- bool changed = false;
- u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
- struct dpu_power_client *client;
- u32 event_type;
-
- if (!phandle || !pclient) {
- pr_err("invalid input argument\n");
- return -EINVAL;
- }
-
- mutex_lock(&phandle->phandle_lock);
- if (enable)
- pclient->refcount++;
- else if (pclient->refcount)
- pclient->refcount--;
-
- if (pclient->refcount)
- pclient->usecase_ndx = VOTE_INDEX_LOW;
- else
- pclient->usecase_ndx = VOTE_INDEX_DISABLE;
-
- list_for_each_entry(client, &phandle->power_client_clist, list) {
- if (client->usecase_ndx < VOTE_INDEX_MAX &&
- client->usecase_ndx > max_usecase_ndx)
- max_usecase_ndx = client->usecase_ndx;
- }
-
- if (phandle->current_usecase_ndx != max_usecase_ndx) {
- changed = true;
- prev_usecase_ndx = phandle->current_usecase_ndx;
- phandle->current_usecase_ndx = max_usecase_ndx;
- }
-
- pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
- __builtin_return_address(0), changed, max_usecase_ndx,
- pclient->name, pclient->id, enable, pclient->refcount);
-
- if (!changed)
- goto end;
-
- event_type = enable ? DPU_POWER_EVENT_ENABLE : DPU_POWER_EVENT_DISABLE;
-
- dpu_power_event_trigger_locked(phandle, event_type);
-end:
- mutex_unlock(&phandle->phandle_lock);
- return 0;
-}
-
-struct dpu_power_event *dpu_power_handle_register_event(
- struct dpu_power_handle *phandle,
- u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
- void *usr, char *client_name)
-{
- struct dpu_power_event *event;
-
- if (!phandle) {
- pr_err("invalid power handle\n");
- return ERR_PTR(-EINVAL);
- } else if (!cb_fnc || !event_type) {
- pr_err("no callback fnc or event type\n");
- return ERR_PTR(-EINVAL);
- }
-
- event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
- if (!event)
- return ERR_PTR(-ENOMEM);
-
- event->event_type = event_type;
- event->cb_fnc = cb_fnc;
- event->usr = usr;
- strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
- event->active = true;
-
- mutex_lock(&phandle->phandle_lock);
- list_add(&event->list, &phandle->event_list);
- mutex_unlock(&phandle->phandle_lock);
-
- return event;
-}
-
-void dpu_power_handle_unregister_event(
- struct dpu_power_handle *phandle,
- struct dpu_power_event *event)
-{
- if (!phandle || !event) {
- pr_err("invalid phandle or event\n");
- } else if (!event->active) {
- pr_err("power handle deinit already done\n");
- kfree(event);
- } else {
- mutex_lock(&phandle->phandle_lock);
- list_del_init(&event->list);
- mutex_unlock(&phandle->phandle_lock);
- kfree(event);
- }
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
deleted file mode 100644
index a65b7a297f21..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
+++ /dev/null
@@ -1,217 +0,0 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _DPU_POWER_HANDLE_H_
-#define _DPU_POWER_HANDLE_H_
-
-#define MAX_CLIENT_NAME_LEN 128
-
-#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
-#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
-#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
-#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
-
-#include "dpu_io_util.h"
-
-/* events will be triggered on power handler enable/disable */
-#define DPU_POWER_EVENT_DISABLE BIT(0)
-#define DPU_POWER_EVENT_ENABLE BIT(1)
-
-/**
- * mdss_bus_vote_type: register bus vote type
- * VOTE_INDEX_DISABLE: removes the client vote
- * VOTE_INDEX_LOW: keeps the lowest vote for register bus
- * VOTE_INDEX_MAX: invalid
- */
-enum mdss_bus_vote_type {
- VOTE_INDEX_DISABLE,
- VOTE_INDEX_LOW,
- VOTE_INDEX_MAX,
-};
-
-/**
- * enum dpu_power_handle_data_bus_client - type of axi bus clients
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
- */
-enum dpu_power_handle_data_bus_client {
- DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
- DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
- DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
-};
-
-/**
- * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
- * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
- * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
- * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
- */
-enum DPU_POWER_HANDLE_DBUS_ID {
- DPU_POWER_HANDLE_DBUS_ID_MNOC,
- DPU_POWER_HANDLE_DBUS_ID_LLCC,
- DPU_POWER_HANDLE_DBUS_ID_EBI,
- DPU_POWER_HANDLE_DBUS_ID_MAX,
-};
-
-/**
- * struct dpu_power_client: stores the power client for dpu driver
- * @name: name of the client
- * @usecase_ndx: current regs bus vote type
- * @refcount: current refcount if multiple modules are using same
- * same client for enable/disable. Power module will
- * aggregate the refcount and vote accordingly for this
- * client.
- * @id: assigned during create. helps for debugging.
- * @list: list to attach power handle master list
- * @ab: arbitrated bandwidth for each bus client
- * @ib: instantaneous bandwidth for each bus client
- * @active: indicates the state of dpu power handle
- */
-struct dpu_power_client {
- char name[MAX_CLIENT_NAME_LEN];
- short usecase_ndx;
- short refcount;
- u32 id;
- struct list_head list;
- u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
- u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
- bool active;
-};
-
-/*
- * struct dpu_power_event - local event registration structure
- * @client_name: name of the client registering
- * @cb_fnc: pointer to desired callback function
- * @usr: user pointer to pass to callback event trigger
- * @event: refer to DPU_POWER_HANDLE_EVENT_*
- * @list: list to attach event master list
- * @active: indicates the state of dpu power handle
- */
-struct dpu_power_event {
- char client_name[MAX_CLIENT_NAME_LEN];
- void (*cb_fnc)(u32 event_type, void *usr);
- void *usr;
- u32 event_type;
- struct list_head list;
- bool active;
-};
-
-/**
- * struct dpu_power_handle: power handle main struct
- * @client_clist: master list to store all clients
- * @phandle_lock: lock to synchronize the enable/disable
- * @dev: pointer to device structure
- * @usecase_ndx: current usecase index
- * @event_list: current power handle event list
- */
-struct dpu_power_handle {
- struct list_head power_client_clist;
- struct mutex phandle_lock;
- struct device *dev;
- u32 current_usecase_ndx;
- struct list_head event_list;
-};
-
-/**
- * dpu_power_resource_init() - initializes the dpu power handle
- * @pdev: platform device to search the power resources
- * @pdata: power handle to store the power resources
- */
-void dpu_power_resource_init(struct platform_device *pdev,
- struct dpu_power_handle *pdata);
-
-/**
- * dpu_power_resource_deinit() - release the dpu power handle
- * @pdev: platform device for power resources
- * @pdata: power handle containing the resources
- *
- * Return: error code.
- */
-void dpu_power_resource_deinit(struct platform_device *pdev,
- struct dpu_power_handle *pdata);
-
-/**
- * dpu_power_client_create() - create the client on power handle
- * @pdata: power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: error code.
- */
-struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
- char *client_name);
-
-/**
- * dpu_power_client_destroy() - destroy the client on power handle
- * @pdata: power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: none
- */
-void dpu_power_client_destroy(struct dpu_power_handle *phandle,
- struct dpu_power_client *client);
-
-/**
- * dpu_power_resource_enable() - enable/disable the power resources
- * @pdata: power handle containing the resources
- * @client: client information to enable/disable its vote
- * @enable: boolean request for enable/disable
- *
- * Return: error code.
- */
-int dpu_power_resource_enable(struct dpu_power_handle *pdata,
- struct dpu_power_client *pclient, bool enable);
-
-/**
- * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
- * @phandle: power handle containing the resources
- * @client: client information to bandwidth control
- * @enable: true to enable bandwidth for data base
- *
- * Return: none
- */
-void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
- struct dpu_power_client *pclient, int enable);
-
-/**
- * dpu_power_handle_register_event - register a callback function for an event.
- * Clients can register for multiple events with a single register.
- * Any block with access to phandle can register for the event
- * notification.
- * @phandle: power handle containing the resources
- * @event_type: event type to register; refer DPU_POWER_HANDLE_EVENT_*
- * @cb_fnc: pointer to desired callback function
- * @usr: user pointer to pass to callback on event trigger
- *
- * Return: event pointer if success, or error code otherwise
- */
-struct dpu_power_event *dpu_power_handle_register_event(
- struct dpu_power_handle *phandle,
- u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
- void *usr, char *client_name);
-/**
- * dpu_power_handle_unregister_event - unregister callback for event(s)
- * @phandle: power handle containing the resources
- * @event: event pointer returned after power handle register
- */
-void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
- struct dpu_power_event *event);
-
-/**
- * dpu_power_handle_get_dbus_name - get name of given data bus identifier
- * @bus_id: data bus identifier
- * Return: Pointer to name string if success; NULL otherwise
- */
-const char *dpu_power_handle_get_dbus_name(u32 bus_id);
-
-#endif /* _DPU_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index e12c4cefb742..c78b521ceda1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -99,27 +99,6 @@ TRACE_EVENT(dpu_perf_set_ot,
__entry->vbif_idx)
)
-TRACE_EVENT(dpu_perf_update_bus,
- TP_PROTO(int client, unsigned long long ab_quota,
- unsigned long long ib_quota),
- TP_ARGS(client, ab_quota, ib_quota),
- TP_STRUCT__entry(
- __field(int, client)
- __field(u64, ab_quota)
- __field(u64, ib_quota)
- ),
- TP_fast_assign(
- __entry->client = client;
- __entry->ab_quota = ab_quota;
- __entry->ib_quota = ib_quota;
- ),
- TP_printk("Request client:%d ab=%llu ib=%llu",
- __entry->client,
- __entry->ab_quota,
- __entry->ib_quota)
-)
-
-
TRACE_EVENT(dpu_cmd_release_bw,
TP_PROTO(u32 crtc_id),
TP_ARGS(crtc_id),
@@ -319,6 +298,10 @@ DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
TP_PROTO(uint32_t drm_id),
TP_ARGS(drm_id)
);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_runtime_resume,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
TRACE_EVENT(dpu_enc_enable,
TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
@@ -539,10 +522,6 @@ DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
TP_PROTO(uint32_t drm_id, u32 event),
TP_ARGS(drm_id, event)
);
-DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
- TP_PROTO(uint32_t drm_id, u32 event),
- TP_ARGS(drm_id, event)
-);
DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
TP_PROTO(uint32_t drm_id, u32 event),
TP_ARGS(drm_id, event)
@@ -749,24 +728,17 @@ TRACE_EVENT(dpu_crtc_vblank_enable,
__field( uint32_t, enc_id )
__field( bool, enable )
__field( bool, enabled )
- __field( bool, suspend )
- __field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enc_id = enc_id;
__entry->enable = enable;
__entry->enabled = crtc->enabled;
- __entry->suspend = crtc->suspend;
- __entry->vblank_requested = crtc->vblank_requested;
),
- TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
- "vblank_req:%s}",
+ TP_printk("id:%u encoder:%u enable:%s state{enabled:%s}",
__entry->drm_id, __entry->enc_id,
__entry->enable ? "true" : "false",
- __entry->enabled ? "true" : "false",
- __entry->suspend ? "true" : "false",
- __entry->vblank_requested ? "true" : "false")
+ __entry->enabled ? "true" : "false")
);
DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
@@ -776,25 +748,15 @@ DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
__field( uint32_t, drm_id )
__field( bool, enable )
__field( bool, enabled )
- __field( bool, suspend )
- __field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enable = enable;
__entry->enabled = crtc->enabled;
- __entry->suspend = crtc->suspend;
- __entry->vblank_requested = crtc->vblank_requested;
),
- TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+ TP_printk("id:%u enable:%s state{enabled:%s}",
__entry->drm_id, __entry->enable ? "true" : "false",
- __entry->enabled ? "true" : "false",
- __entry->suspend ? "true" : "false",
- __entry->vblank_requested ? "true" : "false")
-);
-DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
- TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
- TP_ARGS(drm_id, enable, crtc)
+ __entry->enabled ? "true" : "false")
);
DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
@@ -1004,6 +966,53 @@ TRACE_EVENT(dpu_core_perf_update_clk,
__entry->stop_req ? "true" : "false", __entry->clk_rate)
);
+TRACE_EVENT(dpu_hw_ctl_update_pending_flush,
+ TP_PROTO(u32 new_bits, u32 pending_mask),
+ TP_ARGS(new_bits, pending_mask),
+ TP_STRUCT__entry(
+ __field( u32, new_bits )
+ __field( u32, pending_mask )
+ ),
+ TP_fast_assign(
+ __entry->new_bits = new_bits;
+ __entry->pending_mask = pending_mask;
+ ),
+ TP_printk("new=%x existing=%x", __entry->new_bits,
+ __entry->pending_mask)
+);
+
+DECLARE_EVENT_CLASS(dpu_hw_ctl_pending_flush_template,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush),
+ TP_STRUCT__entry(
+ __field( u32, pending_mask )
+ __field( u32, ctl_flush )
+ ),
+ TP_fast_assign(
+ __entry->pending_mask = pending_mask;
+ __entry->ctl_flush = ctl_flush;
+ ),
+ TP_printk("pending_mask=%x CTL_FLUSH=%x", __entry->pending_mask,
+ __entry->ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_clear_pending_flush,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template,
+ dpu_hw_ctl_trigger_pending_flush,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_prepare,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_start,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+
#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 295528292296..ef753ea9c499 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -191,7 +191,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
if (ot_lim == 0)
- goto exit;
+ return;
trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
params->vbif_idx);
@@ -210,8 +210,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
-exit:
- return;
}
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
@@ -312,31 +310,25 @@ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
}
#ifdef CONFIG_DEBUG_FS
-void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
-{
- debugfs_remove_recursive(dpu_kms->debugfs_vbif);
- dpu_kms->debugfs_vbif = NULL;
-}
-int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
char vbif_name[32];
- struct dentry *debugfs_vbif;
+ struct dentry *entry, *debugfs_vbif;
int i, j;
- dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
- if (!dpu_kms->debugfs_vbif) {
- DPU_ERROR("failed to create vbif debugfs\n");
- return -EINVAL;
- }
+ entry = debugfs_create_dir("vbif", debugfs_root);
+ if (IS_ERR_OR_NULL(entry))
+ return;
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
- debugfs_vbif = debugfs_create_dir(vbif_name,
- dpu_kms->debugfs_vbif);
+ debugfs_vbif = debugfs_create_dir(vbif_name, entry);
+ if (IS_ERR_OR_NULL(debugfs_vbif))
+ continue;
debugfs_create_u32("features", 0600, debugfs_vbif,
(u32 *)&vbif->features);
@@ -378,7 +370,5 @@ int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
(u32 *)&cfg->ot_limit);
}
}
-
- return 0;
}
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
index f17af52dbbd5..6356876d7a66 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -78,17 +78,6 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
*/
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
-#ifdef CONFIG_DEBUG_FS
-int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
-void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
-#else
-static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
- struct dentry *debugfs_root)
-{
- return 0;
-}
-static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
-{
-}
-#endif
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+
#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
index 4f12e5c534c8..9fc9dbde8a27 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -813,18 +813,6 @@ enum color_fmts {
#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
#define COLOR_FMT_P010 COLOR_FMT_P010
-static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
-{
- (void)height;
- (void)width;
-
- /*
- * In the future, calculate the size based on the w/h but just
- * hardcode it for now since 16K satisfies all current usecases.
- */
- return 16 * 1024;
-}
-
/*
* Function arguments:
* @color_fmt
@@ -832,38 +820,32 @@ static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
{
- unsigned int alignment, stride = 0;
+ unsigned int stride = 0;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width, alignment);
+ stride = MSM_MEDIA_ALIGN(width, 128);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
- alignment = 256;
stride = MSM_MEDIA_ALIGN(width, 192);
- stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
break;
case COLOR_FMT_P010_UBWC:
- alignment = 256;
- stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ stride = MSM_MEDIA_ALIGN(width * 2, 256);
break;
case COLOR_FMT_P010:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width*2, alignment);
- break;
- default:
+ stride = MSM_MEDIA_ALIGN(width * 2, 128);
break;
}
-invalid_input:
+
return stride;
}
@@ -874,38 +856,32 @@ invalid_input:
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
{
- unsigned int alignment, stride = 0;
+ unsigned int stride = 0;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width, alignment);
+ stride = MSM_MEDIA_ALIGN(width, 128);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
- alignment = 256;
stride = MSM_MEDIA_ALIGN(width, 192);
- stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
break;
case COLOR_FMT_P010_UBWC:
- alignment = 256;
- stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ stride = MSM_MEDIA_ALIGN(width * 2, 256);
break;
case COLOR_FMT_P010:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width*2, alignment);
- break;
- default:
+ stride = MSM_MEDIA_ALIGN(width * 2, 128);
break;
}
-invalid_input:
+
return stride;
}
@@ -916,12 +892,12 @@ invalid_input:
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
{
- unsigned int alignment, sclines = 0;
+ unsigned int sclines = 0;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
@@ -929,17 +905,14 @@ static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
case COLOR_FMT_P010:
- alignment = 32;
+ sclines = MSM_MEDIA_ALIGN(height, 32);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
case COLOR_FMT_P010_UBWC:
- alignment = 16;
+ sclines = MSM_MEDIA_ALIGN(height, 16);
break;
- default:
- return 0;
}
- sclines = MSM_MEDIA_ALIGN(height, alignment);
-invalid_input:
+
return sclines;
}
@@ -950,12 +923,12 @@ invalid_input:
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
{
- unsigned int alignment, sclines = 0;
+ unsigned int sclines = 0;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
@@ -964,18 +937,13 @@ static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
case COLOR_FMT_NV12_BPP10_UBWC:
case COLOR_FMT_P010_UBWC:
case COLOR_FMT_P010:
- alignment = 16;
+ sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 16);
break;
case COLOR_FMT_NV12_UBWC:
- alignment = 32;
+ sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 32);
break;
- default:
- goto invalid_input;
}
- sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
-
-invalid_input:
return sclines;
}
@@ -986,12 +954,12 @@ invalid_input:
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
{
- int y_tile_width = 0, y_meta_stride = 0;
+ int y_tile_width = 0, y_meta_stride;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1002,14 +970,11 @@ static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
y_tile_width = 48;
break;
default:
- goto invalid_input;
+ return 0;
}
y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
- y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
-
-invalid_input:
- return y_meta_stride;
+ return MSM_MEDIA_ALIGN(y_meta_stride, 64);
}
/*
@@ -1019,12 +984,12 @@ invalid_input:
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
{
- int y_tile_height = 0, y_meta_scanlines = 0;
+ int y_tile_height = 0, y_meta_scanlines;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1035,14 +1000,11 @@ static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
y_tile_height = 4;
break;
default:
- goto invalid_input;
+ return 0;
}
y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
- y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
-
-invalid_input:
- return y_meta_scanlines;
+ return MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
}
/*
@@ -1052,12 +1014,12 @@ invalid_input:
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
{
- int uv_tile_width = 0, uv_meta_stride = 0;
+ int uv_tile_width = 0, uv_meta_stride;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1068,14 +1030,11 @@ static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
uv_tile_width = 24;
break;
default:
- goto invalid_input;
+ return 0;
}
uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
- uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
-
-invalid_input:
- return uv_meta_stride;
+ return MSM_MEDIA_ALIGN(uv_meta_stride, 64);
}
/*
@@ -1085,12 +1044,12 @@ invalid_input:
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
{
- int uv_tile_height = 0, uv_meta_scanlines = 0;
+ int uv_tile_height = 0, uv_meta_scanlines;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1101,22 +1060,19 @@ static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
uv_tile_height = 4;
break;
default:
- goto invalid_input;
+ return 0;
}
uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
- uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
-
-invalid_input:
- return uv_meta_scanlines;
+ return MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
}
-static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
{
- unsigned int alignment = 0, stride = 0, bpp = 4;
+ unsigned int alignment = 0, bpp = 4;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888:
@@ -1131,21 +1087,18 @@ static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
alignment = 256;
break;
default:
- goto invalid_input;
+ return 0;
}
- stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
-
-invalid_input:
- return stride;
+ return MSM_MEDIA_ALIGN(width * bpp, alignment);
}
-static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
{
- unsigned int alignment = 0, scanlines = 0;
+ unsigned int alignment = 0;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888:
@@ -1157,220 +1110,46 @@ static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
alignment = 16;
break;
default:
- goto invalid_input;
+ return 0;
}
- scanlines = MSM_MEDIA_ALIGN(height, alignment);
-
-invalid_input:
- return scanlines;
+ return MSM_MEDIA_ALIGN(height, alignment);
}
-static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
{
- int rgb_tile_width = 0, rgb_meta_stride = 0;
+ int rgb_meta_stride;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888_UBWC:
case COLOR_FMT_RGBA1010102_UBWC:
case COLOR_FMT_RGB565_UBWC:
- rgb_tile_width = 16;
- break;
- default:
- goto invalid_input;
+ rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, 16);
+ return MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
}
- rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
- rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
-
-invalid_input:
- return rgb_meta_stride;
+ return 0;
}
-static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
{
- int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+ int rgb_meta_scanlines;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888_UBWC:
case COLOR_FMT_RGBA1010102_UBWC:
case COLOR_FMT_RGB565_UBWC:
- rgb_tile_height = 4;
- break;
- default:
- goto invalid_input;
+ rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, 4);
+ return MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
}
- rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
- rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
-
-invalid_input:
- return rgb_meta_scanlines;
-}
-
-/*
- * Function arguments:
- * @color_fmt
- * @width
- * Progressive: width
- * Interlaced: width
- * @height
- * Progressive: height
- * Interlaced: height
- */
-static inline unsigned int VENUS_BUFFER_SIZE(
- int color_fmt, int width, int height)
-{
- const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
- unsigned int uv_alignment = 0, size = 0;
- unsigned int y_plane, uv_plane, y_stride,
- uv_stride, y_sclines, uv_sclines;
- unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
- unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
- unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
- unsigned int y_meta_plane = 0, uv_meta_plane = 0;
- unsigned int rgb_stride = 0, rgb_scanlines = 0;
- unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
- unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
-
- if (!width || !height)
- goto invalid_input;
-
- y_stride = VENUS_Y_STRIDE(color_fmt, width);
- uv_stride = VENUS_UV_STRIDE(color_fmt, width);
- y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
- rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
- rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
-
- switch (color_fmt) {
- case COLOR_FMT_NV21:
- case COLOR_FMT_NV12:
- case COLOR_FMT_P010:
- uv_alignment = 4096;
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines + uv_alignment;
- size = y_plane + uv_plane +
- MSM_MEDIA_MAX(extra_size, 8 * y_stride);
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_NV12_MVTB:
- uv_alignment = 4096;
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines + uv_alignment;
- size = y_plane + uv_plane;
- size = 2 * size + extra_size;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_NV12_UBWC:
- y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
- y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
- uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
- y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
- y_meta_scanlines =
- VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
- y_meta_plane = MSM_MEDIA_ALIGN(
- y_meta_stride * y_meta_scanlines, 4096);
- uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
- uv_meta_scanlines =
- VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
- uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
- uv_meta_scanlines, 4096);
-
- size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane)*2 +
- MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_NV12_BPP10_UBWC:
- y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
- uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
- y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
- y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
- y_meta_plane = MSM_MEDIA_ALIGN(
- y_meta_stride * y_meta_scanlines, 4096);
- uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
- uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
- uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
- uv_meta_scanlines, 4096);
-
- size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane +
- MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_P010_UBWC:
- y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
- uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
- y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
- y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
- y_meta_plane = MSM_MEDIA_ALIGN(
- y_meta_stride * y_meta_scanlines, 4096);
- uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
- uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
- uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
- uv_meta_scanlines, 4096);
-
- size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_RGBA8888:
- rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
- size = rgb_plane;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_RGBA8888_UBWC:
- case COLOR_FMT_RGBA1010102_UBWC:
- case COLOR_FMT_RGB565_UBWC:
- rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
- 4096);
- rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
- rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
- height);
- rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
- rgb_meta_scanlines, 4096);
- size = rgb_ubwc_plane + rgb_meta_plane;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- default:
- break;
- }
-invalid_input:
- return size;
-}
-
-static inline unsigned int VENUS_VIEW2_OFFSET(
- int color_fmt, int width, int height)
-{
- unsigned int offset = 0;
- unsigned int y_plane, uv_plane, y_stride,
- uv_stride, y_sclines, uv_sclines;
- if (!width || !height)
- goto invalid_input;
-
- y_stride = VENUS_Y_STRIDE(color_fmt, width);
- uv_stride = VENUS_UV_STRIDE(color_fmt, width);
- y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
- switch (color_fmt) {
- case COLOR_FMT_NV12_MVTB:
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines;
- offset = y_plane + uv_plane;
- break;
- default:
- break;
- }
-invalid_input:
- return offset;
+ return 0;
}
#endif
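
With the gotos gone, each VENUS_* helper above simply picks the per-format alignment inside its switch arm and returns the aligned value directly; the arithmetic itself is unchanged. Below is a standalone sketch of that arithmetic, using my own stand-ins for MSM_MEDIA_ALIGN (round up to a power-of-two multiple) and MSM_MEDIA_ROUNDUP (integer ceiling division) — the real macro bodies are outside this hunk, so those definitions are assumptions.

	/* Standalone sketch of the alignment math used by the VENUS_* helpers
	 * above. Not the kernel macros themselves. */
	#include <stdio.h>

	/* Round val up to the next multiple of align (align is a power of two). */
	static unsigned int media_align(unsigned int val, unsigned int align)
	{
		return (val + align - 1) & ~(align - 1);
	}

	/* Integer ceiling division, as MSM_MEDIA_ROUNDUP is assumed to behave. */
	static unsigned int media_roundup(unsigned int num, unsigned int den)
	{
		return (num + den - 1) / den;
	}

	int main(void)
	{
		unsigned int height = 1080, width = 1920;

		/* P010 luma scanlines: height aligned to 32 -> 1088 */
		printf("y_sclines  = %u\n", media_align(height, 32));
		/* P010 chroma scanlines: (height+1)/2 aligned to 16 -> 544 */
		printf("uv_sclines = %u\n", media_align((height + 1) >> 1, 16));
		/* UBWC meta stride shape: ROUNDUP by the tile width (48 in one of
		 * the arms above), then align to 64 -> 64 for a 1920-wide frame */
		printf("meta_stride = %u\n", media_align(media_roundup(width, 48), 64));
		return 0;
	}
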
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 7b028f778960..cc32ea5f4289 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -128,7 +128,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
struct msm_kms *kms = &mdp4_kms->base.base;
- msm_gem_put_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put_unlocked(val);
}
@@ -378,7 +378,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_get(next_bo);
- msm_gem_get_iova(next_bo, kms->aspace, &iova);
+ msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -423,7 +423,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
- dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
}
@@ -436,7 +436,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
- ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova);
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
index f6bc86a35d8d..ff8f2da160c0 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
@@ -45,7 +45,7 @@ static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
if (!dtv_pdata) {
- dev_err(dev->dev, "could not find dtv pdata\n");
+ DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
return;
}
@@ -202,16 +202,16 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
if (ret)
- dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n",
pc, ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
if (ret)
- dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable mdp_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
if (ret)
- dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
@@ -251,14 +251,14 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
- dev_err(dev->dev, "failed to get hdmi_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
goto fail;
}
mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
- dev_err(dev->dev, "failed to get tv_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 44d1cda56974..e437aa806f7b 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -43,7 +43,7 @@ static int mdp4_hw_init(struct msm_kms *kms)
DBG("found MDP4 version v%d.%d", major, minor);
if (major != 4) {
- dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
@@ -165,7 +165,7 @@ static void mdp4_destroy(struct msm_kms *kms)
struct msm_gem_address_space *aspace = kms->aspace;
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+ msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
@@ -206,7 +206,8 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms)
clk_disable_unprepare(mdp4_kms->clk);
if (mdp4_kms->pclk)
clk_disable_unprepare(mdp4_kms->pclk);
- clk_disable_unprepare(mdp4_kms->lut_clk);
+ if (mdp4_kms->lut_clk)
+ clk_disable_unprepare(mdp4_kms->lut_clk);
if (mdp4_kms->axi_clk)
clk_disable_unprepare(mdp4_kms->axi_clk);
@@ -220,7 +221,8 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
clk_prepare_enable(mdp4_kms->clk);
if (mdp4_kms->pclk)
clk_prepare_enable(mdp4_kms->pclk);
- clk_prepare_enable(mdp4_kms->lut_clk);
+ if (mdp4_kms->lut_clk)
+ clk_prepare_enable(mdp4_kms->lut_clk);
if (mdp4_kms->axi_clk)
clk_prepare_enable(mdp4_kms->axi_clk);
@@ -251,7 +253,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
encoder = mdp4_lcdc_encoder_init(dev, panel_node);
if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct LCDC encoder\n");
+ DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
return PTR_ERR(encoder);
}
@@ -260,7 +262,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
if (IS_ERR(connector)) {
- dev_err(dev->dev, "failed to initialize LVDS connector\n");
+ DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
return PTR_ERR(connector);
}
@@ -271,7 +273,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct DTV encoder\n");
+ DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
return PTR_ERR(encoder);
}
@@ -282,7 +284,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* Construct bridge/connector for HDMI: */
ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
if (ret) {
- dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
return ret;
}
}
@@ -300,7 +302,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
encoder = mdp4_dsi_encoder_init(dev);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to construct DSI encoder: %d\n", ret);
return ret;
}
@@ -311,14 +313,14 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
if (ret) {
- dev_err(dev->dev, "failed to initialize DSI: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
ret);
return ret;
}
break;
default:
- dev_err(dev->dev, "Invalid or unsupported interface\n");
+ DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
return -EINVAL;
}
@@ -354,7 +356,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
plane = mdp4_plane_init(dev, vg_planes[i], false);
if (IS_ERR(plane)) {
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to construct plane for VG%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
@@ -365,7 +367,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
plane = mdp4_plane_init(dev, rgb_planes[i], true);
if (IS_ERR(plane)) {
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to construct plane for RGB%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
@@ -374,7 +376,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
mdp4_crtcs[i]);
if (IS_ERR(crtc)) {
- dev_err(dev->dev, "failed to construct crtc for %s\n",
+ DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
mdp4_crtc_names[i]);
ret = PTR_ERR(crtc);
goto fail;
@@ -396,7 +398,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
if (ret) {
- dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
i, ret);
goto fail;
}
@@ -419,7 +421,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
- dev_err(dev->dev, "failed to allocate kms\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
ret = -ENOMEM;
goto fail;
}
@@ -439,7 +441,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@@ -456,14 +458,14 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (mdp4_kms->vdd) {
ret = regulator_enable(mdp4_kms->vdd);
if (ret) {
- dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
goto fail;
}
}
mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(mdp4_kms->clk)) {
- dev_err(dev->dev, "failed to get core_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
ret = PTR_ERR(mdp4_kms->clk);
goto fail;
}
@@ -472,23 +474,25 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (IS_ERR(mdp4_kms->pclk))
mdp4_kms->pclk = NULL;
- // XXX if (rev >= MDP_REV_42) { ???
- mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
- if (IS_ERR(mdp4_kms->lut_clk)) {
- dev_err(dev->dev, "failed to get lut_clk\n");
- ret = PTR_ERR(mdp4_kms->lut_clk);
- goto fail;
+ if (mdp4_kms->rev >= 2) {
+ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+ if (IS_ERR(mdp4_kms->lut_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
+ ret = PTR_ERR(mdp4_kms->lut_clk);
+ goto fail;
+ }
}
mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) {
- dev_err(dev->dev, "failed to get axi_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
ret = PTR_ERR(mdp4_kms->axi_clk);
goto fail;
}
clk_set_rate(mdp4_kms->clk, config->max_clk);
- clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+ if (mdp4_kms->lut_clk)
+ clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
pm_runtime_enable(dev->dev);
mdp4_kms->rpm_enabled = true;
@@ -519,29 +523,29 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (ret)
goto fail;
} else {
- dev_info(dev->dev, "no iommu, fallback to phys "
+ DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
aspace = NULL;
}
ret = modeset_init(mdp4_kms);
if (ret) {
- dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
- mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+ mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
- dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
mdp4_kms->blank_cursor_bo = NULL;
goto fail;
}
- ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
- dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index d47b8f4af991..fff77a4b12c2 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -47,7 +47,7 @@ static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
if (!lcdc_pdata) {
- dev_err(dev->dev, "could not find lvds pdata\n");
+ DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
return;
}
@@ -224,7 +224,7 @@ static void setup_phy(struct drm_encoder *encoder)
break;
default:
- dev_err(dev->dev, "unknown bpp: %d\n", bpp);
+ DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp);
return;
}
@@ -241,7 +241,7 @@ static void setup_phy(struct drm_encoder *encoder)
MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
break;
default:
- dev_err(dev->dev, "unknown # of channels: %d\n", nchan);
+ DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan);
return;
}
@@ -354,7 +354,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
if (ret)
- dev_err(dev->dev, "failed to disable regulator: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
}
bs_set(mdp4_lcdc_encoder, 0);
@@ -370,20 +370,25 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
unsigned long pc = mdp4_lcdc_encoder->pixclock;
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
+ uint32_t config;
int i, ret;
if (WARN_ON(mdp4_lcdc_encoder->enabled))
return;
/* TODO: hard-coded for 18bpp: */
- mdp4_crtc_set_config(encoder->crtc,
- MDP4_DMA_CONFIG_R_BPC(BPC6) |
- MDP4_DMA_CONFIG_G_BPC(BPC6) |
- MDP4_DMA_CONFIG_B_BPC(BPC6) |
- MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
- MDP4_DMA_CONFIG_PACK(0x21) |
- MDP4_DMA_CONFIG_DEFLKR_EN |
- MDP4_DMA_CONFIG_DITHER_EN);
+ config =
+ MDP4_DMA_CONFIG_R_BPC(BPC6) |
+ MDP4_DMA_CONFIG_G_BPC(BPC6) |
+ MDP4_DMA_CONFIG_B_BPC(BPC6) |
+ MDP4_DMA_CONFIG_PACK(0x21) |
+ MDP4_DMA_CONFIG_DEFLKR_EN |
+ MDP4_DMA_CONFIG_DITHER_EN;
+
+ if (!of_property_read_bool(dev->dev->of_node, "qcom,lcdc-align-lsb"))
+ config |= MDP4_DMA_CONFIG_PACK_ALIGN_MSB;
+
+ mdp4_crtc_set_config(encoder->crtc, config);
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
bs_set(mdp4_lcdc_encoder, 1);
@@ -391,16 +396,16 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
if (ret)
- dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
}
DBG("setting lcdc_clk=%lu", pc);
ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
if (ret)
- dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
if (ret)
- dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
if (!IS_ERR(panel)) {
@@ -454,7 +459,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
/* TODO: do we need different pll in other cases? */
mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
- dev_err(dev->dev, "failed to get lvds_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
goto fail;
}
@@ -463,7 +468,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[0] = reg;
@@ -471,7 +476,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[1] = reg;
@@ -479,7 +484,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
reg = devm_regulator_get(dev->dev, "lvds-vdda");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get lvds-vdda: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[2] = reg;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 7a499731ce93..005066f7154d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -234,22 +234,22 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
format = to_mdp_format(msm_framebuffer_format(fb));
if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
- dev_err(dev->dev, "Width down scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
return -ERANGE;
}
if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
- dev_err(dev->dev, "Height down scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n");
return -ERANGE;
}
if (crtc_w > (src_w * UP_SCALE_MAX)) {
- dev_err(dev->dev, "Width up scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n");
return -ERANGE;
}
if (crtc_h > (src_h * UP_SCALE_MAX)) {
- dev_err(dev->dev, "Height up scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n");
return -ERANGE;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 824067d2d427..ea8f7d7daf7f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -553,6 +553,91 @@ const struct mdp5_cfg_hw msm8x96_config = {
.max_clk = 412500000,
};
+const struct mdp5_cfg_hw msm8917_config = {
+ .name = "msm8917",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+
+ },
+ .pp = {
+ .count = 1,
+ .base = { 0x70000 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ },
+ },
+ .max_clk = 320000000,
+};
+
static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
@@ -560,6 +645,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 6, .config = { .hw = &msm8x16_config } },
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
+ { .revision = 15, .config = { .hw = &msm8917_config } },
};
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
@@ -600,7 +686,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
}
if (major != 1) {
- dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
@@ -615,7 +701,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
break;
}
if (unlikely(!mdp5_cfg)) {
- dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
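
The new msm8917_config is wired up through the cfg_handlers[] table with .revision = 15, so an MDP5 block reporting that minor revision picks up this configuration. A standalone sketch of the lookup is below; it assumes the handler matching simply compares the reported minor revision against each table entry (the full matching loop sits outside this hunk), and it only mirrors the entries visible here.

	/* Standalone sketch of the revision -> hw config lookup that the new
	 * { .revision = 15, ... } entry plugs into. */
	#include <stdio.h>
	#include <stddef.h>

	struct cfg_handler {
		int revision;
		const char *name;	/* stands in for the hw config pointer */
	};

	static const struct cfg_handler cfg_handlers[] = {
		{ 0,  "msm8x74v1" },
		{ 2,  "msm8x74v2" },
		{ 6,  "msm8x16"   },
		{ 9,  "msm8x94"   },
		{ 7,  "msm8x96"   },
		{ 15, "msm8917"   },	/* new entry added by this patch */
	};

	static const char *lookup(int minor)
	{
		size_t i;

		for (i = 0; i < sizeof(cfg_handlers) / sizeof(cfg_handlers[0]); i++)
			if (cfg_handlers[i].revision == minor)
				return cfg_handlers[i].name;
		return NULL;	/* -ENXIO path: "unexpected MDP minor revision" */
	}

	int main(void)
	{
		const char *hit = lookup(15), *miss = lookup(5);

		printf("minor 15 -> %s\n", hit);
		printf("minor 5  -> %s\n", miss ? miss : "unsupported");
		return 0;
	}
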
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index 33972c1816ed..976585d8bfd6 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -55,20 +55,20 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
int pp_id = mixer->pp;
if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
- dev_err(dev, "vsync_clk is not initialized\n");
+ DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n");
return -EINVAL;
}
total_lines_x100 = mode->vtotal * mode->vrefresh;
if (!total_lines_x100) {
- dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+ DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
__func__, mode->vtotal, mode->vrefresh);
return -EINVAL;
}
vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
if (vsync_clk_speed <= 0) {
- dev_err(dev, "vsync_clk round rate failed %ld\n",
+ DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n",
vsync_clk_speed);
return -EINVAL;
}
@@ -102,13 +102,13 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
ret = clk_set_rate(mdp5_kms->vsync_clk,
clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
if (ret) {
- dev_err(encoder->dev->dev,
+ DRM_DEV_ERROR(encoder->dev->dev,
"vsync_clk clk_set_rate failed, %d\n", ret);
return ret;
}
ret = clk_prepare_enable(mdp5_kms->vsync_clk);
if (ret) {
- dev_err(encoder->dev->dev,
+ DRM_DEV_ERROR(encoder->dev->dev,
"vsync_clk clk_prepare_enable failed, %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 4878b81c96fb..2f95e6525589 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -173,7 +173,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
struct msm_kms *kms = &mdp5_kms->base.base;
- msm_gem_put_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put_unlocked(val);
}
@@ -655,7 +655,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
if (ret) {
- dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
return ret;
}
@@ -672,7 +672,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
* and that we don't have conflicting mixer stages:
*/
if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
- dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+ DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
cnt, start);
return -EINVAL;
}
@@ -872,7 +872,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
}
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
- dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
}
@@ -896,7 +896,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
&mdp5_crtc->cursor.iova);
if (ret)
return -EINVAL;
@@ -917,7 +917,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
- dev_err(dev->dev, "failed to %sable cursor: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
cursor_enable ? "en" : "dis", ret);
goto end;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index f93d5681267c..65a871f9f0d9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -262,13 +262,13 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
struct mdp5_hw_mixer *mixer = pipeline->mixer;
if (unlikely(WARN_ON(!mixer))) {
- dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
ctl->id);
return -EINVAL;
}
if (pipeline->r_mixer) {
- dev_err(ctl_mgr->dev->dev, "unsupported configuration");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
return -EINVAL;
}
@@ -604,10 +604,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
return 0;
} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
- dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
return -EINVAL;
} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
- dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
return -EINVAL;
}
@@ -652,7 +652,7 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
if ((ctl_mgr->ctls[c].status & checkm) == match)
goto found;
- dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
goto unlock;
found:
@@ -698,13 +698,13 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
if (!ctl_mgr) {
- dev_err(dev->dev, "failed to allocate CTL manager\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
ret = -ENOMEM;
goto fail;
}
if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
- dev_err(dev->dev, "Increase static pool size to at least %d\n",
+ DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count);
ret = -ENOSPC;
goto fail;
@@ -723,7 +723,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
if (WARN_ON(!ctl_cfg->base[c])) {
- dev_err(dev->dev, "CTL_%d: base is null!\n", c);
+ DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
goto fail;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index f71d8cf2261b..97179bec8902 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -264,7 +264,7 @@ static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
minor->debugfs_root, minor);
if (ret) {
- dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
+ DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
return ret;
}
@@ -337,7 +337,7 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
encoder = mdp5_encoder_init(dev, intf, ctl);
if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct encoder\n");
+ DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
return encoder;
}
@@ -418,7 +418,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
- dev_err(dev->dev, "failed to find dsi from intf %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
intf->num);
ret = -EINVAL;
break;
@@ -443,7 +443,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
break;
}
default:
- dev_err(dev->dev, "unknown intf: %d\n", intf->type);
+ DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
ret = -EINVAL;
break;
}
@@ -500,7 +500,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
plane = mdp5_plane_init(dev, type);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
+ DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
goto fail;
}
priv->planes[priv->num_planes++] = plane;
@@ -517,7 +517,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
- dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
+ DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
priv->crtcs[priv->num_crtcs++] = crtc;
@@ -552,7 +552,7 @@ static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
- dev_info(dev, "MDP5 version v%d.%d", *major, *minor);
+ DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
}
static int get_clk(struct platform_device *pdev, struct clk **clkp,
@@ -561,7 +561,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
struct device *dev = &pdev->dev;
struct clk *clk = msm_clk_get(pdev, name);
if (IS_ERR(clk) && mandatory) {
- dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+ DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
}
if (IS_ERR(clk))
@@ -688,7 +688,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (irq < 0) {
ret = irq;
- dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@@ -724,12 +724,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
- dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
ret);
goto fail;
}
} else {
- dev_info(&pdev->dev,
+ DRM_DEV_INFO(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
aspace = NULL;
}
@@ -738,7 +738,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = modeset_init(mdp5_kms);
if (ret) {
- dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
@@ -795,7 +795,7 @@ static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
if (IS_ERR(hwpipe)) {
ret = PTR_ERR(hwpipe);
- dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
pipe2name(pipes[i]), ret);
return ret;
}
@@ -867,7 +867,7 @@ static int hwmixer_init(struct mdp5_kms *mdp5_kms)
mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
if (IS_ERR(mixer)) {
ret = PTR_ERR(mixer);
- dev_err(dev->dev, "failed to construct LM%d (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
i, ret);
return ret;
}
@@ -897,7 +897,7 @@ static int interface_init(struct mdp5_kms *mdp5_kms)
intf = kzalloc(sizeof(*intf), GFP_KERNEL);
if (!intf) {
- dev_err(dev->dev, "failed to construct INTF%d\n", i);
+ DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 1cc4e57f0226..889c2940692c 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -132,7 +132,7 @@ static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
mdp5_mdss);
if (!d) {
- dev_err(dev, "mdss irq domain add failed\n");
+ DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
return -ENXIO;
}
@@ -246,7 +246,7 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = msm_mdss_get_clocks(mdp5_mdss);
if (ret) {
- dev_err(dev->dev, "failed to get clocks: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
goto fail;
}
@@ -259,7 +259,7 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = regulator_enable(mdp5_mdss->vdd);
if (ret) {
- dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
ret);
goto fail;
}
@@ -267,13 +267,13 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
- dev_err(dev->dev, "failed to init irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
goto fail_irq;
}
ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
- dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
goto fail_irq;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 310459541e48..be13140967b4 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -125,7 +125,7 @@ static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
SET_PROPERTY(zpos, ZPOS, uint8_t);
- dev_err(dev->dev, "Invalid property\n");
+ DRM_DEV_ERROR(dev->dev, "Invalid property\n");
ret = -EINVAL;
done:
return ret;
@@ -153,7 +153,7 @@ static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
GET_PROPERTY(zpos, ZPOS, uint8_t);
- dev_err(dev->dev, "Invalid property\n");
+ DRM_DEV_ERROR(dev->dev, "Invalid property\n");
ret = -EINVAL;
done:
return ret;
@@ -658,7 +658,7 @@ static int calc_scalex_steps(struct drm_plane *plane,
ret = calc_phase_step(src, dest, &phasex_step);
if (ret) {
- dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
+ DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
}
@@ -683,7 +683,7 @@ static int calc_scaley_steps(struct drm_plane *plane,
ret = calc_phase_step(src, dest, &phasey_step);
if (ret) {
- dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
+ DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index 96c2b828dba4..7cebcb2b3a37 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -88,7 +88,7 @@ static int smp_request_block(struct mdp5_smp *smp,
avail = cnt - bitmap_weight(state->state, cnt);
if (nblks > avail) {
- dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
nblks, avail);
return -ENOSPC;
}
@@ -188,7 +188,7 @@ int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
ret = smp_request_block(smp, state, cid, n);
if (ret) {
- dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+ DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index a9768f823290..7b2a1e6a8810 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
- dev_err(&pdev->dev, "cannot find phy device\n");
+ DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
}
@@ -40,7 +40,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
of_node_put(phy_node);
if (!phy_pdev || !msm_dsi->phy) {
- dev_err(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}
@@ -210,7 +210,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
if (ret) {
- dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
goto fail;
}
@@ -222,7 +222,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
if (IS_ERR(msm_dsi->bridge)) {
ret = PTR_ERR(msm_dsi->bridge);
- dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
msm_dsi->bridge = NULL;
goto fail;
}
@@ -244,7 +244,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
if (IS_ERR(msm_dsi->connector)) {
ret = PTR_ERR(msm_dsi->connector);
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
msm_dsi->connector = NULL;
goto fail;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9c6c523eacdc..38e481d2d606 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1050,7 +1050,7 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
msecs_to_jiffies(70));
if (ret <= 0)
- dev_err(dev, "wait for video done timed out\n");
+ DRM_DEV_ERROR(dev, "wait for video done timed out\n");
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
@@ -1083,6 +1083,8 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
return PTR_ERR(data);
}
+ msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
+
msm_host->tx_size = msm_host->tx_gem_obj->size;
return 0;
@@ -1118,7 +1120,7 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
priv = dev->dev_private;
if (msm_host->tx_gem_obj) {
- msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+ msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
msm_host->tx_gem_obj = NULL;
}
@@ -1248,7 +1250,7 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
if (!dma_base)
return -EINVAL;
- return msm_gem_get_iova(msm_host->tx_gem_obj,
+ return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
priv->kms->aspace, dma_base);
}
@@ -1673,7 +1675,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
prop = of_find_property(ep, "data-lanes", &len);
if (!prop) {
- dev_dbg(dev,
+ DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n");
return 0;
}
@@ -1681,7 +1683,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
num_lanes = len / sizeof(u32);
if (num_lanes < 1 || num_lanes > 4) {
- dev_err(dev, "bad number of data lanes\n");
+ DRM_DEV_ERROR(dev, "bad number of data lanes\n");
return -EINVAL;
}
@@ -1690,7 +1692,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
num_lanes);
if (ret) {
- dev_err(dev, "failed to read lane data\n");
+ DRM_DEV_ERROR(dev, "failed to read lane data\n");
return ret;
}
@@ -1711,7 +1713,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
*/
for (j = 0; j < num_lanes; j++) {
if (lane_map[j] < 0 || lane_map[j] > 3)
- dev_err(dev, "bad physical lane entry %u\n",
+ DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
lane_map[j]);
if (swap[lane_map[j]] != j)
@@ -1742,13 +1744,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
*/
endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
if (!endpoint) {
- dev_dbg(dev, "%s: no endpoint\n", __func__);
+ DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
return 0;
}
ret = dsi_host_parse_lane_data(msm_host, endpoint);
if (ret) {
- dev_err(dev, "%s: invalid lane configuration %d\n",
+ DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
__func__, ret);
ret = -EINVAL;
goto err;
@@ -1757,7 +1759,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
/* Get panel node from the output port's endpoint data */
device_node = of_graph_get_remote_node(np, 1, 0);
if (!device_node) {
- dev_dbg(dev, "%s: no valid device\n", __func__);
+ DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
ret = -ENODEV;
goto err;
}
@@ -1768,7 +1770,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
if (IS_ERR(msm_host->sfpb)) {
- dev_err(dev, "%s: failed to get sfpb regmap\n",
+ DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
__func__);
ret = PTR_ERR(msm_host->sfpb);
}
@@ -1918,7 +1920,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (msm_host->irq < 0) {
ret = msm_host->irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
return ret;
}
@@ -1926,7 +1928,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"dsi_isr", msm_host);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
msm_host->irq, ret);
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 9a9fa0c75a13..1760483b247e 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -404,7 +404,7 @@ static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
ret = devm_regulator_bulk_get(dev, num, s);
if (ret < 0) {
- dev_err(dev, "%s: failed to init regulator, ret=%d\n",
+ DRM_DEV_ERROR(dev, "%s: failed to init regulator, ret=%d\n",
__func__, ret);
return ret;
}
@@ -441,7 +441,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
ret = regulator_set_load(s[i].consumer,
regs[i].enable_load);
if (ret < 0) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"regulator %d set op mode failed, %d\n",
i, ret);
goto fail;
@@ -451,7 +451,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
ret = regulator_bulk_enable(num, s);
if (ret < 0) {
- dev_err(dev, "regulator enable failed, %d\n", ret);
+ DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
goto fail;
}
@@ -472,7 +472,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
- dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+ DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
pm_runtime_put_sync(dev);
}
@@ -543,7 +543,7 @@ int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
"DSI_PHY_REG");
if (IS_ERR(phy->reg_base)) {
- dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n",
__func__);
ret = -ENOMEM;
goto fail;
@@ -574,7 +574,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->id = dsi_phy_get_id(phy);
if (phy->id < 0) {
ret = phy->id;
- dev_err(dev, "%s: couldn't identify PHY index, %d\n",
+ DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
__func__, ret);
goto fail;
}
@@ -584,20 +584,20 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR(phy->base)) {
- dev_err(dev, "%s: failed to map phy base\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
ret = -ENOMEM;
goto fail;
}
ret = dsi_phy_regulator_init(phy);
if (ret) {
- dev_err(dev, "%s: failed to init regulator\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to init regulator\n", __func__);
goto fail;
}
phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk)) {
- dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk);
goto fail;
}
@@ -617,7 +617,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
if (IS_ERR_OR_NULL(phy->pll))
- dev_info(dev,
+ DRM_DEV_INFO(dev,
"%s: pll init failed: %ld, need separate pll clk driver\n",
__func__, PTR_ERR(phy->pll));
@@ -675,21 +675,21 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = dsi_phy_enable_resource(phy);
if (ret) {
- dev_err(dev, "%s: resource enable failed, %d\n",
+ DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
__func__, ret);
goto res_en_fail;
}
ret = dsi_phy_regulator_enable(phy);
if (ret) {
- dev_err(dev, "%s: regulator enable failed, %d\n",
+ DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
goto reg_en_fail;
}
ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
if (ret) {
- dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
+ DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
goto phy_en_fail;
}
@@ -702,7 +702,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
if (phy->usecase != MSM_DSI_PHY_SLAVE) {
ret = msm_dsi_pll_restore_state(phy->pll);
if (ret) {
- dev_err(dev, "%s: failed to restore pll state, %d\n",
+ DRM_DEV_ERROR(dev, "%s: failed to restore pll state, %d\n",
__func__, ret);
goto pll_restor_fail;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index b3fffc8dbb2a..44959e79ce28 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -93,7 +93,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -172,7 +172,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
- dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
@@ -196,7 +196,7 @@ static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
- dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 513f4234adc1..a172c667e8bc 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -64,7 +64,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
void __iomem *lane_base = phy->lane_base;
if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -115,7 +115,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
- dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
@@ -142,7 +142,7 @@ static int dsi_14nm_phy_init(struct msm_dsi_phy *phy)
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
- dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index 1ca6c69516f5..9ea9478d3707 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -82,7 +82,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 4972b52cbe44..c79505d97fe8 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -76,7 +76,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 398004463498..98790b44da48 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -132,7 +132,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 613e206fa4fc..7a1fb4da2ad3 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -175,7 +175,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
}
if (IS_ERR(pll)) {
- dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to init DSI PLL\n", __func__);
return pll;
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 4c03f0b7343e..aabab6311043 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -17,7 +17,7 @@
* | |
* | |
* +---------+ | +----------+ | +----+
- * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
* +---------+ | +----------+ | +----+
* | |
* | | dsi0_pll_by_2_bit_clk
@@ -25,7 +25,7 @@
* | | +----+ | |\ dsi0_pclk_mux
* | |--| /2 |--o--| \ |
* | | +----+ | \ | +---------+
- * | --------------| |--o--| div_7_4 |-- dsi0pll
+ * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
* |------------------------------| / +---------+
* | +-----+ | /
* -----------| /4? |--o----------|/
@@ -39,6 +39,8 @@
#define DSI_PIXEL_PLL_CLK 1
#define NUM_PROVIDED_CLKS 2
+#define VCO_REF_CLK_RATE 19200000
+
struct dsi_pll_regs {
u32 pll_prop_gain_rate;
u32 pll_lockdet_rate;
@@ -316,7 +318,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
parent_rate);
pll_10nm->vco_current_rate = rate;
- pll_10nm->vco_ref_clk_rate = parent_rate;
+ pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
dsi_pll_setup_config(pll_10nm);
@@ -688,7 +690,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
hws[num++] = hw;
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
@@ -737,7 +739,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
hws[num++] = hw;
- snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
@@ -760,7 +762,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
pll_10nm->hw_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -788,13 +790,13 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
- dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
return ERR_PTR(-ENOMEM);
}
pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
- dev_err(&pdev->dev, "failed to map PLL base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
return ERR_PTR(-ENOMEM);
}
@@ -813,7 +815,7 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
ret = pll_10nm_register(pll_10nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
index 71fe60e5f01f..0e18cddd6f22 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
@@ -783,7 +783,7 @@ static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
POLL_TIMEOUT_US);
if (unlikely(!locked))
- dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@@ -829,7 +829,7 @@ static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
- dev_err(&pll_14nm->pdev->dev,
+ DRM_DEV_ERROR(&pll_14nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -1039,7 +1039,7 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
pll_14nm->hw_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -1067,13 +1067,13 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
- dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
return ERR_PTR(-ENOMEM);
}
pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
- dev_err(&pdev->dev, "failed to map PLL base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
return ERR_PTR(-ENOMEM);
}
@@ -1096,7 +1096,7 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
ret = pll_14nm_register(pll_14nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 26e3a01a99c2..dcbbaeb1b1fb 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -156,7 +156,7 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (rate <= lpfr_lut[i].vco_rate)
break;
if (i == LPFR_LUT_SIZE) {
- dev_err(dev, "unable to get loop filter resistance. vco=%lu\n",
+ DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
rate);
return -EINVAL;
}
@@ -386,7 +386,7 @@ static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
}
if (unlikely(!locked))
- dev_err(dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL Lock success");
@@ -429,7 +429,7 @@ static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked))
- dev_err(dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@@ -468,7 +468,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
- dev_err(&pll_28nm->pdev->dev,
+ DRM_DEV_ERROR(&pll_28nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -581,7 +581,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
ret = of_clk_add_provider(dev->of_node,
of_clk_src_onecell_get, &pll_28nm->clk_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -607,7 +607,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
- dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
return ERR_PTR(-ENOMEM);
}
@@ -633,13 +633,13 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
pll->en_seq_cnt = 1;
pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
} else {
- dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type);
+ DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
return ERR_PTR(-EINVAL);
}
ret = pll_28nm_register(pll_28nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
index 49008451085b..d6897464755f 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -327,7 +327,7 @@ static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked))
- dev_err(dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@@ -368,7 +368,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
- dev_err(&pll_28nm->pdev->dev,
+ DRM_DEV_ERROR(&pll_28nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -482,7 +482,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
ret = of_clk_add_provider(dev->of_node,
of_clk_src_onecell_get, &pll_28nm->clk_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -508,7 +508,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
- dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
return ERR_PTR(-ENOMEM);
}
@@ -526,7 +526,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
ret = pll_28nm_register(pll_28nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0940e84b2821..6a63aba98a30 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -157,7 +157,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp->bridge = msm_edp_bridge_init(edp);
if (IS_ERR(edp->bridge)) {
ret = PTR_ERR(edp->bridge);
- dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret);
edp->bridge = NULL;
goto fail;
}
@@ -165,7 +165,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp->connector = msm_edp_connector_init(edp);
if (IS_ERR(edp->connector)) {
ret = PTR_ERR(edp->connector);
- dev_err(dev->dev, "failed to create eDP connector: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret);
edp->connector = NULL;
goto fail;
}
@@ -173,7 +173,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (edp->irq < 0) {
ret = edp->irq;
- dev_err(dev->dev, "failed to get IRQ: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret);
goto fail;
}
@@ -181,7 +181,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"edp_isr", edp);
if (ret < 0) {
- dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
edp->irq, ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 23670907a29d..e247d6942a49 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -98,7 +98,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
- dev_err(&pdev->dev, "cannot find phy device\n");
+ DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
}
@@ -109,7 +109,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
of_node_put(phy_node);
if (!phy_pdev || !hdmi->phy) {
- dev_err(&pdev->dev, "phy driver is not ready\n");
+ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
return -EPROBE_DEFER;
}
@@ -153,7 +153,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->qfprom_mmio = msm_ioremap(pdev,
config->qfprom_mmio_name, "HDMI_QFPROM");
if (IS_ERR(hdmi->qfprom_mmio)) {
- dev_info(&pdev->dev, "can't find qfprom resource\n");
+ DRM_DEV_INFO(&pdev->dev, "can't find qfprom resource\n");
hdmi->qfprom_mmio = NULL;
}
@@ -172,7 +172,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
config->hpd_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
@@ -195,7 +195,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
config->pwr_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
goto fail;
}
@@ -217,7 +217,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
goto fail;
}
@@ -239,7 +239,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
goto fail;
}
@@ -254,14 +254,14 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->i2c = msm_hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
ret = PTR_ERR(hdmi->i2c);
- dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get i2c: %d\n", ret);
hdmi->i2c = NULL;
goto fail;
}
ret = msm_hdmi_get_phy(hdmi);
if (ret) {
- dev_err(&pdev->dev, "failed to get phy\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to get phy\n");
goto fail;
}
@@ -303,7 +303,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->bridge = msm_hdmi_bridge_init(hdmi);
if (IS_ERR(hdmi->bridge)) {
ret = PTR_ERR(hdmi->bridge);
- dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
hdmi->bridge = NULL;
goto fail;
}
@@ -311,7 +311,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->connector = msm_hdmi_connector_init(hdmi);
if (IS_ERR(hdmi->connector)) {
ret = PTR_ERR(hdmi->connector);
- dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret);
hdmi->connector = NULL;
goto fail;
}
@@ -319,7 +319,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (hdmi->irq < 0) {
ret = hdmi->irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@@ -327,11 +327,17 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"hdmi_isr", hdmi);
if (ret < 0) {
- dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
hdmi->irq, ret);
goto fail;
}
+ ret = msm_hdmi_hpd_enable(hdmi->connector);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
+ goto fail;
+ }
+
encoder->bridge = hdmi->bridge;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
@@ -476,7 +482,7 @@ static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
unsigned int level_shift = 0; /* 0dB */
bool down_mix = false;
- dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
+ DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
params->sample_width, params->cea.channels);
switch (params->cea.channels) {
@@ -527,7 +533,7 @@ static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
rate = HDMI_SAMPLE_RATE_192KHZ;
break;
default:
- dev_err(dev, "rate[%d] not supported!\n",
+ DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
params->sample_rate);
return -EINVAL;
}
@@ -571,7 +577,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
- static struct hdmi_platform_config *hdmi_cfg;
+ struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
int i, err;
@@ -579,7 +585,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg = (struct hdmi_platform_config *)
of_device_get_match_data(dev);
if (!hdmi_cfg) {
- dev_err(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
+ DRM_DEV_ERROR(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
return -ENXIO;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index accc9a61611d..5c5df6ab2a57 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -245,6 +245,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
void msm_hdmi_connector_irq(struct drm_connector *connector);
struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
+int msm_hdmi_hpd_enable(struct drm_connector *connector);
/*
* i2c adapter for ddc:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 5ed4cab2819f..7ba6f52ed72c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -40,7 +40,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_enable(hdmi->pwr_regs[i]);
if (ret) {
- dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
}
}
@@ -49,7 +49,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
DBG("pixclock: %lu", hdmi->pixclock);
ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
if (ret) {
- dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
config->pwr_clk_names[0], ret);
}
}
@@ -57,7 +57,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_clk_cnt; i++) {
ret = clk_prepare_enable(hdmi->pwr_clks[i]);
if (ret) {
- dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
}
}
@@ -82,7 +82,7 @@ static void power_off(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_disable(hdmi->pwr_regs[i]);
if (ret) {
- dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
}
}
@@ -106,7 +106,7 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (len < 0) {
- dev_err(&hdmi->pdev->dev,
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
"failed to configure avi infoframe\n");
return;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index e9c9a0af508e..a6eeab2c4dc3 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -90,7 +90,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
if (gpio.num != -1) {
ret = gpio_request(gpio.num, gpio.label);
if (ret) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"'%s'(%d) gpio_request failed: %d\n",
gpio.label, gpio.num, ret);
goto err;
@@ -156,7 +156,7 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
ret = clk_prepare_enable(hdmi->hpd_clks[i]);
if (ret) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"failed to enable hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
}
@@ -167,8 +167,9 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
}
}
-static int hpd_enable(struct hdmi_connector *hdmi_connector)
+int msm_hdmi_hpd_enable(struct drm_connector *connector)
{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
@@ -179,7 +180,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
if (ret) {
- dev_err(dev, "failed to enable hpd regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to enable hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
@@ -187,13 +188,13 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
ret = pinctrl_pm_select_default_state(dev);
if (ret) {
- dev_err(dev, "pinctrl state chg failed: %d\n", ret);
+ DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
goto fail;
}
ret = gpio_config(hdmi, true);
if (ret) {
- dev_err(dev, "failed to configure GPIOs: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to configure GPIOs: %d\n", ret);
goto fail;
}
@@ -450,7 +451,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
{
struct drm_connector *connector = NULL;
struct hdmi_connector *hdmi_connector;
- int ret;
hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
if (!hdmi_connector)
@@ -471,12 +471,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = hpd_enable(hdmi_connector);
- if (ret) {
- dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
- return ERR_PTR(ret);
- }
-
drm_connector_attach_encoder(connector, hdmi->encoder);
return connector;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index 73e20219d431..25d2fe2c60e8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -66,7 +66,7 @@ static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
} while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
if (!retry) {
- dev_err(dev->dev, "timeout waiting for DDC\n");
+ DRM_DEV_ERROR(dev->dev, "timeout waiting for DDC\n");
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 4157722d6b4d..1f4331ed69bd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -37,7 +37,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
reg = devm_regulator_get(dev, cfg->reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev, "failed to get phy regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n",
cfg->reg_names[i], ret);
return ret;
}
@@ -51,7 +51,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(dev, "failed to get phy clock: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to get phy clock: %s (%d)\n",
cfg->clk_names[i], ret);
return ret;
}
@@ -73,14 +73,14 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
for (i = 0; i < cfg->num_regs; i++) {
ret = regulator_enable(phy->regs[i]);
if (ret)
- dev_err(dev, "failed to enable regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to enable regulator: %s (%d)\n",
cfg->reg_names[i], ret);
}
for (i = 0; i < cfg->num_clks; i++) {
ret = clk_prepare_enable(phy->clks[i]);
if (ret)
- dev_err(dev, "failed to enable clock: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to enable clock: %s (%d)\n",
cfg->clk_names[i], ret);
}
@@ -159,7 +159,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY");
if (IS_ERR(phy->mmio)) {
- dev_err(dev, "%s: failed to map phy base\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
return -ENOMEM;
}
@@ -177,7 +177,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type);
if (ret) {
- dev_err(dev, "couldn't init PLL\n");
+ DRM_DEV_ERROR(dev, "couldn't init PLL\n");
msm_hdmi_phy_resource_disable(phy);
return ret;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index 0df504c61833..318708f26731 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -725,7 +725,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
if (IS_ERR(pll->mmio_qserdes_com)) {
- dev_err(dev, "failed to map pll base\n");
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
@@ -737,7 +737,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label);
if (IS_ERR(pll->mmio_qserdes_tx[i])) {
- dev_err(dev, "failed to map pll base\n");
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
}
@@ -745,7 +745,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
clk = devm_clk_register(dev, &pll->clk_hw);
if (IS_ERR(clk)) {
- dev_err(dev, "failed to register pll clock\n");
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 99590758c68b..c6dae6e437f9 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -445,7 +445,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
if (IS_ERR(pll->mmio)) {
- dev_err(dev, "failed to map pll base\n");
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
@@ -454,7 +454,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
clk = devm_clk_register(dev, &pll->clk_hw);
if (IS_ERR(clk)) {
- dev_err(dev, "failed to register pll clock\n");
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 4bcdeca7479d..f5b1256e32b6 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -34,7 +34,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
if (!new_crtc_state->active)
continue;
+ if (drm_crtc_vblank_get(crtc))
+ continue;
+
kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+
+ drm_crtc_vblank_put(crtc);
}
}
@@ -78,7 +83,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
kms->funcs->commit(kms, state);
}
- msm_atomic_wait_for_commit_done(dev, state);
+ if (!state->legacy_cursor_update)
+ msm_atomic_wait_for_commit_done(dev, state);
kms->funcs->complete_commit(kms, state);
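
Editor's note: two behavioural changes ride along in msm_atomic.c: the commit-done wait now takes a vblank reference for each active CRTC so the vblank interrupt it relies on stays enabled while waiting, and the wait is skipped for legacy cursor updates so cursor movement does not stall behind a full frame. With the first hunk applied, the wait loop reads roughly as follows (a sketch; crtc, new_crtc_state and i come from the surrounding function):

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->active)
			continue;

		/* keep vblank interrupts enabled while waiting; skip the
		 * CRTC if the reference cannot be taken
		 */
		if (drm_crtc_vblank_get(crtc))
			continue;

		kms->funcs->wait_for_crtc_commit_done(kms, crtc);

		drm_crtc_vblank_put(crtc);
	}
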
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index f0da0d3c8a80..fb423d309e91 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -84,7 +84,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto free_priv;
pm_runtime_get_sync(&gpu->pdev->dev);
show_priv->state = gpu->funcs->gpu_state_get(gpu);
@@ -94,13 +94,20 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
if (IS_ERR(show_priv->state)) {
ret = PTR_ERR(show_priv->state);
- kfree(show_priv);
- return ret;
+ goto free_priv;
}
show_priv->dev = dev;
- return single_open(file, msm_gpu_show, show_priv);
+ ret = single_open(file, msm_gpu_show, show_priv);
+ if (ret)
+ goto free_priv;
+
+ return 0;
+
+free_priv:
+ kfree(show_priv);
+ return ret;
}
static const struct file_operations msm_gpu_fops = {
@@ -194,13 +201,13 @@ static int late_init_minor(struct drm_minor *minor)
ret = msm_rd_debugfs_init(minor);
if (ret) {
- dev_err(minor->dev->dev, "could not install rd debugfs\n");
+ DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
return ret;
}
ret = msm_perf_debugfs_init(minor);
if (ret) {
- dev_err(minor->dev->dev, "could not install perf debugfs\n");
+ DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
return ret;
}
@@ -228,14 +235,14 @@ int msm_debugfs_init(struct drm_minor *minor)
minor->debugfs_root, minor);
if (ret) {
- dev_err(dev->dev, "could not install msm_debugfs_list\n");
+ DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n");
return ret;
}
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
- if (priv->kms->funcs->debugfs_init) {
+ if (priv->kms && priv->kms->funcs->debugfs_init) {
ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
if (ret)
return ret;
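
Editor's note: the msm_gpu_open() rework above is the usual single-exit cleanup idiom — every failure after the allocation funnels through one free_priv label, so the early mutex_lock_interruptible() return can no longer leak show_priv and the single_open() result is finally checked. Reduced to its shape (a sketch, not the full function):

	show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
	if (!show_priv)
		return -ENOMEM;		/* nothing allocated yet, plain return is fine */

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto free_priv;		/* show_priv must be freed on every later failure */

	/* ... snapshot GPU state, then hand show_priv to seq_file ... */
	ret = single_open(file, msm_gpu_show, show_priv);
	if (ret)
		goto free_priv;

	return 0;			/* success: release() frees show_priv later */

free_priv:
	kfree(show_priv);
	return ret;
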
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 5e758d95751a..d2cdc7b553fe 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -23,8 +23,10 @@
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
+#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
+#include "adreno/adreno_gpu.h"
/*
@@ -35,9 +37,11 @@
* - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
* SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
* MSM_GEM_INFO ioctl.
+ * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
+ * GEM object's debug name
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 3
+#define MSM_VERSION_MINOR 4
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -170,7 +174,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
return ERR_PTR(-EINVAL);
}
@@ -178,7 +182,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
if (!ptr) {
- dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
}
@@ -358,6 +362,14 @@ static int get_mdp_ver(struct platform_device *pdev)
#include <linux/of_address.h>
+bool msm_use_mmu(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* a2xx comes with its own MMU */
+ return priv->is_a2xx || iommu_present(&platform_bus_type);
+}
+
static int msm_init_vram(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -396,7 +408,7 @@ static int msm_init_vram(struct drm_device *dev)
* Grab the entire CMA chunk carved out in early startup in
* mach-msm:
*/
- } else if (!iommu_present(&platform_bus_type)) {
+ } else if (!msm_use_mmu(dev)) {
DRM_INFO("using %s VRAM carveout\n", vram);
size = memparse(vram, NULL);
}
@@ -419,12 +431,12 @@ static int msm_init_vram(struct drm_device *dev)
p = dma_alloc_attrs(dev->dev, size,
&priv->vram.paddr, GFP_KERNEL, attrs);
if (!p) {
- dev_err(dev->dev, "failed to allocate VRAM\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
priv->vram.paddr = 0;
return -ENOMEM;
}
- dev_info(dev->dev, "VRAM: %08x->%08x\n",
+ DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
(uint32_t)priv->vram.paddr,
(uint32_t)(priv->vram.paddr + size));
}
@@ -444,7 +456,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ddev = drm_dev_alloc(drv, dev);
if (IS_ERR(ddev)) {
- dev_err(dev, "failed to allocate drm_device\n");
+ DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
return PTR_ERR(ddev);
}
@@ -508,19 +520,16 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv->kms = kms;
break;
default:
- kms = ERR_PTR(-ENODEV);
+ /* valid only for the dummy headless case, where of_node=NULL */
+ WARN_ON(dev->of_node);
+ kms = NULL;
break;
}
if (IS_ERR(kms)) {
- /*
- * NOTE: once we have GPU support, having no kms should not
- * be considered fatal.. ideally we would still support gpu
- * and (for example) use dmabuf/prime to share buffers with
- * imx drm driver on iMX5
- */
- dev_err(dev, "failed to load kms\n");
+ DRM_DEV_ERROR(dev, "failed to load kms\n");
ret = PTR_ERR(kms);
+ priv->kms = NULL;
goto err_msm_uninit;
}
@@ -530,7 +539,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (kms) {
ret = kms->funcs->hw_init(kms);
if (ret) {
- dev_err(dev, "kms hw init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
goto err_msm_uninit;
}
}
@@ -554,17 +563,18 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
kthread_run(kthread_worker_fn,
&priv->disp_thread[i].worker,
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
- ret = sched_setscheduler(priv->disp_thread[i].thread,
- SCHED_FIFO, &param);
- if (ret)
- pr_warn("display thread priority update failed: %d\n",
- ret);
-
if (IS_ERR(priv->disp_thread[i].thread)) {
- dev_err(dev, "failed to create crtc_commit kthread\n");
+ DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
priv->disp_thread[i].thread = NULL;
+ goto err_msm_uninit;
}
+ ret = sched_setscheduler(priv->disp_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ dev_warn(dev, "disp_thread set priority failed: %d\n",
+ ret);
+
/* initialize event thread */
priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->event_thread[i].worker);
@@ -573,6 +583,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
kthread_run(kthread_worker_fn,
&priv->event_thread[i].worker,
"crtc_event:%d", priv->event_thread[i].crtc_id);
+ if (IS_ERR(priv->event_thread[i].thread)) {
+ DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
+ priv->event_thread[i].thread = NULL;
+ goto err_msm_uninit;
+ }
+
/**
* event thread should also run at same priority as disp_thread
* because it is handling frame_done events. A lower priority
@@ -581,39 +597,15 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
* failure at crtc commit level.
*/
ret = sched_setscheduler(priv->event_thread[i].thread,
- SCHED_FIFO, &param);
+ SCHED_FIFO, &param);
if (ret)
- pr_warn("display event thread priority update failed: %d\n",
- ret);
-
- if (IS_ERR(priv->event_thread[i].thread)) {
- dev_err(dev, "failed to create crtc_event kthread\n");
- priv->event_thread[i].thread = NULL;
- }
-
- if ((!priv->disp_thread[i].thread) ||
- !priv->event_thread[i].thread) {
- /* clean up previously created threads if any */
- for ( ; i >= 0; i--) {
- if (priv->disp_thread[i].thread) {
- kthread_stop(
- priv->disp_thread[i].thread);
- priv->disp_thread[i].thread = NULL;
- }
-
- if (priv->event_thread[i].thread) {
- kthread_stop(
- priv->event_thread[i].thread);
- priv->event_thread[i].thread = NULL;
- }
- }
- goto err_msm_uninit;
- }
+ dev_warn(dev, "event_thread set priority failed:%d\n",
+ ret);
}
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
- dev_err(dev, "failed to initialize vblank\n");
+ DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
goto err_msm_uninit;
}
@@ -622,7 +614,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ret = drm_irq_install(ddev, kms->irq);
pm_runtime_put_sync(dev);
if (ret < 0) {
- dev_err(dev, "failed to install IRQ handler\n");
+ DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
goto err_msm_uninit;
}
}
@@ -634,7 +626,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
drm_mode_config_reset(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (fbdev)
+ if (kms && fbdev)
priv->fbdev = msm_fbdev_init(ddev);
#endif
@@ -742,7 +734,11 @@ static int msm_irq_postinstall(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
- return kms->funcs->irq_postinstall(kms);
+
+ if (kms->funcs->irq_postinstall)
+ return kms->funcs->irq_postinstall(kms);
+
+ return 0;
}
static void msm_irq_uninstall(struct drm_device *dev)
@@ -809,7 +805,7 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
}
return msm_gem_new_handle(dev, file, args->size,
- args->flags, &args->handle);
+ args->flags, &args->handle, NULL);
}
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
@@ -867,6 +863,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
if (!priv->gpu)
return -EINVAL;
+ /*
+ * Don't pin the memory here - just get an address so that userspace can
+ * be productive
+ */
return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}
@@ -875,23 +875,66 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
{
struct drm_msm_gem_info *args = data;
struct drm_gem_object *obj;
- int ret = 0;
+ struct msm_gem_object *msm_obj;
+ int i, ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
- if (args->flags & ~MSM_INFO_FLAGS)
+ switch (args->info) {
+ case MSM_INFO_GET_OFFSET:
+ case MSM_INFO_GET_IOVA:
+ /* value returned as immediate, not pointer, so len==0: */
+ if (args->len)
+ return -EINVAL;
+ break;
+ case MSM_INFO_SET_NAME:
+ case MSM_INFO_GET_NAME:
+ break;
+ default:
return -EINVAL;
+ }
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
- if (args->flags & MSM_INFO_IOVA) {
- uint64_t iova;
+ msm_obj = to_msm_bo(obj);
- ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
- if (!ret)
- args->offset = iova;
- } else {
- args->offset = msm_gem_mmap_offset(obj);
+ switch (args->info) {
+ case MSM_INFO_GET_OFFSET:
+ args->value = msm_gem_mmap_offset(obj);
+ break;
+ case MSM_INFO_GET_IOVA:
+ ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
+ break;
+ case MSM_INFO_SET_NAME:
+ /* length check should leave room for terminating null: */
+ if (args->len >= sizeof(msm_obj->name)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = copy_from_user(msm_obj->name,
+ u64_to_user_ptr(args->value), args->len);
+ msm_obj->name[args->len] = '\0';
+ for (i = 0; i < args->len; i++) {
+ if (!isprint(msm_obj->name[i])) {
+ msm_obj->name[i] = '\0';
+ break;
+ }
+ }
+ break;
+ case MSM_INFO_GET_NAME:
+ if (args->value && (args->len < strlen(msm_obj->name))) {
+ ret = -EINVAL;
+ break;
+ }
+ args->len = strlen(msm_obj->name);
+ if (args->value) {
+ ret = copy_to_user(u64_to_user_ptr(args->value),
+ msm_obj->name, args->len);
+ }
+ break;
}
drm_gem_object_put_unlocked(obj);
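
Editor's note: the hunk above turns GEM_INFO into a small multiplexed query/set interface keyed by args->info. A hypothetical userspace sketch of the new calls, assuming the drm_msm_gem_info layout (handle/info/value/len/pad) and the MSM_INFO_* constants from the updated include/uapi/drm/msm_drm.h, with fd an open render node and handle a valid GEM handle:

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>		/* drmIoctl() from libdrm */
	#include <drm/msm_drm.h>	/* kernel UAPI header */

	static int msm_bo_set_name(int fd, uint32_t handle, const char *name)
	{
		struct drm_msm_gem_info req = {
			.handle = handle,
			.info = MSM_INFO_SET_NAME,
			.value = (uintptr_t)name,	/* user pointer passed as u64 */
			.len = strlen(name),		/* kernel appends the NUL */
		};

		return drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
	}

	static int msm_bo_get_iova(int fd, uint32_t handle, uint64_t *iova)
	{
		struct drm_msm_gem_info req = {
			.handle = handle,
			.info = MSM_INFO_GET_IOVA,	/* value returned inline, len stays 0 */
		};
		int ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);

		if (!ret)
			*iova = req.value;
		return ret;
	}
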
@@ -1070,18 +1113,15 @@ static int msm_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
- struct msm_kms *kms = priv->kms;
- /* TODO: Use atomic helper suspend/resume */
- if (kms && kms->funcs && kms->funcs->pm_suspend)
- return kms->funcs->pm_suspend(dev);
-
- drm_kms_helper_poll_disable(ddev);
+ if (WARN_ON(priv->pm_state))
+ drm_atomic_state_put(priv->pm_state);
priv->pm_state = drm_atomic_helper_suspend(ddev);
if (IS_ERR(priv->pm_state)) {
- drm_kms_helper_poll_enable(ddev);
- return PTR_ERR(priv->pm_state);
+ int ret = PTR_ERR(priv->pm_state);
+ DRM_ERROR("Failed to suspend dpu, %d\n", ret);
+ return ret;
}
return 0;
@@ -1091,16 +1131,16 @@ static int msm_pm_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
- struct msm_kms *kms = priv->kms;
+ int ret;
- /* TODO: Use atomic helper suspend/resume */
- if (kms && kms->funcs && kms->funcs->pm_resume)
- return kms->funcs->pm_resume(dev);
+ if (WARN_ON(!priv->pm_state))
+ return -ENOENT;
- drm_atomic_helper_resume(ddev, priv->pm_state);
- drm_kms_helper_poll_enable(ddev);
+ ret = drm_atomic_helper_resume(ddev, priv->pm_state);
+ if (!ret)
+ priv->pm_state = NULL;
- return 0;
+ return ret;
}
#endif
@@ -1185,7 +1225,7 @@ static int add_components_mdp(struct device *mdp_dev,
ret = of_graph_parse_endpoint(ep_node, &ep);
if (ret) {
- dev_err(mdp_dev, "unable to parse port endpoint\n");
+ DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
of_node_put(ep_node);
return ret;
}
@@ -1207,8 +1247,10 @@ static int add_components_mdp(struct device *mdp_dev,
if (!intf)
continue;
- drm_of_component_match_add(master_dev, matchptr, compare_of,
- intf);
+ if (of_device_is_available(intf))
+ drm_of_component_match_add(master_dev, matchptr,
+ compare_of, intf);
+
of_node_put(intf);
}
@@ -1236,13 +1278,13 @@ static int add_display_components(struct device *dev,
of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
- dev_err(dev, "failed to populate children devices\n");
+ DRM_DEV_ERROR(dev, "failed to populate children devices\n");
return ret;
}
mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
if (!mdp_dev) {
- dev_err(dev, "failed to find MDSS MDP node\n");
+ DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
of_platform_depopulate(dev);
return -ENODEV;
}
@@ -1272,6 +1314,7 @@ static int add_display_components(struct device *dev,
static const struct of_device_id msm_gpu_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
+ { .compatible = "amd,imageon" },
{ .compatible = "qcom,kgsl-3d0" },
{ },
};
@@ -1316,9 +1359,11 @@ static int msm_pdev_probe(struct platform_device *pdev)
struct component_match *match = NULL;
int ret;
- ret = add_display_components(&pdev->dev, &match);
- if (ret)
- return ret;
+ if (get_mdp_ver(pdev)) {
+ ret = add_display_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
+ }
ret = add_gpu_components(&pdev->dev, &match);
if (ret)
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9d11f321f5a9..9cd6a96c6bf2 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -179,6 +179,8 @@ struct msm_drm_private {
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
+ /* gpu is only set on open(), but we need this info earlier */
+ bool is_a2xx;
struct drm_fb_helper *fbdev;
@@ -241,10 +243,16 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int npages);
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt);
+ struct msm_gem_vma *vma);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
@@ -252,9 +260,15 @@ struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
+struct msm_gem_address_space *
+msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
+ const char *name, uint64_t va_start, uint64_t va_end);
+
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+bool msm_use_mmu(struct drm_device *dev);
+
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -269,12 +283,14 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -301,7 +317,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle);
+ uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
@@ -312,9 +328,13 @@ void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova);
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+ struct msm_gem_address_space *aspace, bool locked);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
+
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 2a7348aeb38d..67dfd8d3dc12 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -66,7 +66,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
+ ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -81,7 +81,7 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
- msm_gem_put_iova(fb->obj[i], aspace);
+ msm_gem_unpin_iova(fb->obj[i], aspace);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
@@ -154,7 +154,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (!format) {
- dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+ DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
goto fail;
@@ -196,7 +196,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
if (ret) {
- dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "framebuffer init failed: %d\n", ret);
goto fail;
}
@@ -233,13 +233,15 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
}
if (IS_ERR(bo)) {
- dev_err(dev->dev, "failed to allocate buffer object\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate buffer object\n");
return ERR_CAST(bo);
}
+ msm_gem_object_set_name(bo, "stolenfb");
+
fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
if (IS_ERR(fb)) {
- dev_err(dev->dev, "failed to allocate fb\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
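
Editor's note: the framebuffer paths now use the explicit pin/unpin pair introduced in msm_gem.c further down — msm_gem_get_and_pin_iova() pins the backing pages and maps them for scanout, while msm_gem_unpin_iova() only drops the pin count; per the new msm_gem.c comments, the iova itself stays valid for the life of the object and is only torn down when it is purged or freed. A reduced sketch of the resulting pairing, with names from the two functions above:

	/* prepare: pin every plane so the display block can scan it out */
	for (i = 0; i < n; i++) {
		ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &iova);
		if (ret)
			return ret;
	}

	/* cleanup: drop the pin; the mapping is reclaimed later by the
	 * shrinker, mm notifier or object destruction, not here
	 */
	for (i = 0; i < n; i++)
		msm_gem_unpin_iova(fb->obj[i], aspace);
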
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 456622b46335..c03e860ba737 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -91,7 +91,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
sizes->surface_height, pitch, format);
if (IS_ERR(fb)) {
- dev_err(dev->dev, "failed to allocate fb\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
return PTR_ERR(fb);
}
@@ -104,15 +104,15 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr);
+ ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
if (ret) {
- dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock;
}
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
- dev_err(dev->dev, "failed to allocate fb info\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
goto fail_unlock;
}
@@ -176,7 +176,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
if (ret) {
- dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f59ca27a4a35..20c979a7fc9c 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -88,7 +88,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
p = get_pages_vram(obj, npages);
if (IS_ERR(p)) {
- dev_err(dev->dev, "could not get pages: %ld\n",
+ DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
PTR_ERR(p));
return p;
}
@@ -99,7 +99,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (IS_ERR(msm_obj->sgt)) {
void *ptr = ERR_CAST(msm_obj->sgt);
- dev_err(dev->dev, "failed to allocate sgt\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
msm_obj->sgt = NULL;
return ptr;
}
@@ -280,7 +280,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
ret = drm_gem_create_mmap_offset(obj);
if (ret) {
- dev_err(dev->dev, "could not allocate mmap offset\n");
+ DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
return 0;
}
@@ -352,63 +352,104 @@ put_iova(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&msm_obj->lock));
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
+ msm_gem_purge_vma(vma->aspace, vma);
+ msm_gem_close_vma(vma->aspace, vma);
del_vma(vma);
}
}
-/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
+static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
int ret = 0;
- mutex_lock(&msm_obj->lock);
-
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- mutex_unlock(&msm_obj->lock);
- return -EBUSY;
- }
+ WARN_ON(!mutex_is_locked(&msm_obj->lock));
vma = lookup_vma(obj, aspace);
if (!vma) {
- struct page **pages;
-
vma = add_vma(obj, aspace);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto unlock;
- }
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- pages = get_pages(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
+ ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
+ if (ret) {
+ del_vma(vma);
+ return ret;
}
-
- ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
- obj->size >> PAGE_SHIFT);
- if (ret)
- goto fail;
}
*iova = vma->iova;
+ return 0;
+}
+
+static int msm_gem_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma;
+ struct page **pages;
+
+ WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+ if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+ return -EBUSY;
+
+ vma = lookup_vma(obj, aspace);
+ if (WARN_ON(!vma))
+ return -EINVAL;
+
+ pages = get_pages(obj);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
+ obj->size >> PAGE_SHIFT);
+}
+
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ u64 local;
+ int ret;
+
+ mutex_lock(&msm_obj->lock);
+
+ ret = msm_gem_get_iova_locked(obj, aspace, &local);
+
+ if (!ret)
+ ret = msm_gem_pin_iova(obj, aspace);
+
+ if (!ret)
+ *iova = local;
mutex_unlock(&msm_obj->lock);
- return 0;
+ return ret;
+}
-fail:
- del_vma(vma);
-unlock:
+/*
+ * Get an iova but don't pin it. Doesn't need a put because iovas are currently
+ * valid for the life of the object
+ */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret;
+
+ mutex_lock(&msm_obj->lock);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova);
mutex_unlock(&msm_obj->lock);
+
return ret;
}
/* get iova without taking a reference, used in places where you have
- * already done a 'msm_gem_get_iova()'.
+ * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
*/
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
@@ -424,15 +465,24 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
return vma ? vma->iova : 0;
}
-void msm_gem_put_iova(struct drm_gem_object *obj,
+/*
+ * Unpin a iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc) decides
+ * to get rid of it
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
- // XXX TODO ..
- // NOTE: probably don't need a _locked() version.. we wouldn't
- // normally unmap here, but instead just mark that it could be
- // unmapped (if the iova refcnt drops to zero), but then later
- // if another _get_iova_locked() fails we can start unmapping
- // things that are no longer needed..
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma;
+
+ mutex_lock(&msm_obj->lock);
+ vma = lookup_vma(obj, aspace);
+
+ if (!WARN_ON(!vma))
+ msm_gem_unmap_vma(aspace, vma);
+
+ mutex_unlock(&msm_obj->lock);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -441,7 +491,7 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->pitch = align_pitch(args->width, args->bpp);
args->size = PAGE_ALIGN(args->pitch * args->height);
return msm_gem_new_handle(dev, file, args->size,
- MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+ MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -473,7 +523,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
mutex_lock(&msm_obj->lock);
if (WARN_ON(msm_obj->madv > madv)) {
- dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
msm_obj->madv, madv);
mutex_unlock(&msm_obj->lock);
return ERR_PTR(-EBUSY);
@@ -708,7 +758,7 @@ static void describe_fence(struct dma_fence *fence, const char *type,
struct seq_file *m)
{
if (!dma_fence_is_signaled(fence))
- seq_printf(m, "\t%9s: %s %s seq %u\n", type,
+ seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
fence->seqno);
@@ -739,16 +789,24 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
break;
}
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
- /* FIXME: we need to print the address space here too */
- list_for_each_entry(vma, &msm_obj->vmas, list)
- seq_printf(m, " %08llx", vma->iova);
+ seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
+
+ if (!list_empty(&msm_obj->vmas)) {
+
+ seq_puts(m, " vmas:");
- seq_printf(m, " %zu%s\n", obj->size, madv);
+ list_for_each_entry(vma, &msm_obj->vmas, list)
+ seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+ vma->iova, vma->mapped ? "mapped" : "unmapped",
+ vma->inuse);
+
+ seq_puts(m, "\n");
+ }
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
@@ -775,9 +833,10 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
int count = 0;
size_t size = 0;
+ seq_puts(m, " flags id ref offset kaddr size madv name\n");
list_for_each_entry(msm_obj, list, mm_list) {
struct drm_gem_object *obj = &msm_obj->base;
- seq_printf(m, " ");
+ seq_puts(m, " ");
msm_gem_describe(obj, m);
count++;
size += obj->size;
@@ -831,7 +890,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle)
+ uint32_t size, uint32_t flags, uint32_t *handle,
+ char *name)
{
struct drm_gem_object *obj;
int ret;
@@ -841,6 +901,9 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
if (IS_ERR(obj))
return PTR_ERR(obj);
+ if (name)
+ msm_gem_object_set_name(obj, "%s", name);
+
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
@@ -864,7 +927,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
case MSM_BO_WC:
break;
default:
- dev_err(dev->dev, "invalid cache flag: %x\n",
+ DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
return -EINVAL;
}
@@ -912,9 +975,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
size = PAGE_ALIGN(size);
- if (!iommu_present(&platform_bus_type))
+ if (!msm_use_mmu(dev))
use_vram = true;
- else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+ else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
use_vram = true;
if (WARN_ON(use_vram && !priv->vram.size))
@@ -989,8 +1052,8 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
int ret, npages;
/* if we don't have IOMMU, don't bother pretending we can import: */
- if (!iommu_present(&platform_bus_type)) {
- dev_err(dev->dev, "cannot import without IOMMU\n");
+ if (!msm_use_mmu(dev)) {
+ DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
return ERR_PTR(-EINVAL);
}
@@ -1040,24 +1103,30 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
return ERR_CAST(obj);
if (iova) {
- ret = msm_gem_get_iova(obj, aspace, iova);
- if (ret) {
- drm_gem_object_put(obj);
- return ERR_PTR(ret);
- }
+ ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+ if (ret)
+ goto err;
}
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
- msm_gem_put_iova(obj, aspace);
- drm_gem_object_put(obj);
- return ERR_CAST(vaddr);
+ msm_gem_unpin_iova(obj, aspace);
+ ret = PTR_ERR(vaddr);
+ goto err;
}
if (bo)
*bo = obj;
return vaddr;
+err:
+ if (locked)
+ drm_gem_object_put(obj);
+ else
+ drm_gem_object_put_unlocked(obj);
+
+ return ERR_PTR(ret);
+
}
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
@@ -1073,3 +1142,31 @@ void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
{
return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
+
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+ struct msm_gem_address_space *aspace, bool locked)
+{
+ if (IS_ERR_OR_NULL(bo))
+ return;
+
+ msm_gem_put_vaddr(bo);
+ msm_gem_unpin_iova(bo, aspace);
+
+ if (locked)
+ drm_gem_object_put(bo);
+ else
+ drm_gem_object_put_unlocked(bo);
+}
+
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(bo);
+ va_list ap;
+
+ if (!fmt)
+ return;
+
+ va_start(ap, fmt);
+ vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
+ va_end(ap);
+}
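The new msm_gem_object_set_name() above is just a bounded vsnprintf() into the 32-byte name field that msm_gem.h gains below, so names longer than 31 characters are silently truncated. A minimal userspace sketch of the same pattern (the struct and helper names here are hypothetical):

	#include <stdarg.h>
	#include <stdio.h>

	struct fake_bo {
		char name[32];	/* mirrors the new msm_gem_object::name field */
	};

	/* Same shape as msm_gem_object_set_name(): bounded, silently truncating. */
	static void set_name(struct fake_bo *bo, const char *fmt, ...)
	{
		va_list ap;

		if (!fmt)
			return;

		va_start(ap, fmt);
		vsnprintf(bo->name, sizeof(bo->name), fmt, ap);
		va_end(ap);
	}

	int main(void)
	{
		struct fake_bo bo;

		set_name(&bo, "ring%d", 3);	/* as the ringbuffer code does */
		printf("%s\n", bo.name);	/* prints "ring3" */
		return 0;
	}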
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c5d9bd3e47a8..2064fac871b8 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -41,6 +41,8 @@ struct msm_gem_vma {
uint64_t iova;
struct msm_gem_address_space *aspace;
struct list_head list; /* node in msm_gem_object::vmas */
+ bool mapped;
+ int inuse;
};
struct msm_gem_object {
@@ -91,6 +93,8 @@ struct msm_gem_object {
*/
struct drm_mm_node *vram_node;
struct mutex lock; /* Protects resources associated with bo */
+
+ char name[32]; /* Identifier to print for the debugfs files */
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -150,6 +154,7 @@ struct msm_gem_submit {
struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
+ u32 ident; /* An "identifier" for the submit for logging */
struct {
uint32_t type;
uint32_t size; /* in dwords */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index a90aedd6883a..12b983fc0b56 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -20,6 +20,7 @@
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
+#include "msm_gpu_trace.h"
/*
* Cmdstream submission:
@@ -48,7 +49,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->dev = dev;
submit->gpu = gpu;
submit->fence = NULL;
- submit->pid = get_pid(task_pid(current));
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
submit->ring = gpu->rb[queue->prio];
@@ -77,7 +77,7 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
static inline unsigned long __must_check
copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
+ if (access_ok(from, n))
return __copy_from_user_inatomic(to, from, n);
return -EFAULT;
}
@@ -114,8 +114,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
pagefault_disable();
}
+/* at least one of READ and/or WRITE flags should be set: */
+#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
- !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
+ !(submit_bo.flags & MANDATORY_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
@@ -167,7 +170,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
+ msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
@@ -270,7 +273,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
uint64_t iova;
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova(&msm_obj->base,
+ ret = msm_gem_get_and_pin_iova(&msm_obj->base,
submit->gpu->aspace, &iova);
if (ret)
@@ -318,6 +321,9 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
uint32_t *ptr;
int ret = 0;
+ if (!nr_relocs)
+ return 0;
+
if (offset % 4) {
DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
@@ -406,19 +412,19 @@ static void submit_cleanup(struct msm_gem_submit *submit)
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ static atomic_t ident = ATOMIC_INIT(0);
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
struct msm_gpu *gpu = priv->gpu;
- struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
int out_fence_fd = -1;
+ struct pid *pid = get_pid(task_pid(current));
unsigned i;
- int ret;
-
+ int ret, submitid;
if (!gpu)
return -ENXIO;
@@ -441,9 +447,16 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!queue)
return -ENOENT;
+ /* Get a unique identifier for the submission for logging purposes */
+ submitid = atomic_inc_return(&ident) - 1;
+
ring = gpu->rb[queue->prio];
+ trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
+ args->nr_bos, args->nr_cmds);
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+ struct dma_fence *in_fence;
+
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence)
@@ -453,11 +466,13 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context.
*/
- if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
+ ret = 0;
+ if (!dma_fence_match_context(in_fence, ring->fctx->context))
ret = dma_fence_wait(in_fence, true);
- if (ret)
- return ret;
- }
+
+ dma_fence_put(in_fence);
+ if (ret)
+ return ret;
}
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -478,6 +493,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
+ submit->pid = pid;
+ submit->ident = submitid;
+
if (args->flags & MSM_SUBMIT_SUDO)
submit->in_rb = true;
@@ -583,8 +601,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
out:
- if (in_fence)
- dma_fence_put(in_fence);
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
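The per-submission identifier added above comes from atomic_inc_return(&ident) - 1, so the first submit gets id 0 and each later one a unique increasing value that the new tracepoints use to correlate flush and retire events. A small userspace model of that allocation using C11 atomics (the helper name is hypothetical):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint ident;

	/* Equivalent of "atomic_inc_return(&ident) - 1": first caller gets 0. */
	static unsigned int next_submit_id(void)
	{
		return atomic_fetch_add(&ident, 1);
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			printf("submit id %u\n", next_submit_id());
		return 0;
	}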
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index ffbec224551b..557360788084 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -38,20 +38,72 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt)
+/* Actually unmap memory for the vma */
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
{
- if (!aspace || !vma->iova)
+ unsigned size = vma->node.size << PAGE_SHIFT;
+
+ /* Print a message if we try to purge a vma in use */
+ if (WARN_ON(vma->inuse > 0))
+ return;
+
+ /* Don't do anything if the memory isn't mapped */
+ if (!vma->mapped)
return;
- if (aspace->mmu) {
- unsigned size = vma->node.size << PAGE_SHIFT;
- aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
- }
+ if (aspace->mmu)
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
+
+ vma->mapped = false;
+}
+
+/* Remove reference counts for the mapping */
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ if (!WARN_ON(!vma->iova))
+ vma->inuse--;
+}
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+{
+ unsigned size = npages << PAGE_SHIFT;
+ int ret = 0;
+
+ if (WARN_ON(!vma->iova))
+ return -EINVAL;
+
+ /* Increase the usage counter */
+ vma->inuse++;
+
+ if (vma->mapped)
+ return 0;
+
+ vma->mapped = true;
+
+ if (aspace->mmu)
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ size, IOMMU_READ | IOMMU_WRITE);
+
+ if (ret)
+ vma->mapped = false;
+
+ return ret;
+}
+
+/* Close an iova. Warn if it is still in use */
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ if (WARN_ON(vma->inuse > 0 || vma->mapped))
+ return;
spin_lock(&aspace->lock);
- drm_mm_remove_node(&vma->node);
+ if (vma->iova)
+ drm_mm_remove_node(&vma->node);
spin_unlock(&aspace->lock);
vma->iova = 0;
@@ -59,18 +111,16 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
msm_gem_address_space_put(aspace);
}
-int
-msm_gem_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+/* Initialize a new vma and allocate an iova for it */
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int npages)
{
int ret;
- spin_lock(&aspace->lock);
- if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
- spin_unlock(&aspace->lock);
- return 0;
- }
+ if (WARN_ON(vma->iova))
+ return -EBUSY;
+ spin_lock(&aspace->lock);
ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
spin_unlock(&aspace->lock);
@@ -78,19 +128,14 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
return ret;
vma->iova = vma->node.start << PAGE_SHIFT;
+ vma->mapped = false;
- if (aspace->mmu) {
- unsigned size = npages << PAGE_SHIFT;
- ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
- size, IOMMU_READ | IOMMU_WRITE);
- }
-
- /* Get a reference to the aspace to keep it around */
kref_get(&aspace->kref);
- return ret;
+ return 0;
}
+
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name)
@@ -114,3 +159,26 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
return aspace;
}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
+ const char *name, uint64_t va_start, uint64_t va_end)
+{
+ struct msm_gem_address_space *aspace;
+ u64 size = va_end - va_start;
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&aspace->lock);
+ aspace->name = name;
+ aspace->mmu = msm_gpummu_new(dev, gpu);
+
+ drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
+ size >> PAGE_SHIFT);
+
+ kref_init(&aspace->kref);
+
+ return aspace;
+}
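The msm_gem_vma rework above splits the old map/unmap pair into init (allocate the iova), map (bump the pin count and map the pages on first use), unmap (drop a pin), purge (actually unmap once nothing holds a pin) and close (release the iova). A standalone sketch of just the mapped/inuse bookkeeping, assuming no real page tables are involved:

	#include <stdbool.h>
	#include <stdio.h>

	/* Userspace model of the new msm_gem_vma pin/map state (hypothetical). */
	struct vma {
		bool mapped;	/* pages are wired into the GPU page tables */
		int inuse;	/* pin count; > 0 means the mapping must stay */
	};

	static void vma_map(struct vma *v)
	{
		v->inuse++;		/* every get_and_pin bumps the pin count */
		if (!v->mapped)
			v->mapped = true;	/* first user actually maps the pages */
	}

	static void vma_unpin(struct vma *v)
	{
		v->inuse--;		/* drop a pin; the memory stays mapped */
	}

	static void vma_purge(struct vma *v)
	{
		if (v->inuse > 0)
			return;		/* still pinned: refuse to unmap */
		v->mapped = false;	/* shrinker/teardown may now unmap */
	}

	int main(void)
	{
		struct vma v = { 0 };

		vma_map(&v);	/* submit pins the bo */
		vma_map(&v);	/* second user pins again, no remap */
		vma_unpin(&v);
		vma_purge(&v);	/* no-op: inuse is still 1 */
		vma_unpin(&v);
		vma_purge(&v);	/* now actually unmapped */
		printf("mapped=%d inuse=%d\n", v.mapped, v.inuse);
		return 0;
	}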
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 11aac8337066..5f3eff304355 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -19,6 +19,8 @@
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
+#include "msm_gpu_trace.h"
+#include "adreno/adreno_gpu.h"
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
@@ -107,7 +109,7 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
&msm_devfreq_profile, "simple_ondemand", NULL);
if (IS_ERR(gpu->devfreq.devfreq)) {
- dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
gpu->devfreq.devfreq = NULL;
}
@@ -122,7 +124,7 @@ static int enable_pwrrail(struct msm_gpu *gpu)
if (gpu->gpu_reg) {
ret = regulator_enable(gpu->gpu_reg);
if (ret) {
- dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
return ret;
}
}
@@ -130,7 +132,7 @@ static int enable_pwrrail(struct msm_gpu *gpu)
if (gpu->gpu_cx) {
ret = regulator_enable(gpu->gpu_cx);
if (ret) {
- dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
return ret;
}
}
@@ -315,28 +317,28 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
/* Don't record write only objects */
-
state_bo->size = obj->base.size;
state_bo->iova = iova;
- /* Only store the data for buffer objects marked for read */
- if ((flags & MSM_SUBMIT_BO_READ)) {
+ /* Only store data for non-imported buffer objects marked for read */
+ if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
void *ptr;
state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
if (!state_bo->data)
- return;
+ goto out;
ptr = msm_gem_get_vaddr_active(&obj->base);
if (IS_ERR(ptr)) {
kvfree(state_bo->data);
- return;
+ state_bo->data = NULL;
+ goto out;
}
memcpy(state_bo->data, ptr, obj->base.size);
msm_gem_put_vaddr(&obj->base);
}
-
+out:
state->nr_bos++;
}
@@ -345,6 +347,10 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
{
struct msm_gpu_state *state;
+ /* Check if the target supports capturing crash state */
+ if (!gpu->funcs->gpu_state_get)
+ return;
+
/* Only save one crash state at a time */
if (gpu->crashstate)
return;
@@ -360,12 +366,15 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
if (submit) {
int i;
- state->bos = kcalloc(submit->nr_bos,
+ state->bos = kcalloc(submit->nr_cmds,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
- for (i = 0; state->bos && i < submit->nr_bos; i++)
- msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
- submit->bos[i].iova, submit->bos[i].flags);
+ for (i = 0; state->bos && i < submit->nr_cmds; i++) {
+ int idx = submit->cmd[i].idx;
+
+ msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
+ submit->bos[idx].iova, submit->bos[idx].flags);
+ }
}
/* Set the active crash state to be dumped on failure */
@@ -428,16 +437,15 @@ static void recover_worker(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
- dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+ DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit) {
struct task_struct *task;
- rcu_read_lock();
- task = pid_task(submit->pid, PIDTYPE_PID);
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
if (task) {
- comm = kstrdup(task->comm, GFP_ATOMIC);
+ comm = kstrdup(task->comm, GFP_KERNEL);
/*
* So slightly annoying, in other paths like
@@ -450,13 +458,13 @@ static void recover_worker(struct work_struct *work)
* about the submit going away.
*/
mutex_unlock(&dev->struct_mutex);
- cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
+ cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ put_task_struct(task);
mutex_lock(&dev->struct_mutex);
}
- rcu_read_unlock();
if (comm && cmd) {
- dev_err(dev->dev, "%s: offending task: %s (%s)\n",
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
gpu->name, comm, cmd);
msm_rd_dump_submit(priv->hangrd, submit,
@@ -539,11 +547,11 @@ static void hangcheck_handler(struct timer_list *t)
} else if (fence < ring->seqno) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
- dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
gpu->name, ring->id);
- dev_err(dev->dev, "%s: completed fence: %u\n",
+ DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
- dev_err(dev->dev, "%s: submitted fence: %u\n",
+ DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
gpu->name, ring->seqno);
queue_work(priv->wq, &gpu->recover_work);
@@ -659,15 +667,33 @@ out:
* Cmdstream submission/retirement:
*/
-static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ struct msm_gem_submit *submit)
{
+ int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+ volatile struct msm_gpu_submit_stats *stats;
+ u64 elapsed, clock = 0;
int i;
+ stats = &ring->memptrs->stats[index];
+ /* Convert 19.2 MHz alwayson ticks to nanoseconds for elapsed time */
+ elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
+ do_div(elapsed, 192);
+
+ /* Calculate the clock frequency from the number of CP cycles */
+ if (elapsed) {
+ clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
+ do_div(clock, elapsed);
+ }
+
+ trace_msm_gpu_submit_retired(submit, elapsed, clock,
+ stats->alwayson_start, stats->alwayson_end);
+
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
- msm_gem_put_iova(&msm_obj->base, gpu->aspace);
+ msm_gem_unpin_iova(&msm_obj->base, gpu->aspace);
drm_gem_object_put(&msm_obj->base);
}
@@ -690,7 +716,7 @@ static void retire_submits(struct msm_gpu *gpu)
list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
if (dma_fence_is_signaled(submit->fence))
- retire_submit(gpu, submit);
+ retire_submit(gpu, ring, submit);
}
}
}
@@ -751,7 +777,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* submit takes a reference to the bo and iova until retired: */
drm_gem_object_get(&msm_obj->base);
- msm_gem_get_iova(&msm_obj->base,
+ msm_gem_get_and_pin_iova(&msm_obj->base,
submit->gpu->aspace, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
@@ -800,7 +826,6 @@ static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
uint64_t va_start, uint64_t va_end)
{
- struct iommu_domain *iommu;
struct msm_gem_address_space *aspace;
int ret;
@@ -809,20 +834,27 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
* and have separate page tables per context. For now, to keep things
* simple and to get something working, just use a single address space:
*/
- iommu = iommu_domain_alloc(&platform_bus_type);
- if (!iommu)
- return NULL;
-
- iommu->geometry.aperture_start = va_start;
- iommu->geometry.aperture_end = va_end;
-
- dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+ if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
+ struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ iommu->geometry.aperture_start = va_start;
+ iommu->geometry.aperture_end = va_end;
+
+ DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+
+ aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
+ if (IS_ERR(aspace))
+ iommu_domain_free(iommu);
+ } else {
+ aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
+ va_start, va_end);
+ }
- aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
if (IS_ERR(aspace)) {
- dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
+ DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
PTR_ERR(aspace));
- iommu_domain_free(iommu);
return ERR_CAST(aspace);
}
@@ -871,14 +903,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->irq = platform_get_irq_byname(pdev, config->irqname);
if (gpu->irq < 0) {
ret = gpu->irq;
- dev_err(drm->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
goto fail;
}
ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
IRQF_TRIGGER_HIGH, gpu->name, gpu);
if (ret) {
- dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
+ DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
goto fail;
}
@@ -911,22 +943,25 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
config->va_start, config->va_end);
if (gpu->aspace == NULL)
- dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+ DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
else if (IS_ERR(gpu->aspace)) {
ret = PTR_ERR(gpu->aspace);
goto fail;
}
- memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
+ memptrs = msm_gem_kernel_new(drm,
+ sizeof(struct msm_rbmemptrs) * nr_rings,
MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
ret = PTR_ERR(memptrs);
- dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
+ DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
goto fail;
}
+ msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
+
if (nr_rings > ARRAY_SIZE(gpu->rb)) {
DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
ARRAY_SIZE(gpu->rb));
@@ -939,7 +974,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->rb[i])) {
ret = PTR_ERR(gpu->rb[i]);
- dev_err(drm->dev,
+ DRM_DEV_ERROR(drm->dev,
"could not create ringbuffer %d: %d\n", i, ret);
goto fail;
}
@@ -958,11 +993,7 @@ fail:
gpu->rb[i] = NULL;
}
- if (gpu->memptrs_bo) {
- msm_gem_put_vaddr(gpu->memptrs_bo);
- msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_put_unlocked(gpu->memptrs_bo);
- }
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
platform_set_drvdata(pdev, NULL);
return ret;
@@ -981,11 +1012,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
gpu->rb[i] = NULL;
}
- if (gpu->memptrs_bo) {
- msm_gem_put_vaddr(gpu->memptrs_bo);
- msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_put_unlocked(gpu->memptrs_bo);
- }
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
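retire_submit() above converts 19.2 MHz always-on counter ticks into nanoseconds (ticks * 10000 / 192) and then derives an average core clock in MHz as CP cycles per microsecond of elapsed time. A standalone check of that arithmetic with made-up counter deltas:

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical counter deltas for one retired submit. */
		uint64_t alwayson_ticks = 1920000;	/* 19.2 MHz ticks -> 100 ms */
		uint64_t cp_cycles = 50000000;		/* CP cycles in the same window */

		/* Same math as retire_submit(): ns = ticks * 10000 / 192. */
		uint64_t elapsed_ns = alwayson_ticks * 10000 / 192;

		/* cycles * 1000 / ns == cycles per microsecond == MHz. */
		uint64_t clock_mhz = elapsed_ns ? cp_cycles * 1000 / elapsed_ns : 0;

		/* Prints: elapsed=100000000 ns, clock=500 MHz */
		printf("elapsed=%" PRIu64 " ns, clock=%" PRIu64 " MHz\n",
		       elapsed_ns, clock_mhz);
		return 0;
	}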
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index f82bac086666..efb49bb64191 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -187,6 +187,7 @@ struct msm_gpu_state_bo {
u64 iova;
size_t size;
void *data;
+ bool encoded;
};
struct msm_gpu_state {
@@ -201,6 +202,7 @@ struct msm_gpu_state {
u32 wptr;
void *data;
int data_size;
+ bool encoded;
} ring[MSM_GPU_MAX_RINGS];
int nr_registers;
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
new file mode 100644
index 000000000000..1155118a27a1
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_MSM_GPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_GPU_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm_msm
+#define TRACE_INCLUDE_FILE msm_gpu_trace
+
+TRACE_EVENT(msm_gpu_submit,
+ TP_PROTO(pid_t pid, u32 ringid, u32 id, u32 nr_bos, u32 nr_cmds),
+ TP_ARGS(pid, ringid, id, nr_bos, nr_cmds),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, nr_cmds)
+ __field(u32, nr_bos)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->id = id;
+ __entry->ringid = ringid;
+ __entry->nr_bos = nr_bos;
+ __entry->nr_cmds = nr_cmds;
+ ),
+ TP_printk("id=%d pid=%d ring=%d bos=%d cmds=%d",
+ __entry->id, __entry->pid, __entry->ringid,
+ __entry->nr_bos, __entry->nr_cmds)
+);
+
+TRACE_EVENT(msm_gpu_submit_flush,
+ TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
+ TP_ARGS(submit, ticks),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, seqno)
+ __field(u64, ticks)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid_nr(submit->pid);
+ __entry->id = submit->ident;
+ __entry->ringid = submit->ring->id;
+ __entry->seqno = submit->seqno;
+ __entry->ticks = ticks;
+ ),
+ TP_printk("id=%d pid=%d ring=%d:%d ticks=%lld",
+ __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+ __entry->ticks)
+);
+
+
+TRACE_EVENT(msm_gpu_submit_retired,
+ TP_PROTO(struct msm_gem_submit *submit, u64 elapsed, u64 clock,
+ u64 start, u64 end),
+ TP_ARGS(submit, elapsed, clock, start, end),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, seqno)
+ __field(u64, elapsed)
+ __field(u64, clock)
+ __field(u64, start_ticks)
+ __field(u64, end_ticks)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid_nr(submit->pid);
+ __entry->id = submit->ident;
+ __entry->ringid = submit->ring->id;
+ __entry->seqno = submit->seqno;
+ __entry->elapsed = elapsed;
+ __entry->clock = clock;
+ __entry->start_ticks = start;
+ __entry->end_ticks = end;
+ ),
+ TP_printk("id=%d pid=%d ring=%d:%d elapsed=%lld ns mhz=%lld start=%lld end=%lld",
+ __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+ __entry->elapsed, __entry->clock,
+ __entry->start_ticks, __entry->end_ticks)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/msm
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/msm_gpu_tracepoints.c b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
new file mode 100644
index 000000000000..72c074f8c4f8
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "msm_gem.h"
+#include "msm_ringbuffer.h"
+
+#define CREATE_TRACE_POINTS
+#include "msm_gpu_trace.h"
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
new file mode 100644
index 000000000000..27312b553dd8
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "adreno/adreno_gpu.h"
+#include "adreno/a2xx.xml.h"
+
+struct msm_gpummu {
+ struct msm_mmu base;
+ struct msm_gpu *gpu;
+ dma_addr_t pt_base;
+ uint32_t *table;
+};
+#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)
+
+#define GPUMMU_VA_START SZ_16M
+#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
+#define GPUMMU_PAGE_SIZE SZ_4K
+#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
+
+static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
+ int cnt)
+{
+ return 0;
+}
+
+static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
+ int cnt)
+{
+}
+
+static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+ struct scatterlist *sg;
+ unsigned prot_bits = 0;
+ unsigned i, j;
+
+ if (prot & IOMMU_WRITE)
+ prot_bits |= 1;
+ if (prot & IOMMU_READ)
+ prot_bits |= 2;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ dma_addr_t addr = sg->dma_address;
+ for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
+ gpummu->table[idx] = addr | prot_bits;
+ addr += GPUMMU_PAGE_SIZE;
+ }
+ }
+
+ /* this could be improved by deferring the flush across multiple map() calls */
+ gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+ return 0;
+}
+
+static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+ unsigned i;
+
+ for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
+ gpummu->table[idx] = 0;
+
+ gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+ return 0;
+}
+
+static void msm_gpummu_destroy(struct msm_mmu *mmu)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+
+ dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+
+ kfree(gpummu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+ .attach = msm_gpummu_attach,
+ .detach = msm_gpummu_detach,
+ .map = msm_gpummu_map,
+ .unmap = msm_gpummu_unmap,
+ .destroy = msm_gpummu_destroy,
+};
+
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
+{
+ struct msm_gpummu *gpummu;
+
+ gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
+ if (!gpummu)
+ return ERR_PTR(-ENOMEM);
+
+ gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
+ GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
+ if (!gpummu->table) {
+ kfree(gpummu);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ gpummu->gpu = gpu;
+ msm_mmu_init(&gpummu->base, dev, &funcs);
+
+ return &gpummu->base;
+}
+
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+ dma_addr_t *tran_error)
+{
+ dma_addr_t base = to_msm_gpummu(mmu)->pt_base;
+
+ *pt_base = base;
+ *tran_error = base + TABLE_SIZE; /* 32-byte aligned */
+}
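msm_gpummu manages a single flat page table: one 32-bit entry per 4K page across the 0xfff * 64K virtual range starting at 16M, with bit 0 of each entry granting write and bit 1 granting read permission. A standalone sketch of the size and index arithmetic used by map/unmap (the example iova is hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	#define SZ_4K		0x1000u
	#define SZ_64K		0x10000u
	#define SZ_16M		0x1000000u

	#define GPUMMU_VA_START		SZ_16M
	#define GPUMMU_VA_RANGE		(0xfffu * SZ_64K)
	#define GPUMMU_PAGE_SIZE	SZ_4K
	#define TABLE_SIZE	(sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)

	int main(void)
	{
		uint64_t iova = GPUMMU_VA_START + 5 * GPUMMU_PAGE_SIZE;
		unsigned int idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;

		/* One entry per 4K page across the ~256 MiB managed range. */
		printf("table: %zu bytes, %zu entries, idx for iova 0x%llx: %u\n",
		       TABLE_SIZE, TABLE_SIZE / sizeof(uint32_t),
		       (unsigned long long)iova, idx);
		return 0;
	}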
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index b23d33622f37..4d62790cd425 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -66,13 +66,12 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
// pm_runtime_get_sync(mmu->dev);
ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
// pm_runtime_put_sync(mmu->dev);
- WARN_ON(ret < 0);
+ WARN_ON(!ret);
return (ret == len) ? 0 : -EINVAL;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index fd88cebb6adb..2b81b43a4bab 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -67,9 +67,6 @@ struct msm_kms_funcs {
void (*set_encoder_mode)(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode);
- /* pm suspend/resume hooks */
- int (*pm_suspend)(struct device *dev);
- int (*pm_resume)(struct device *dev);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index aa2c5d4580c8..d21b26604d0b 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -25,8 +25,7 @@ struct msm_mmu_funcs {
void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- unsigned len);
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
void (*destroy)(struct msm_mmu *mmu);
};
@@ -54,4 +53,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
mmu->handler = handler;
}
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+ dma_addr_t *tran_error);
+
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index cca933458439..90e9d0a48dc0 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -316,10 +316,11 @@ static void snapshot_buf(struct msm_rd_state *rd,
uint64_t iova, uint32_t size)
{
struct msm_gem_object *obj = submit->bos[idx].obj;
+ unsigned offset = 0;
const char *buf;
if (iova) {
- buf += iova - submit->bos[idx].iova;
+ offset = iova - submit->bos[idx].iova;
} else {
iova = submit->bos[idx].iova;
size = obj->base.size;
@@ -340,11 +341,19 @@ static void snapshot_buf(struct msm_rd_state *rd,
if (IS_ERR(buf))
return;
+ buf += offset;
+
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr(&obj->base);
}
+static bool
+should_dump(struct msm_gem_submit *submit, int idx)
+{
+ return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+}
+
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
@@ -386,15 +395,16 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
- for (i = 0; rd_full && i < submit->nr_bos; i++)
- snapshot_buf(rd, submit, i, 0, 0);
+ for (i = 0; i < submit->nr_bos; i++)
+ if (should_dump(submit, i))
+ snapshot_buf(rd, submit, i, 0, 0);
for (i = 0; i < submit->nr_cmds; i++) {
uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
/* snapshot cmdstream bo's (if we haven't already): */
- if (!rd_full) {
+ if (!should_dump(submit, i)) {
snapshot_buf(rd, submit, submit->cmd[i].idx,
submit->cmd[i].iova, szd * 4);
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 6f5295b3f2f6..20a96fe69dcd 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -36,15 +36,18 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->gpu = gpu;
ring->id = id;
- /* Pass NULL for the iova pointer - we will map it later */
+
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
- MSM_BO_WC, gpu->aspace, &ring->bo, NULL);
+ MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
ring->start = 0;
goto fail;
}
+
+ msm_gem_object_set_name(ring->bo, "ring%d", id);
+
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
ring->cur = ring->start;
@@ -73,10 +76,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
msm_fence_context_free(ring->fctx);
- if (ring->bo) {
- msm_gem_put_iova(ring->bo, ring->gpu->aspace);
- msm_gem_put_vaddr(ring->bo);
- drm_gem_object_put_unlocked(ring->bo);
- }
+ msm_gem_kernel_put(ring->bo, ring->gpu->aspace, false);
+
kfree(ring);
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index cffce094aecb..6434ebb13136 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -23,9 +23,25 @@
#define rbmemptr(ring, member) \
((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
+#define rbmemptr_stats(ring, index, member) \
+ (rbmemptr((ring), stats) + \
+ ((index) * sizeof(struct msm_gpu_submit_stats)) + \
+ offsetof(struct msm_gpu_submit_stats, member))
+
+struct msm_gpu_submit_stats {
+ u64 cpcycles_start;
+ u64 cpcycles_end;
+ u64 alwayson_start;
+ u64 alwayson_end;
+};
+
+#define MSM_GPU_SUBMIT_STATS_COUNT 64
+
struct msm_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t fence;
+
+ volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
};
struct msm_ringbuffer {
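Each ring's memptrs now carry MSM_GPU_SUBMIT_STATS_COUNT per-submit counter slots; a submit records into slot seqno % 64, and rbmemptr_stats() resolves a member to its GPU-visible address as the stats base plus the slot offset plus the member offset. A standalone model of that offset arithmetic (struct names shortened, the seqno is hypothetical):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the per-submit stats block appended to msm_rbmemptrs. */
	struct submit_stats {
		uint64_t cpcycles_start;
		uint64_t cpcycles_end;
		uint64_t alwayson_start;
		uint64_t alwayson_end;
	};

	#define SUBMIT_STATS_COUNT 64

	struct rbmemptrs {
		volatile uint32_t rptr;
		volatile uint32_t fence;
		volatile struct submit_stats stats[SUBMIT_STATS_COUNT];
	};

	int main(void)
	{
		uint32_t seqno = 130;				/* wraps into slot 2 */
		unsigned int index = seqno % SUBMIT_STATS_COUNT;

		/* Same shape as rbmemptr_stats(): base + slot + member offset. */
		size_t off = offsetof(struct rbmemptrs, stats) +
			     index * sizeof(struct submit_stats) +
			     offsetof(struct submit_stats, alwayson_start);

		printf("slot %u, alwayson_start at byte offset %zu\n", index, off);
		return 0;
	}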
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 4b75ad40dd80..432c440223bb 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -4,7 +4,8 @@ config DRM_NOUVEAU
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
- select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+ select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
+ select BACKLIGHT_LCD_SUPPORT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 70dce544984e..1727d399833c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -67,7 +67,7 @@ nv04_display_create(struct drm_device *dev)
for (i = 0; i < dcb->entries; i++) {
struct dcb_output *dcbent = &dcb->entry[i];
- connector = nouveau_connector_create(dev, dcbent->connector);
+ connector = nouveau_connector_create(dev, dcbent);
if (IS_ERR(connector))
continue;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 849b0f45afb8..3d074aa31173 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -7,6 +7,7 @@ nouveau-y += dispnv50/core827d.o
nouveau-y += dispnv50/core907d.o
nouveau-y += dispnv50/core917d.o
nouveau-y += dispnv50/corec37d.o
+nouveau-y += dispnv50/corec57d.o
nouveau-y += dispnv50/dac507d.o
nouveau-y += dispnv50/dac907d.o
@@ -23,12 +24,14 @@ nouveau-y += dispnv50/head827d.o
nouveau-y += dispnv50/head907d.o
nouveau-y += dispnv50/head917d.o
nouveau-y += dispnv50/headc37d.o
+nouveau-y += dispnv50/headc57d.o
nouveau-y += dispnv50/wimm.o
nouveau-y += dispnv50/wimmc37b.o
nouveau-y += dispnv50/wndw.o
nouveau-y += dispnv50/wndwc37e.o
+nouveau-y += dispnv50/wndwc57e.o
nouveau-y += dispnv50/base.o
nouveau-y += dispnv50/base507c.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 908feb1fc60f..a194990d2b0d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -54,9 +54,10 @@ struct nv50_head_atom {
u64 offset:40;
u8 buffer:1;
u8 mode:4;
- u8 size:2;
+ u16 size:11;
u8 range:2;
u8 output_mode:2;
+ void (*load)(struct drm_color_lut *, int size, void __iomem *);
} olut;
struct {
@@ -169,9 +170,11 @@ struct nv50_wndw_atom {
u8 buffer:1;
u8 enable:2;
u8 mode:4;
- u8 size:2;
+ u16 size:11;
u8 range:2;
u8 output_mode:2;
+ void (*load)(struct drm_color_lut *, int size,
+ void __iomem *);
} i;
} xlut;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index a562fc94ce59..049ce6da321c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -80,6 +80,7 @@ base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
asyw->xlut.i.mode = 7;
asyw->xlut.i.enable = 2;
+ asyw->xlut.i.load = head907d_olut_load;
}
const struct nv50_wndw_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index f3c49adb1bdb..c25e0ebe3c92 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
+ { TU104_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
{ GP100_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index 8470df9dd13d..df8336b593f7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -46,5 +46,9 @@ extern const struct nv50_outp_func sor907d;
int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
+void corec37d_update(struct nv50_core *, u32 *, bool);
extern const struct nv50_outp_func sorc37d;
+
+int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index b5c17c948918..7860774b65bc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -24,7 +24,7 @@
#include <nouveau_bo.h>
-static void
+void
corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
u32 *push;
@@ -71,7 +71,7 @@ corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
}
-void
+static void
corec37d_init(struct nv50_core *core)
{
const u32 windows = 8; /*XXX*/
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
new file mode 100644
index 000000000000..b606d68cda10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+static void
+corec57d_init(struct nv50_core *core)
+{
+ const u32 windows = 8; /*XXX*/
+ u32 *push, i;
+ if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+ evo_mthd(push, 0x0208, 1);
+ evo_data(push, core->chan.sync.handle);
+ for (i = 0; i < windows; i++) {
+ evo_mthd(push, 0x1000 + (i * 0x080), 3);
+ evo_data(push, i >> 1);
+ evo_data(push, 0x0000000f);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x1010 + (i * 0x080), 1);
+ evo_data(push, 0x00117fff);
+ }
+ evo_mthd(push, 0x0200, 1);
+ evo_data(push, 0x00000001);
+ evo_kick(push, &core->chan);
+ }
+}
+
+static const struct nv50_core_func
+corec57d = {
+ .init = corec57d_init,
+ .ntfy_init = corec37d_ntfy_init,
+ .ntfy_wait_done = corec37d_ntfy_wait_done,
+ .update = corec37d_update,
+ .head = &headc57d,
+ .sor = &sorc37d,
+};
+
+int
+corec57d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+ return core507d_new_(&corec57d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index f592087338c4..cb6e4d2b1b45 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
+ { TU104_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
{ GK104_DISP_CURSOR, 0, curs907a_new },
{ GF110_DISP_CURSOR, 0, curs907a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 500bf3e0f6f2..67107f0b1299 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -198,6 +198,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
/******************************************************************************
* EVO channel helpers
*****************************************************************************/
+static void
+evo_flush(struct nv50_dmac *dmac)
+{
+ /* Push buffer fetches are not coherent with BAR1, we need to ensure
+ * writes have been flushed right through to VRAM before writing PUT.
+ */
+ if (dmac->push.type & NVIF_MEM_VRAM) {
+ struct nvif_device *device = dmac->base.device;
+ nvif_wr32(&device->object, 0x070000, 0x00000001);
+ nvif_msec(device, 2000,
+ if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+ break;
+ );
+ }
+}
+
u32 *
evo_wait(struct nv50_dmac *evoc, int nr)
{
@@ -208,6 +224,7 @@ evo_wait(struct nv50_dmac *evoc, int nr)
mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
+ evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
if (nvif_msec(device, 2000,
@@ -230,17 +247,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc)
{
struct nv50_dmac *dmac = evoc;
- /* Push buffer fetches are not coherent with BAR1, we need to ensure
- * writes have been flushed right through to VRAM before writing PUT.
- */
- if (dmac->push.type & NVIF_MEM_VRAM) {
- struct nvif_device *device = dmac->base.device;
- nvif_wr32(&device->object, 0x070000, 0x00000001);
- nvif_msec(device, 2000,
- if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
- break;
- );
- }
+ evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
@@ -1288,8 +1295,16 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
static void
nv50_mstm_init(struct nv50_mstm *mstm)
{
- if (mstm && mstm->mgr.mst_state)
- drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+ int ret;
+
+ if (!mstm || !mstm->mgr.mst_state)
+ return;
+
+ ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+ if (ret == -1) {
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+ drm_kms_helper_hotplug_event(mstm->mgr.dev);
+ }
}
static void
@@ -1297,6 +1312,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
{
struct nv50_mstm *mstm = *pmstm;
if (mstm) {
+ drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
kfree(*pmstm);
*pmstm = NULL;
}
@@ -2330,7 +2346,7 @@ nv50_display_create(struct drm_device *dev)
/* create encoder/connector objects based on VBIOS DCB table */
for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
- connector = nouveau_connector_create(dev, dcbe->connector);
+ connector = nouveau_connector_create(dev, dcbe);
if (IS_ERR(connector))
continue;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index e48c5eb35b49..2216c58620c2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -45,6 +45,8 @@ struct nv50_disp_interlock {
void corec37d_ntfy_init(struct nouveau_bo *, u32);
+void head907d_olut_load(struct drm_color_lut *, int size, void __iomem *);
+
struct nv50_chan {
struct nvif_object user;
struct nvif_device *device;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 4f57e5379796..ac97ebce5b35 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -50,9 +50,9 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
if (asyh->set.core ) head->func->core_set(head, asyh);
if (asyh->set.olut ) {
asyh->olut.offset = nv50_lut_load(&head->olut,
- asyh->olut.mode <= 1,
asyh->olut.buffer,
- asyh->state.gamma_lut);
+ asyh->state.gamma_lut,
+ asyh->olut.load);
head->func->olut_set(head, asyh);
}
if (asyh->set.curs ) head->func->curs_set(head, asyh);
@@ -210,7 +210,7 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
}
}
- if (!olut) {
+ if (!olut && !head->func->olut_identity) {
asyh->olut.handle = 0;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index 37b3248c6dae..d1c002f534d4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -21,6 +21,7 @@ struct nv50_head_func {
void (*view)(struct nv50_head *, struct nv50_head_atom *);
void (*mode)(struct nv50_head *, struct nv50_head_atom *);
void (*olut)(struct nv50_head *, struct nv50_head_atom *);
+ bool olut_identity;
void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
void (*olut_clr)(struct nv50_head *);
void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
@@ -75,4 +76,14 @@ int head917d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
extern const struct nv50_head_func headc37d;
+void headc37d_view(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_core_set(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_core_clr(struct nv50_head *);
+int headc37d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+void headc37d_curs_set(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_curs_clr(struct nv50_head *);
+void headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
+
+extern const struct nv50_head_func headc57d;
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 51bc5996fd37..7561be5ca707 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -254,6 +254,23 @@ head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
+static void
+head507d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ for (; size--; in++, mem += 8) {
+ writew(drm_color_lut_extract(in-> red, 11) << 3, mem + 0);
+ writew(drm_color_lut_extract(in->green, 11) << 3, mem + 2);
+ writew(drm_color_lut_extract(in-> blue, 11) << 3, mem + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
void
head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
{
@@ -261,6 +278,8 @@ head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
asyh->olut.mode = 0;
else
asyh->olut.mode = 1;
+
+ asyh->olut.load = head507d_olut_load;
}
void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 633907163eb1..c2d09dd97b1f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -214,9 +214,27 @@ head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
}
void
+head907d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ for (; size--; in++, mem += 8) {
+ writew(drm_color_lut_extract(in-> red, 14) + 0x6000, mem + 0);
+ writew(drm_color_lut_extract(in->green, 14) + 0x6000, mem + 2);
+ writew(drm_color_lut_extract(in-> blue, 14) + 0x6000, mem + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+void
head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
{
asyh->olut.mode = 7;
+ asyh->olut.load = head907d_olut_load;
}
void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index 989c14083066..ef6a99d95a9c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -65,7 +65,7 @@ headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
-static void
+void
headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -79,7 +79,7 @@ headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
-static void
+void
headc37d_curs_clr(struct nv50_head *head)
{
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -93,7 +93,7 @@ headc37d_curs_clr(struct nv50_head *head)
}
}
-static void
+void
headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -112,7 +112,7 @@ headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
-static int
+int
headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
@@ -155,6 +155,7 @@ headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
asyh->olut.size = 0;
asyh->olut.range = 0;
asyh->olut.output_mode = 1;
+ asyh->olut.load = head907d_olut_load;
}
static void
@@ -181,7 +182,7 @@ headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
-static void
+void
headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
new file mode 100644
index 000000000000..32a7f9e85fb0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "atom.h"
+#include "core.h"
+
+static void
+headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ /*XXX: This is a dirty hack until OR depth handling is
+ * improved later for deep colour etc.
+ */
+ switch (asyh->or.depth) {
+ case 6: asyh->or.depth = 5; break;
+ case 5: asyh->or.depth = 4; break;
+ case 2: asyh->or.depth = 1; break;
+ case 0: asyh->or.depth = 4; break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1);
+ evo_data(push, 0xfc000001 |
+ asyh->or.depth << 4 |
+ asyh->or.nvsync << 3 |
+ asyh->or.nhsync << 2);
+ evo_kick(push, core);
+ }
+}
+
+static void
+headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1);
+#if 0
+ evo_data(push, 0x80000000 |
+ asyh->procamp.sat.sin << 16 |
+ asyh->procamp.sat.cos << 4);
+#else
+ evo_data(push, 0);
+#endif
+ evo_kick(push, core);
+ }
+}
+
+void
+headc57d_olut_clr(struct nv50_head *head)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ evo_mthd(push, 0x2288 + (head->base.index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, core);
+ }
+}
+
+void
+headc57d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ u32 *push;
+ if ((push = evo_wait(core, 4))) {
+ evo_mthd(push, 0x2280 + (head->base.index * 0x400), 4);
+ evo_data(push, asyh->olut.size << 8 |
+ asyh->olut.mode << 2 |
+ asyh->olut.output_mode);
+ evo_data(push, 0xffffffff); /* FP_NORM_SCALE. */
+ evo_data(push, asyh->olut.handle);
+ evo_data(push, asyh->olut.offset >> 8);
+ evo_kick(push, core);
+ }
+}
+
+static void
+headc57d_olut_load_8(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ memset_io(mem, 0x00, 0x20); /* VSS header. */
+ mem += 0x20;
+
+ while (size--) {
+ u16 r = drm_color_lut_extract(in-> red + 0, 16);
+ u16 g = drm_color_lut_extract(in->green + 0, 16);
+ u16 b = drm_color_lut_extract(in-> blue + 0, 16);
+ u16 ri = 0, gi = 0, bi = 0, i;
+
+ if (in++, size) {
+ ri = (drm_color_lut_extract(in-> red, 16) - r) / 4;
+ gi = (drm_color_lut_extract(in->green, 16) - g) / 4;
+ bi = (drm_color_lut_extract(in-> blue, 16) - b) / 4;
+ }
+
+ for (i = 0; i < 4; i++, mem += 8) {
+ writew(r + ri * i, mem + 0);
+ writew(g + gi * i, mem + 2);
+ writew(b + bi * i, mem + 4);
+ }
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+static void
+headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ memset_io(mem, 0x00, 0x20); /* VSS header. */
+ mem += 0x20;
+
+ for (; size--; in++, mem += 0x08) {
+ writew(drm_color_lut_extract(in-> red, 16), mem + 0);
+ writew(drm_color_lut_extract(in->green, 16), mem + 2);
+ writew(drm_color_lut_extract(in-> blue, 16), mem + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+void
+headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ asyh->olut.mode = 2; /* DIRECT10 */
+ asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */;
+ asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. */
+ if (asyh->state.gamma_lut &&
+ asyh->state.gamma_lut->length / sizeof(struct drm_color_lut) == 256)
+ asyh->olut.load = headc57d_olut_load_8;
+ else
+ asyh->olut.load = headc57d_olut_load;
+}
+
+static void
+headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nv50_head_mode *m = &asyh->mode;
+ u32 *push;
+ if ((push = evo_wait(core, 12))) {
+ evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
+ evo_data(push, (m->v.active << 16) | m->h.active );
+ evo_data(push, (m->v.synce << 16) | m->h.synce );
+ evo_data(push, (m->v.blanke << 16) | m->h.blanke );
+ evo_data(push, (m->v.blanks << 16) | m->h.blanks );
+ evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+ evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+ evo_data(push, m->clock * 1000);
+ evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
+ evo_data(push, m->clock * 1000);
+ /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
+ evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1);
+ evo_data(push, 0x00001014);
+ evo_kick(push, core);
+ }
+}
+
+const struct nv50_head_func
+headc57d = {
+ .view = headc37d_view,
+ .mode = headc57d_mode,
+ .olut = headc57d_olut,
+ .olut_identity = true,
+ .olut_set = headc57d_olut_set,
+ .olut_clr = headc57d_olut_clr,
+ .curs_layout = head917d_curs_layout,
+ .curs_format = headc37d_curs_format,
+ .curs_set = headc37d_curs_set,
+ .curs_clr = headc37d_curs_clr,
+ .dither = headc37d_dither,
+ .procamp = headc57d_procamp,
+ .or = headc57d_or,
+};
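
The headc57d_olut_load_8() helper above widens a legacy 256-entry gamma blob into the table the Turing DIRECT10 OLUT expects: a 0x20-byte VSS header, then four hardware entries per input entry, each stepping a quarter of the way toward the next input value, with the final entry replicated because INTERPOLATE output modes read one entry past the end (hence the "4 + 1024 + 1" size above). A rough user-space sketch of just that expansion follows; it assumes the inputs are already 16-bit so drm_color_lut_extract() can be dropped, and replaces the writew()/memset_io() MMIO accessors with plain array stores.

#include <stdint.h>

struct color_16 { uint16_t red, green, blue; };   /* stand-in for drm_color_lut */
struct hw_entry { uint16_t r, g, b, pad; };       /* 8-byte stride, as in the 0x08 step above */

/* out[] must hold 1024 + 1 entries; the VSS header that precedes the table
 * in the real buffer is omitted here.
 */
void
expand_256_to_direct10(const struct color_16 *in, struct hw_entry *out)
{
	int i, j;

	for (i = 0; i < 256; i++) {
		uint16_t r = in[i].red, g = in[i].green, b = in[i].blue;
		uint16_t ri = 0, gi = 0, bi = 0;

		if (i + 1 < 256) {
			/* Quarter-steps toward the next input entry. */
			ri = (in[i + 1].red   - r) / 4;
			gi = (in[i + 1].green - g) / 4;
			bi = (in[i + 1].blue  - b) / 4;
		}

		for (j = 0; j < 4; j++) {
			out[i * 4 + j].r = r + ri * j;
			out[i * 4 + j].g = g + gi * j;
			out[i * 4 + j].b = b + bi * j;
		}
	}

	/* INTERPOLATE modes need a "next" entry, so replicate the last one. */
	out[1024] = out[1023];
}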
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c
index a6b96ae2a22f..994def4fd51a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/lut.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c
@@ -29,45 +29,29 @@
#include <nvif/class.h>
u32
-nv50_lut_load(struct nv50_lut *lut, bool legacy, int buffer,
- struct drm_property_blob *blob)
+nv50_lut_load(struct nv50_lut *lut, int buffer, struct drm_property_blob *blob,
+ void (*load)(struct drm_color_lut *, int, void __iomem *))
{
- struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
+ struct drm_color_lut *in = blob ? blob->data : NULL;
void __iomem *mem = lut->mem[buffer].object.map.ptr;
- const int size = blob->length / sizeof(*in);
- int bits, shift, i;
- u16 zero, r, g, b;
- u32 addr = lut->mem[buffer].addr;
-
- /* This can't happen.. But it shuts the compiler up. */
- if (WARN_ON(size != 256))
- return 0;
+ const u32 addr = lut->mem[buffer].addr;
+ int i;
- if (legacy) {
- bits = 11;
- shift = 3;
- zero = 0x0000;
+ if (!in) {
+ in = kvmalloc_array(1024, sizeof(*in), GFP_KERNEL);
+ if (!WARN_ON(!in)) {
+ for (i = 0; i < 1024; i++) {
+ in[i].red =
+ in[i].green =
+ in[i].blue = (i << 16) >> 10;
+ }
+ load(in, 1024, mem);
+ kvfree(in);
+ }
} else {
- bits = 14;
- shift = 0;
- zero = 0x6000;
- }
-
- for (i = 0; i < size; i++) {
- r = (drm_color_lut_extract(in[i]. red, bits) + zero) << shift;
- g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
- b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
- writew(r, mem + (i * 0x08) + 0);
- writew(g, mem + (i * 0x08) + 2);
- writew(b, mem + (i * 0x08) + 4);
+ load(in, blob->length / sizeof(*in), mem);
}
- /* INTERPOLATE modes require a "next" entry to interpolate with,
- * so we replicate the last entry to deal with this for now.
- */
- writew(r, mem + (i * 0x08) + 0);
- writew(g, mem + (i * 0x08) + 2);
- writew(b, mem + (i * 0x08) + 4);
return addr;
}
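
With this rework, nv50_lut_load() no longer hardcodes the legacy/non-legacy packing: each display class passes its own load callback, and when no blob data is available it synthesizes a linear 1024-entry identity ramp and pushes it through the same callback, which the new olut_identity/ilut_identity handling appears to lean on for hardware that always routes pixels through the LUT. A minimal sketch of just the ramp, noting that (i << 16) >> 10 is simply i * 64, covering 0x0000 through 0xffc0:

#include <stdint.h>

struct color_16 { uint16_t red, green, blue; };   /* stand-in for drm_color_lut */

/* Fill a 1024-entry identity ramp, as nv50_lut_load() now does when no blob
 * data is bound; the kvmalloc_array()/load()/kvfree() plumbing is stripped.
 */
void
identity_ramp(struct color_16 *in)
{
	int i;

	for (i = 0; i < 1024; i++) {
		/* (i << 16) >> 10 == i * 64: 0x0000, 0x0040, ..., 0xffc0 */
		in[i].red = in[i].green = in[i].blue = (i << 16) >> 10;
	}
}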
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.h b/drivers/gpu/drm/nouveau/dispnv50/lut.h
index 6d7b8352e4cb..b3b9040cfe9a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/lut.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.h
@@ -2,6 +2,7 @@
#define __NV50_KMS_LUT_H__
#include <nvif/mem.h>
struct drm_property_blob;
+struct drm_color_lut;
struct nv50_disp;
struct nv50_lut {
@@ -10,6 +11,6 @@ struct nv50_lut {
int nv50_lut_init(struct nv50_disp *, struct nvif_mmu *, struct nv50_lut *);
void nv50_lut_fini(struct nv50_lut *);
-u32 nv50_lut_load(struct nv50_lut *, bool legacy, int buffer,
- struct drm_property_blob *);
+u32 nv50_lut_load(struct nv50_lut *, int buffer, struct drm_property_blob *,
+ void (*)(struct drm_color_lut *, int size, void __iomem *));
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
index fc36e0696407..bc9eeaf212ae 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
+ { TU104_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{}
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 2187922e8dc2..ba9eea2ff16b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -139,10 +139,8 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
if (asyw->set.xlut ) {
if (asyw->ilut) {
asyw->xlut.i.offset =
- nv50_lut_load(&wndw->ilut,
- asyw->xlut.i.mode <= 1,
- asyw->xlut.i.buffer,
- asyw->ilut);
+ nv50_lut_load(&wndw->ilut, asyw->xlut.i.buffer,
+ asyw->ilut, asyw->xlut.i.load);
}
wndw->func->xlut_set(wndw, asyw);
}
@@ -322,6 +320,11 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
asyh->wndw.olut &= ~BIT(wndw->id);
}
+ if (!ilut && wndw->func->ilut_identity) {
+ static struct drm_property_blob dummy = {};
+ ilut = &dummy;
+ }
+
/* Recalculate LUT state. */
memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
@@ -623,6 +626,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
+ { TU104_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
{}
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index b0b6428034b0..03f3d8dc235a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -65,6 +65,7 @@ struct nv50_wndw_func {
int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ bool ilut_identity;
bool olut_core;
void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
void (*xlut_clr)(struct nv50_wndw *);
@@ -90,6 +91,23 @@ extern const struct nv50_wimm_func curs507a;
int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
+int wndwc37e_new_(const struct nv50_wndw_func *, struct nouveau_drm *,
+ enum drm_plane_type type, int index, s32 oclass, u32 heads,
+ struct nv50_wndw **);
+int wndwc37e_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+void wndwc37e_release(struct nv50_wndw *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+void wndwc37e_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void wndwc37e_sema_clr(struct nv50_wndw *);
+void wndwc37e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void wndwc37e_ntfy_clr(struct nv50_wndw *);
+void wndwc37e_image_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void wndwc37e_image_clr(struct nv50_wndw *);
+void wndwc37e_update(struct nv50_wndw *, u32 *);
+
+int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 44afb0f069a5..e52a85c83f7a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -61,9 +61,10 @@ wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
asyw->xlut.i.size = 0;
asyw->xlut.i.range = 0;
asyw->xlut.i.output_mode = 1;
+ asyw->xlut.i.load = head907d_olut_load;
}
-static void
+void
wndwc37e_image_clr(struct nv50_wndw *wndw)
{
u32 *push;
@@ -76,7 +77,7 @@ wndwc37e_image_clr(struct nv50_wndw *wndw)
}
}
-static void
+void
wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
u32 *push;
@@ -117,7 +118,7 @@ wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
evo_kick(push, &wndw->wndw);
}
-static void
+void
wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
{
u32 *push;
@@ -128,7 +129,7 @@ wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
}
}
-static void
+void
wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
u32 *push;
@@ -140,7 +141,7 @@ wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
}
}
-static void
+void
wndwc37e_sema_clr(struct nv50_wndw *wndw)
{
u32 *push;
@@ -151,7 +152,7 @@ wndwc37e_sema_clr(struct nv50_wndw *wndw)
}
}
-static void
+void
wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
u32 *push;
@@ -165,7 +166,7 @@ wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
}
}
-static void
+void
wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
{
u32 *push;
@@ -183,13 +184,13 @@ wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
}
}
-static void
+void
wndwc37e_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
}
-static int
+int
wndwc37e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
@@ -236,7 +237,7 @@ wndwc37e = {
.update = wndwc37e_update,
};
-static int
+int
wndwc37e_new_(const struct nv50_wndw_func *func, struct nouveau_drm *drm,
enum drm_plane_type type, int index, s32 oclass, u32 heads,
struct nv50_wndw **pwndw)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
new file mode 100644
index 000000000000..ba89f1a5fcfa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <nouveau_bo.h>
+
+#include <nvif/clc37e.h>
+
+static void
+wndwc57e_ilut_clr(struct nv50_wndw *wndw)
+{
+ u32 *push;
+ if ((push = evo_wait(&wndw->wndw, 2))) {
+ evo_mthd(push, 0x0444, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
+static void
+wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u32 *push;
+ if ((push = evo_wait(&wndw->wndw, 4))) {
+ evo_mthd(push, 0x0440, 3);
+ evo_data(push, asyw->xlut.i.size << 8 |
+ asyw->xlut.i.mode << 2 |
+ asyw->xlut.i.output_mode);
+ evo_data(push, asyw->xlut.handle);
+ evo_data(push, asyw->xlut.i.offset >> 8);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
+static u16
+fixedU0_16_FP16(u16 fixed)
+{
+ int sign = 0, exp = 0, man = 0;
+ if (fixed) {
+ while (--exp && !(fixed & 0x8000))
+ fixed <<= 1;
+ man = ((fixed << 1) & 0xffc0) >> 6;
+ exp += 15;
+ }
+ return (sign << 15) | (exp << 10) | man;
+}
+
+static void
+wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ memset_io(mem, 0x00, 0x20); /* VSS header. */
+ mem += 0x20;
+
+ for (; size--; in++, mem += 0x08) {
+ u16 r = fixedU0_16_FP16(drm_color_lut_extract(in-> red, 16));
+ u16 g = fixedU0_16_FP16(drm_color_lut_extract(in->green, 16));
+ u16 b = fixedU0_16_FP16(drm_color_lut_extract(in-> blue, 16));
+ writew(r, mem + 0);
+ writew(g, mem + 2);
+ writew(b, mem + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+static void
+wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u16 size = asyw->ilut->length / sizeof(struct drm_color_lut);
+ if (size == 256) {
+ asyw->xlut.i.mode = 1; /* DIRECT8. */
+ } else {
+ asyw->xlut.i.mode = 2; /* DIRECT10. */
+ size = 1024;
+ }
+ asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */;
+ asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. */
+ asyw->xlut.i.load = wndwc57e_ilut_load;
+}
+
+static const struct nv50_wndw_func
+wndwc57e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .sema_set = wndwc37e_sema_set,
+ .sema_clr = wndwc37e_sema_clr,
+ .ntfy_set = wndwc37e_ntfy_set,
+ .ntfy_clr = wndwc37e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc57e_ilut,
+ .ilut_identity = true,
+ .xlut_set = wndwc57e_ilut_set,
+ .xlut_clr = wndwc57e_ilut_clr,
+ .image_set = wndwc37e_image_set,
+ .image_clr = wndwc37e_image_clr,
+ .update = wndwc37e_update,
+};
+
+int
+wndwc57e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
+ return wndwc37e_new_(&wndwc57e, drm, type, index, oclass,
+ BIT(index >> 1), pwndw);
+}
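
wndwc57e_ilut_load() above stores the input LUT as half-float rather than fixed point, using fixedU0_16_FP16() to turn an unsigned 0.16 fixed-point value into a sign / 5-bit-exponent / 10-bit-mantissa pattern. The following user-space sanity check copies the helper verbatim and decodes the result with the usual FP16 formula (1 + man/1024) * 2^(exp - 15); the samples stay at or above 2^-14 so sub-normal corner cases are not exercised, and reading the input scale as 0x10000 == 1.0 is an inference from the arithmetic, not something documented here. Build with something like "cc test.c -lm".

#include <stdio.h>
#include <stdint.h>
#include <math.h>

static uint16_t
fixedU0_16_FP16(uint16_t fixed)
{
	int sign = 0, exp = 0, man = 0;

	if (fixed) {
		/* Normalise: shift until the MSB is set, counting down in exp. */
		while (--exp && !(fixed & 0x8000))
			fixed <<= 1;
		man = ((fixed << 1) & 0xffc0) >> 6;	/* top 10 fraction bits */
		exp += 15;				/* apply the FP16 bias */
	}

	return (sign << 15) | (exp << 10) | man;
}

int
main(void)
{
	const uint16_t samples[] = { 0x0004, 0x0100, 0x4000, 0x8000, 0xffff };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint16_t h = fixedU0_16_FP16(samples[i]);
		int exp = (h >> 10) & 0x1f;
		int man = h & 0x3ff;

		printf("u0.16 0x%04x -> fp16 0x%04x (exp %2d man %4d, ~%.6f vs %.6f)\n",
		       samples[i], h, exp, man,
		       ldexp(1.0 + man / 1024.0, exp - 15),
		       samples[i] / 65536.0);
	}

	return 0;
}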
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 4f5233107f5f..4cbed0329367 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -32,6 +32,7 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_MAXWELL 0x09
#define NV_DEVICE_INFO_V0_PASCAL 0x0a
#define NV_DEVICE_INFO_V0_VOLTA 0x0b
+#define NV_DEVICE_INFO_V0_TURING 0x0c
__u8 family;
__u8 pad06[2];
__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index fbfcffc5feb2..81401eb970ea 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -4,12 +4,13 @@
struct kepler_channel_gpfifo_a_v0 {
__u8 version;
- __u8 pad01[1];
+ __u8 priv;
__u16 chid;
__u32 ilength;
__u64 ioffset;
__u64 runlist;
__u64 vmm;
+ __u64 inst;
};
#define NVA06F_V0_NTFY_NON_STALL_INTERRUPT 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 6db56bd7d67e..1d82cbf70cf4 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -68,7 +68,8 @@
#define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f
#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f
-#define VOLTA_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c36f
+#define VOLTA_CHANNEL_GPFIFO_A /* clc36f.h */ 0x0000c36f
+#define TURING_CHANNEL_GPFIFO_A /* clc36f.h */ 0x0000c46f
#define NV50_DISP /* cl5070.h */ 0x00005070
#define G82_DISP /* cl5070.h */ 0x00008270
@@ -83,6 +84,7 @@
#define GP100_DISP /* cl5070.h */ 0x00009770
#define GP102_DISP /* cl5070.h */ 0x00009870
#define GV100_DISP /* cl5070.h */ 0x0000c370
+#define TU104_DISP /* cl5070.h */ 0x0000c570
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -95,6 +97,7 @@
#define GF110_DISP_CURSOR /* cl507a.h */ 0x0000907a
#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
#define GV100_DISP_CURSOR /* cl507a.h */ 0x0000c37a
+#define TU104_DISP_CURSOR /* cl507a.h */ 0x0000c57a
#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
@@ -103,6 +106,7 @@
#define GK104_DISP_OVERLAY /* cl507b.h */ 0x0000917b
#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c37b
+#define TU104_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c57b
#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
@@ -125,6 +129,7 @@
#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define GV100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c37d
+#define TU104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c57d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@@ -134,6 +139,7 @@
#define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e
#define GV100_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c37e
+#define TU104_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c57e
#define NV50_TESLA 0x00005097
#define G82_TESLA 0x00008297
@@ -183,6 +189,7 @@
#define PASCAL_DMA_COPY_A 0x0000c0b5
#define PASCAL_DMA_COPY_B 0x0000c1b5
#define VOLTA_DMA_COPY_A 0x0000c3b5
+#define TURING_DMA_COPY_A 0x0000c5b5
#define FERMI_DECOMPRESS 0x000090b8
diff --git a/drivers/gpu/drm/nouveau/include/nvif/clc36f.h b/drivers/gpu/drm/nouveau/include/nvif/clc36f.h
new file mode 100644
index 000000000000..6b14d7e3f6bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/clc36f.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NVIF_CLC36F_H__
+#define __NVIF_CLC36F_H__
+
+struct volta_channel_gpfifo_a_v0 {
+ __u8 version;
+ __u8 priv;
+ __u16 chid;
+ __u32 ilength;
+ __u64 ioffset;
+ __u64 runlist;
+ __u64 vmm;
+ __u64 inst;
+ __u32 token;
+};
+
+#define NVC36F_V0_NTFY_NON_STALL_INTERRUPT 0x00
+#define NVC36F_V0_NTFY_KILLED 0x01
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index d83d834b7452..72e4dc1f0236 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -61,7 +61,11 @@ enum nvkm_devidx {
NVKM_ENGINE_NVENC2,
NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
- NVKM_ENGINE_NVDEC,
+ NVKM_ENGINE_NVDEC0,
+ NVKM_ENGINE_NVDEC1,
+ NVKM_ENGINE_NVDEC2,
+ NVKM_ENGINE_NVDEC_LAST = NVKM_ENGINE_NVDEC2,
+
NVKM_ENGINE_PM,
NVKM_ENGINE_SEC,
NVKM_ENGINE_SEC2,
@@ -114,6 +118,7 @@ struct nvkm_device {
GM100 = 0x110,
GP100 = 0x130,
GV100 = 0x140,
+ TU100 = 0x160,
} card_type;
u32 chipset;
u8 chiprev;
@@ -163,7 +168,7 @@ struct nvkm_device {
struct nvkm_engine *msppp;
struct nvkm_engine *msvld;
struct nvkm_engine *nvenc[3];
- struct nvkm_nvdec *nvdec;
+ struct nvkm_nvdec *nvdec[3];
struct nvkm_pm *pm;
struct nvkm_engine *sec;
struct nvkm_sec2 *sec2;
@@ -235,7 +240,7 @@ struct nvkm_device_chip {
int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_nvdec **);
+ int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **);
int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*sec2 )(struct nvkm_device *, int idx, struct nvkm_sec2 **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index 05f505de0075..f34c80310861 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -29,6 +29,7 @@ struct nvkm_memory_func {
void *(*dtor)(struct nvkm_memory *);
enum nvkm_memory_target (*target)(struct nvkm_memory *);
u8 (*page)(struct nvkm_memory *);
+ u64 (*bar2)(struct nvkm_memory *);
u64 (*addr)(struct nvkm_memory *);
u64 (*size)(struct nvkm_memory *);
void (*boot)(struct nvkm_memory *, struct nvkm_vmm *);
@@ -56,6 +57,7 @@ void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
#define nvkm_memory_target(p) (p)->func->target(p)
#define nvkm_memory_page(p) (p)->func->page(p)
+#define nvkm_memory_bar2(p) (p)->func->bar2(p)
#define nvkm_memory_addr(p) (p)->func->addr(p)
#define nvkm_memory_size(p) (p)->func->size(p)
#define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index fc295e1faa19..86abe76023c2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -11,4 +11,5 @@ int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gv100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int tu104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index ef7dc0844d26..5ca86e178bb9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -36,4 +36,5 @@ int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int tu104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 7e39fbed2519..3b2b685778eb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -74,4 +74,5 @@ int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gv100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int tu104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index f6bd94c7e0f7..fd9d713b611c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -16,8 +16,10 @@ struct nvkm_bar {
};
struct nvkm_vmm *nvkm_bar_bar1_vmm(struct nvkm_device *);
+void nvkm_bar_bar1_reset(struct nvkm_device *);
void nvkm_bar_bar2_init(struct nvkm_device *);
void nvkm_bar_bar2_fini(struct nvkm_device *);
+void nvkm_bar_bar2_reset(struct nvkm_device *);
struct nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *);
void nvkm_bar_flush(struct nvkm_bar *);
@@ -27,4 +29,5 @@ int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int tu104_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
index 703a5b524b96..425ccc47e3b7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
@@ -12,11 +12,14 @@ u32 nvbios_M0203Tp(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_M0203T *);
struct nvbios_M0203E {
-#define M0203E_TYPE_DDR2 0x0
-#define M0203E_TYPE_DDR3 0x1
-#define M0203E_TYPE_GDDR3 0x2
-#define M0203E_TYPE_GDDR5 0x3
-#define M0203E_TYPE_SKIP 0xf
+#define M0203E_TYPE_DDR2 0x0
+#define M0203E_TYPE_DDR3 0x1
+#define M0203E_TYPE_GDDR3 0x2
+#define M0203E_TYPE_GDDR5 0x3
+#define M0203E_TYPE_HBM2 0x6
+#define M0203E_TYPE_GDDR5X 0x8
+#define M0203E_TYPE_GDDR6 0x9
+#define M0203E_TYPE_SKIP 0xf
u8 type;
u8 strap;
u8 group;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index ed9e0a6a0011..8463b421d345 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -20,6 +20,7 @@ enum dcb_connector_type {
DCB_CONNECTOR_DMS59_DP0 = 0x64,
DCB_CONNECTOR_DMS59_DP1 = 0x65,
DCB_CONNECTOR_WFD = 0x70,
+ DCB_CONNECTOR_USB_C = 0x71,
DCB_CONNECTOR_NONE = 0xff
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 486e7635c29d..1b71812a790b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -31,4 +31,5 @@ int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int tu104_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 5a77498fe6a0..127f48066026 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -30,4 +30,5 @@ struct nvkm_fault_data {
int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int tu104_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 96ccc624ee81..27298f8b7ead 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -105,7 +105,10 @@ enum nvkm_ram_type {
NVKM_RAM_TYPE_GDDR2,
NVKM_RAM_TYPE_GDDR3,
NVKM_RAM_TYPE_GDDR4,
- NVKM_RAM_TYPE_GDDR5
+ NVKM_RAM_TYPE_GDDR5,
+ NVKM_RAM_TYPE_GDDR5X,
+ NVKM_RAM_TYPE_GDDR6,
+ NVKM_RAM_TYPE_HBM2,
};
struct nvkm_ram {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 61c93c86e2e2..b66dedd8abb6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -31,4 +31,5 @@ int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int tu104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 688595545e21..0a0e064f22e5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -130,4 +130,5 @@ int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int tu104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index e9b0746826ca..3693ebf371b6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -28,6 +28,18 @@ struct nvkm_timer {
u64 nvkm_timer_read(struct nvkm_timer *);
void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
+struct nvkm_timer_wait {
+ struct nvkm_timer *tmr;
+ u64 limit;
+ u64 time0;
+ u64 time1;
+ int reads;
+};
+
+void nvkm_timer_wait_init(struct nvkm_device *, u64 nsec,
+ struct nvkm_timer_wait *);
+s64 nvkm_timer_wait_test(struct nvkm_timer_wait *);
+
/* Delay based on GPU time (ie. PTIMER).
*
* Will return -ETIMEDOUT unless the loop was terminated with 'break',
@@ -38,21 +50,17 @@ void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
*/
#define NVKM_DELAY _warn = false;
#define nvkm_nsec(d,n,cond...) ({ \
- struct nvkm_device *_device = (d); \
- struct nvkm_timer *_tmr = _device->timer; \
- u64 _nsecs = (n), _time0 = nvkm_timer_read(_tmr); \
- s64 _taken = 0; \
+ struct nvkm_timer_wait _wait; \
bool _warn = true; \
+ s64 _taken = 0; \
\
+ nvkm_timer_wait_init((d), (n), &_wait); \
do { \
cond \
- } while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs); \
+ } while ((_taken = nvkm_timer_wait_test(&_wait)) >= 0); \
\
- if (_taken >= _nsecs) { \
- if (_warn) \
- dev_WARN(_device->dev, "timeout\n"); \
- _taken = -ETIMEDOUT; \
- } \
+ if (_warn && _taken < 0) \
+ dev_WARN(_wait.tmr->subdev.device->dev, "timeout\n"); \
_taken; \
})
#define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond)
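
The timestamp bookkeeping that used to be open-coded in nvkm_nsec() moves into nvkm_timer_wait_init()/nvkm_timer_wait_test(), with the per-loop state carried in the new struct nvkm_timer_wait; the macro now just drives that pair and warns when the result goes negative. Roughly, a call site expands to the sketch below, where example_wait() and example_condition_met() are illustrative placeholders and the specific negative value on expiry is whatever nvkm_timer_wait_test() returns (the pre-change macro used -ETIMEDOUT).

static int
example_wait(struct nvkm_device *device, u64 nsec)
{
	struct nvkm_timer_wait wait;
	s64 taken = 0;

	nvkm_timer_wait_init(device, nsec, &wait);
	do {
		if (example_condition_met(device))	/* placeholder for the caller's cond block */
			break;
	} while ((taken = nvkm_timer_wait_test(&wait)) >= 0);

	/* taken < 0: the time budget expired before the condition held. */
	return taken < 0 ? -ETIMEDOUT : 0;
}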
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index e67a471331b5..b06cdac8f3a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -306,7 +306,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
/* create channel object and initialise dma and fence management */
ret = nouveau_channel_new(drm, device, init->fb_ctxdma_handle,
- init->tt_ctxdma_handle, &chan->chan);
+ init->tt_ctxdma_handle, false, &chan->chan);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7214022dfb91..73eff52036d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1141,6 +1141,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 92d3115f96b5..668afbc29c3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -29,6 +29,7 @@
#include <nvif/cl506f.h>
#include <nvif/cl906f.h>
#include <nvif/cla06f.h>
+#include <nvif/clc36f.h>
#include <nvif/ioctl.h>
/*XXX*/
@@ -217,10 +218,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
- u64 runlist, struct nouveau_channel **pchan)
+ u64 runlist, bool priv, struct nouveau_channel **pchan)
{
struct nouveau_cli *cli = (void *)device->object.client;
- static const u16 oclasses[] = { VOLTA_CHANNEL_GPFIFO_A,
+ static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A,
+ VOLTA_CHANNEL_GPFIFO_A,
PASCAL_CHANNEL_GPFIFO_A,
MAXWELL_CHANNEL_GPFIFO_A,
KEPLER_CHANNEL_GPFIFO_B,
@@ -234,6 +236,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
struct nv50_channel_gpfifo_v0 nv50;
struct fermi_channel_gpfifo_v0 fermi;
struct kepler_channel_gpfifo_a_v0 kepler;
+ struct volta_channel_gpfifo_a_v0 volta;
} args;
struct nouveau_channel *chan;
u32 size;
@@ -247,12 +250,22 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
/* create channel object */
do {
+ if (oclass[0] >= VOLTA_CHANNEL_GPFIFO_A) {
+ args.volta.version = 0;
+ args.volta.ilength = 0x02000;
+ args.volta.ioffset = 0x10000 + chan->push.addr;
+ args.volta.runlist = runlist;
+ args.volta.vmm = nvif_handle(&cli->vmm.vmm.object);
+ args.volta.priv = priv;
+ size = sizeof(args.volta);
+ } else
if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
args.kepler.version = 0;
args.kepler.ilength = 0x02000;
args.kepler.ioffset = 0x10000 + chan->push.addr;
args.kepler.runlist = runlist;
args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
+ args.kepler.priv = priv;
size = sizeof(args.kepler);
} else
if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
@@ -273,13 +286,20 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
ret = nvif_object_init(&device->object, 0, *oclass++,
&args, size, &chan->user);
if (ret == 0) {
- if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A)
+ if (chan->user.oclass >= VOLTA_CHANNEL_GPFIFO_A) {
+ chan->chid = args.volta.chid;
+ chan->inst = args.volta.inst;
+ chan->token = args.volta.token;
+ } else
+ if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A) {
chan->chid = args.kepler.chid;
- else
- if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO)
+ chan->inst = args.kepler.inst;
+ } else
+ if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
chan->chid = args.fermi.chid;
- else
+ } else {
chan->chid = args.nv50.chid;
+ }
return ret;
}
} while (*oclass);
@@ -448,7 +468,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
int
nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
- u32 arg0, u32 arg1, struct nouveau_channel **pchan)
+ u32 arg0, u32 arg1, bool priv,
+ struct nouveau_channel **pchan)
{
struct nouveau_cli *cli = (void *)device->object.client;
bool super;
@@ -458,7 +479,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
super = cli->base.super;
cli->base.super = true;
- ret = nouveau_channel_ind(drm, device, arg0, pchan);
+ ret = nouveau_channel_ind(drm, device, arg0, priv, pchan);
if (ret) {
NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
ret = nouveau_channel_dma(drm, device, pchan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 64454c2ebd90..28418f4e5748 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -10,6 +10,8 @@ struct nouveau_channel {
struct nouveau_drm *drm;
int chid;
+ u64 inst;
+ u32 token;
struct nvif_object vram;
struct nvif_object gart;
@@ -48,7 +50,8 @@ struct nouveau_channel {
int nouveau_channels_init(struct nouveau_drm *);
int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *,
- u32 arg0, u32 arg1, struct nouveau_channel **);
+ u32 arg0, u32 arg1, bool priv,
+ struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fd80661dff92..3f463c91314a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -403,6 +403,7 @@ nouveau_connector_destroy(struct drm_connector *connector)
if (nv_connector->aux.transfer) {
drm_dp_cec_unregister_connector(&nv_connector->aux);
drm_dp_aux_unregister(&nv_connector->aux);
+ kfree(nv_connector->aux.name);
}
kfree(connector);
}
@@ -1218,7 +1219,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
case DCB_CONNECTOR_DMS59_DP0:
case DCB_CONNECTOR_DMS59_DP1:
- case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort;
+ case DCB_CONNECTOR_DP :
+ case DCB_CONNECTOR_USB_C : return DRM_MODE_CONNECTOR_DisplayPort;
case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
case DCB_CONNECTOR_HDMI_0 :
case DCB_CONNECTOR_HDMI_1 :
@@ -1232,7 +1234,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
}
struct drm_connector *
-nouveau_connector_create(struct drm_device *dev, int index)
+nouveau_connector_create(struct drm_device *dev,
+ const struct dcb_output *dcbe)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -1240,6 +1243,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
struct nouveau_connector *nv_connector = NULL;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ char aux_name[48] = {0};
+ int index = dcbe->connector;
int type, ret = 0;
bool dummy;
@@ -1342,6 +1347,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
case DRM_MODE_CONNECTOR_eDP:
nv_connector->aux.dev = dev->dev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
+ snprintf(aux_name, sizeof(aux_name), "sor-%04x-%04x",
+ dcbe->hasht, dcbe->hashm);
+ nv_connector->aux.name = kstrdup(aux_name, GFP_KERNEL);
ret = drm_dp_aux_register(&nv_connector->aux);
if (ret) {
NV_ERROR(drm, "failed to register aux channel\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index f57ef35b1e5e..f43a8d63aef8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -38,6 +38,7 @@
#include "nouveau_encoder.h"
struct nvkm_i2c_port;
+struct dcb_output;
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
struct nouveau_backlight;
@@ -113,7 +114,7 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
}
struct drm_connector *
-nouveau_connector_create(struct drm_device *, int index);
+nouveau_connector_create(struct drm_device *, const struct dcb_output *);
extern int nouveau_tv_disable;
extern int nouveau_ignorelid;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 9109b69cd052..88a52f6b39fe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -47,6 +47,26 @@ nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
}
static int
+nouveau_debugfs_strap_peek(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
+ int ret;
+
+ ret = pm_runtime_get_sync(drm->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ seq_printf(m, "0x%08x\n",
+ nvif_rd32(&drm->client.device.object, 0x101000));
+
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_autosuspend(drm->dev->dev);
+
+ return 0;
+}
+
+static int
nouveau_debugfs_pstate_get(struct seq_file *m, void *data)
{
struct drm_device *drm = m->private;
@@ -185,7 +205,8 @@ static const struct file_operations nouveau_pstate_fops = {
};
static struct drm_info_list nouveau_debugfs_list[] = {
- { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+ { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+ { "strap_peek", nouveau_debugfs_strap_peek, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
@@ -199,8 +220,9 @@ static const struct nouveau_debugfs_files {
int
nouveau_drm_debugfs_init(struct drm_minor *minor)
{
+ struct nouveau_drm *drm = nouveau_drm(minor->dev);
struct dentry *dentry;
- int i;
+ int i, ret;
for (i = 0; i < ARRAY_SIZE(nouveau_debugfs_files); i++) {
dentry = debugfs_create_file(nouveau_debugfs_files[i].name,
@@ -211,9 +233,23 @@ nouveau_drm_debugfs_init(struct drm_minor *minor)
return -ENOMEM;
}
- return drm_debugfs_create_files(nouveau_debugfs_list,
- NOUVEAU_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
+ ret = drm_debugfs_create_files(nouveau_debugfs_list,
+ NOUVEAU_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ if (ret)
+ return ret;
+
+ /* Set the size of the vbios since we know it, and it's confusing to
+ * userspace if it wants to seek() but the file has a length of 0
+ */
+ dentry = debugfs_lookup("vbios.rom", minor->debugfs_root);
+ if (!dentry)
+ return 0;
+
+ d_inode(dentry)->i_size = drm->vbios.length;
+ dput(dentry);
+
+ return 0;
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 945afd34138e..078f65d849ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -101,7 +101,7 @@ nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);
if (user->func && user->func->doorbell)
- user->func->doorbell(user, chan->chid);
+ user->func->doorbell(user, chan->token);
chan->dma.ib_free--;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2b2baf6e0e0d..f900e94592f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -353,6 +353,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
case MAXWELL_CHANNEL_GPFIFO_A:
case PASCAL_CHANNEL_GPFIFO_A:
case VOLTA_CHANNEL_GPFIFO_A:
+ case TURING_CHANNEL_GPFIFO_A:
ret = nvc0_fence_create(drm);
break;
default:
@@ -370,7 +371,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
ret = nouveau_channel_new(drm, &drm->client.device,
nvif_fifo_runlist_ce(device), 0,
- &drm->cechan);
+ true, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
@@ -381,7 +382,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
device->info.chipset != 0xaa &&
device->info.chipset != 0xac) {
ret = nouveau_channel_new(drm, &drm->client.device,
- NvDmaFB, NvDmaTT, &drm->cechan);
+ NvDmaFB, NvDmaTT, false,
+ &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
@@ -393,7 +395,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
}
ret = nouveau_channel_new(drm, &drm->client.device,
- arg0, arg1, &drm->channel);
+ arg0, arg1, false, &drm->channel);
if (ret) {
NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
nouveau_accel_fini(drm);
@@ -1171,10 +1173,16 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
goto err_free;
}
+ err = nouveau_drm_device_init(drm);
+ if (err)
+ goto err_put;
+
platform_set_drvdata(pdev, drm);
return drm;
+err_put:
+ drm_dev_put(drm);
err_free:
nvkm_device_del(pdevice);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.h b/drivers/gpu/drm/nouveau/nouveau_vmm.h
index 7e3b118cf7c4..ede872f6f668 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.h
@@ -25,7 +25,6 @@ void nouveau_vma_unmap(struct nouveau_vma *);
struct nouveau_vmm {
struct nouveau_cli *cli;
struct nvif_vmm vmm;
- struct nvkm_vm *vm;
};
int nouveau_vmm_init(struct nouveau_cli *, s32 oclass, struct nouveau_vmm *);
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 18c7d064f75c..ef97dd223a32 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -34,6 +34,7 @@ int
nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
+ { TU104_DISP, -1 },
{ GV100_DISP, -1 },
{ GP102_DISP, -1 },
{ GP100_DISP, -1 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 03f676c18aad..c61b467cf45e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -79,7 +79,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_ENGINE_NVENC0 ] = "nvenc0",
[NVKM_ENGINE_NVENC1 ] = "nvenc1",
[NVKM_ENGINE_NVENC2 ] = "nvenc2",
- [NVKM_ENGINE_NVDEC ] = "nvdec",
+ [NVKM_ENGINE_NVDEC0 ] = "nvdec0",
+ [NVKM_ENGINE_NVDEC1 ] = "nvdec1",
+ [NVKM_ENGINE_NVDEC2 ] = "nvdec2",
[NVKM_ENGINE_PM ] = "pm",
[NVKM_ENGINE_SEC ] = "sec",
[NVKM_ENGINE_SEC2 ] = "sec2",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 80d784441904..177a23301d6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -6,3 +6,4 @@ nvkm-y += nvkm/engine/ce/gm200.o
nvkm-y += nvkm/engine/ce/gp100.o
nvkm-y += nvkm/engine/ce/gp102.o
nvkm-y += nvkm/engine/ce/gv100.o
+nvkm-y += nvkm/engine/ce/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c
new file mode 100644
index 000000000000..3c25043bbb33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+tu104_ce = {
+ .intr = gp100_ce_intr,
+ .sclass = {
+ { -1, -1, TURING_DMA_COPY_A },
+ {}
+ }
+};
+
+int
+tu104_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_engine_new_(&tu104_ce, device, index, true, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e294013426ce..bfbc9341e0c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2221,7 +2221,7 @@ nv132_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
- .nvdec = gp102_nvdec_new,
+ .nvdec[0] = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2257,7 +2257,7 @@ nv134_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp104_gr_new,
- .nvdec = gp102_nvdec_new,
+ .nvdec[0] = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2293,7 +2293,7 @@ nv136_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp104_gr_new,
- .nvdec = gp102_nvdec_new,
+ .nvdec[0] = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2329,7 +2329,7 @@ nv137_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp107_gr_new,
- .nvdec = gp102_nvdec_new,
+ .nvdec[0] = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2365,7 +2365,7 @@ nv138_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp107_gr_new,
- .nvdec = gp102_nvdec_new,
+ .nvdec[0] = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2430,10 +2430,74 @@ nv140_chipset = {
.dma = gv100_dma_new,
.fifo = gv100_fifo_new,
.gr = gv100_gr_new,
- .nvdec = gp102_nvdec_new,
+ .nvdec[0] = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
};
+static const struct nvkm_device_chip
+nv164_chipset = {
+ .name = "TU104",
+ .bar = tu104_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = tu104_devinit_new,
+ .fault = tu104_fault_new,
+ .fb = gv100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp102_ltc_new,
+ .mc = tu104_mc_new,
+ .mmu = tu104_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .therm = gp100_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = tu104_ce_new,
+ .ce[1] = tu104_ce_new,
+ .ce[2] = tu104_ce_new,
+ .ce[3] = tu104_ce_new,
+ .ce[4] = tu104_ce_new,
+ .disp = tu104_disp_new,
+ .dma = gv100_dma_new,
+ .fifo = tu104_fifo_new,
+};
+
+static const struct nvkm_device_chip
+nv166_chipset = {
+ .name = "TU106",
+ .bar = tu104_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = tu104_devinit_new,
+ .fault = tu104_fault_new,
+ .fb = gv100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp102_ltc_new,
+ .mc = tu104_mc_new,
+ .mmu = tu104_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .therm = gp100_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = tu104_ce_new,
+ .ce[1] = tu104_ce_new,
+ .ce[2] = tu104_ce_new,
+ .ce[3] = tu104_ce_new,
+ .ce[4] = tu104_ce_new,
+ .disp = tu104_disp_new,
+ .dma = gv100_dma_new,
+ .fifo = tu104_fifo_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -2529,7 +2593,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
_(NVENC0 , device->nvenc[0], device->nvenc[0]);
_(NVENC1 , device->nvenc[1], device->nvenc[1]);
_(NVENC2 , device->nvenc[2], device->nvenc[2]);
- _(NVDEC , device->nvdec , &device->nvdec->engine);
+ _(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine);
+ _(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine);
+ _(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine);
_(PM , device->pm , &device->pm->engine);
_(SEC , device->sec , device->sec);
_(SEC2 , device->sec2 , &device->sec2->engine);
@@ -2791,6 +2857,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x120: device->card_type = GM100; break;
case 0x130: device->card_type = GP100; break;
case 0x140: device->card_type = GV100; break;
+ case 0x160: device->card_type = TU100; break;
default:
break;
}
@@ -2883,6 +2950,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x138: device->chip = &nv138_chipset; break;
case 0x13b: device->chip = &nv13b_chipset; break;
case 0x140: device->chip = &nv140_chipset; break;
+ case 0x164: device->chip = &nv164_chipset; break;
+ case 0x166: device->chip = &nv166_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
@@ -2988,7 +3057,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_ENGINE_NVENC0 , nvenc[0]);
_(NVKM_ENGINE_NVENC1 , nvenc[1]);
_(NVKM_ENGINE_NVENC2 , nvenc[2]);
- _(NVKM_ENGINE_NVDEC , nvdec);
+ _(NVKM_ENGINE_NVDEC0 , nvdec[0]);
+ _(NVKM_ENGINE_NVDEC1 , nvdec[1]);
+ _(NVKM_ENGINE_NVDEC2 , nvdec[2]);
_(NVKM_ENGINE_PM , pm);
_(NVKM_ENGINE_SEC , sec);
_(NVKM_ENGINE_SEC2 , sec2);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index dde6bbafa709..092ddc4ffefa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -91,7 +91,7 @@ nvkm_udevice_info_v1(struct nvkm_device *device,
case ENGINE_A(MSENC ); break;
case ENGINE_A(VIC ); break;
case ENGINE_A(SEC2 ); break;
- case ENGINE_A(NVDEC ); break;
+ case ENGINE_B(NVDEC ); break;
case ENGINE_B(NVENC ); break;
default:
args->mthd = NV_DEVICE_INFO_INVALID;
@@ -175,6 +175,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
+ case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
default:
args->v0.family = 0;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 8089ac9a12e2..c6a257ba4347 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -15,6 +15,7 @@ nvkm-y += nvkm/engine/disp/gm200.o
nvkm-y += nvkm/engine/disp/gp100.o
nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
+nvkm-y += nvkm/engine/disp/tu104.o
nvkm-y += nvkm/engine/disp/vga.o
nvkm-y += nvkm/engine/disp/head.o
@@ -38,6 +39,7 @@ nvkm-y += nvkm/engine/disp/sorgk104.o
nvkm-y += nvkm/engine/disp/sorgm107.o
nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/sorgv100.o
+nvkm-y += nvkm/engine/disp/sortu104.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/dp.o
@@ -69,6 +71,7 @@ nvkm-y += nvkm/engine/disp/rootgm200.o
nvkm-y += nvkm/engine/disp/rootgp100.o
nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/rootgv100.o
+nvkm-y += nvkm/engine/disp/roottu104.o
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index d0a7e3456da1..47be0ba4aebe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -28,7 +28,7 @@
#include <core/gpuobj.h>
#include <subdev/timer.h>
-static int
+int
gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
struct nvkm_device *device = disp->engine.subdev.device;
@@ -36,7 +36,7 @@ gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
}
-static void
+void
gv100_disp_super(struct work_struct *work)
{
struct nv50_disp *disp =
@@ -257,7 +257,7 @@ gv100_disp_intr_head_timing(struct nv50_disp *disp, int head)
}
}
-static void
+void
gv100_disp_intr(struct nv50_disp *disp)
{
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -297,7 +297,7 @@ gv100_disp_intr(struct nv50_disp *disp)
nvkm_warn(subdev, "intr %08x\n", stat);
}
-static void
+void
gv100_disp_fini(struct nv50_disp *disp)
{
struct nvkm_device *device = disp->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 0f0c86c32ec3..790e42f460fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -144,6 +144,11 @@ void gm200_sor_route_set(struct nvkm_outp *, struct nvkm_ior *);
int gm200_sor_route_get(struct nvkm_outp *, int *);
void gm200_sor_dp_drive(struct nvkm_ior *, int, int, int, int, int);
+void gv100_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
+void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
+void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
+void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
+
void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
@@ -195,4 +200,6 @@ int gm200_sor_new(struct nvkm_disp *, int);
int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
int gv100_sor_new(struct nvkm_disp *, int);
+
+int tu104_sor_new(struct nvkm_disp *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 8580382ab248..c36a8a7cafa1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -78,6 +78,11 @@ void gf119_disp_intr(struct nv50_disp *);
void gf119_disp_super(struct work_struct *);
void gf119_disp_intr_error(struct nv50_disp *, int);
+void gv100_disp_fini(struct nv50_disp *);
+void gv100_disp_intr(struct nv50_disp *);
+void gv100_disp_super(struct work_struct *);
+int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
+
void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *);
void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *);
void nv50_disp_update_sppll1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 6ca4f9184b51..97de928cbde1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -37,4 +37,5 @@ extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
+extern const struct nvkm_disp_oclass tu104_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c
new file mode 100644
index 000000000000..ad438c62f66c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "rootnv50.h"
+#include "channv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+tu104_disp_root = {
+ .user = {
+ {{0,0,TU104_DISP_CURSOR }, gv100_disp_curs_new },
+ {{0,0,TU104_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+ {{0,0,TU104_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
+ {{0,0,TU104_DISP_WINDOW_CHANNEL_DMA }, gv100_disp_wndw_new },
+ {}
+ },
+};
+
+static int
+tu104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&tu104_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+tu104_disp_root_oclass = {
+ .base.oclass = TU104_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = tu104_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
index 8ba881a729ee..b0597ff9a714 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
@@ -23,7 +23,7 @@
#include <subdev/timer.h>
-static void
+void
gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -31,7 +31,7 @@ gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
nvkm_mask(device, 0x616550 + hoff, 0x0c00003f, 0x08000000 | watermark);
}
-static void
+void
gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -40,7 +40,7 @@ gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
nvkm_mask(device, 0x61656c + hoff, 0x00ffffff, v);
}
-static void
+void
gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -54,7 +54,7 @@ gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
);
}
-static void
+void
gv100_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c
new file mode 100644
index 000000000000..df026a525ef1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+#include <subdev/timer.h>
+
+static void
+tu104_sor_dp_vcpi(struct nvkm_ior *sor, int head,
+ u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const u32 hoff = head * 0x800;
+
+ nvkm_mask(device, 0x61657c + hoff, 0xffffffff, (aligned << 16) | pbn);
+ nvkm_mask(device, 0x616578 + hoff, 0x00003f3f, (slot_nr << 8) | slot);
+}
+
+static int
+tu104_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const u32 soff = nv50_ior_base(sor);
+ const u32 loff = nv50_sor_link(sor);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+
+ clksor |= sor->dp.bw << 18;
+ dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
+ if (sor->dp.mst)
+ dpctrl |= 0x40000000;
+ if (sor->dp.ef)
+ dpctrl |= 0x00004000;
+
+ nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+
+ /*XXX*/
+ nvkm_msec(device, 40, NVKM_DELAY);
+ nvkm_mask(device, 0x612300 + soff, 0x00030000, 0x00010000);
+ nvkm_mask(device, 0x61c10c + loff, 0x00000003, 0x00000001);
+
+ nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
+ return 0;
+}
+
+static const struct nvkm_ior_func
+tu104_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = tu104_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu104_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
+ },
+ .hda = {
+ .hpd = gf119_hda_hpd,
+ .eld = gf119_hda_eld,
+ },
+};
+
+int
+tu104_sor_new(struct nvkm_disp *disp, int id)
+{
+ return nvkm_ior_new_(&tu104_sor, disp, SOR, id);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c
new file mode 100644
index 000000000000..13fa21459d38
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+#include "head.h"
+#include "ior.h"
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/gpuobj.h>
+#include <subdev/timer.h>
+
+static int
+tu104_disp_init(struct nv50_disp *disp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ struct nvkm_head *head;
+ int i, j;
+ u32 tmp;
+
+ /* Claim ownership of display. */
+ if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
+ nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
+ break;
+ ) < 0)
+ return -EBUSY;
+ }
+
+ /* Lock pin capabilities. */
+ tmp = 0x00000021; /*XXX*/
+ nvkm_wr32(device, 0x640008, tmp);
+
+ /* SOR capabilities. */
+ for (i = 0; i < disp->sor.nr; i++) {
+ tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+ nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
+ nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
+ }
+
+ /* Head capabilities. */
+ list_for_each_entry(head, &disp->base.head, head) {
+ const int id = head->id;
+
+ /* RG. */
+ tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
+ nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);
+
+ /* POSTCOMP. */
+ for (j = 0; j < 5 * 4; j += 4) {
+ tmp = nvkm_rd32(device, 0x616140 + (id * 0x800) + j);
+ nvkm_wr32(device, 0x640680 + (id * 0x20) + j, tmp);
+ }
+ }
+
+ /* Window capabilities. */
+ for (i = 0; i < disp->wndw.nr; i++) {
+ nvkm_mask(device, 0x640004, 1 << i, 1 << i);
+ for (j = 0; j < 6 * 4; j += 4) {
+ tmp = nvkm_rd32(device, 0x630100 + (i * 0x800) + j);
+ nvkm_mask(device, 0x640780 + (i * 0x20) + j, 0xffffffff, tmp);
+ }
+ nvkm_mask(device, 0x64000c, 0x00000100, 0x00000100);
+ }
+
+ /* IHUB capabilities. */
+ for (i = 0; i < 3; i++) {
+ tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
+ nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
+ }
+
+ nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);
+
+ /* Setup instance memory. */
+ switch (nvkm_memory_target(disp->inst->memory)) {
+ case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
+ case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
+ case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
+ default:
+ break;
+ }
+ nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
+ nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);
+
+ /* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
+ nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
+ nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */
+
+ /* EXC_OTHER: CURSn, CORE. */
+ nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
+ 0x00000001); /* MSK. */
+ nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */
+
+ /* EXC_WINIM. */
+ nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
+ nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */
+
+ /* EXC_WIN. */
+ nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
+ nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */
+
+ /* HEAD_TIMING(n): VBLANK. */
+ list_for_each_entry(head, &disp->base.head, head) {
+ const u32 hoff = head->id * 4;
+ nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
+ nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
+ }
+
+ /* OR. */
+ nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
+ nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
+ return 0;
+}
+
+static const struct nv50_disp_func
+tu104_disp = {
+ .init = tu104_disp_init,
+ .fini = gv100_disp_fini,
+ .intr = gv100_disp_intr,
+ .uevent = &gv100_disp_chan_uevent,
+ .super = gv100_disp_super,
+ .root = &tu104_disp_root_oclass,
+ .wndw = { .cnt = gv100_disp_wndw_cnt },
+ .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
+ .sor = { .cnt = gv100_sor_cnt, .new = tu104_sor_new },
+ .ramht_size = 0x2000,
+};
+
+int
+tu104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return nv50_disp_new_(&tu104_disp, device, index, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
index 98911805aabf..5d3b641dbb14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
@@ -118,7 +118,7 @@ gv100_disp_wndw_mthd_base = {
const struct nv50_disp_chan_mthd
gv100_disp_wndw_mthd = {
- .name = "Base",
+ .name = "Window",
.addr = 0x001000,
.prev = 0x000800,
.data = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index f00408577a6a..87d8e054e40a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -16,6 +16,7 @@ nvkm-y += nvkm/engine/fifo/gm20b.o
nvkm-y += nvkm/engine/fifo/gp100.o
nvkm-y += nvkm/engine/fifo/gp10b.o
nvkm-y += nvkm/engine/fifo/gv100.o
+nvkm-y += nvkm/engine/fifo/tu104.o
nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
@@ -33,5 +34,7 @@ nvkm-y += nvkm/engine/fifo/gpfifog84.o
nvkm-y += nvkm/engine/fifo/gpfifogf100.o
nvkm-y += nvkm/engine/fifo/gpfifogk104.o
nvkm-y += nvkm/engine/fifo/gpfifogv100.o
+nvkm-y += nvkm/engine/fifo/gpfifotu104.o
nvkm-y += nvkm/engine/fifo/usergv100.o
+nvkm-y += nvkm/engine/fifo/usertu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 3ffef236189e..2c7c5afc1ea5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -17,6 +17,7 @@ struct nvkm_fifo_chan_func {
bool suspend);
int (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
void (*object_dtor)(struct nvkm_fifo_chan *, int);
+ u32 (*submit_token)(struct nvkm_fifo_chan *);
};
int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
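
The new submit_token hook lets each channel backend report the doorbell value that userspace writes into the usermode region to kick work on the channel; the constructor hands it back to the client alongside the channel ID and instance address. A minimal sketch of the wiring, assuming a hypothetical backend called example_fifo_gpfifo (all other callbacks elided):

static u32
example_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *chan)
{
	/* Volta-style encoding: the token is just the channel ID. */
	return chan->chid;
}

static const struct nvkm_fifo_chan_func
example_fifo_gpfifo = {
	/* .dtor, .init, .fini, .engine_* hooks as in the gpfifo code below */
	.submit_token = example_fifo_gpfifo_submit_token,
};

The constructor side then returns *token = chan->base.func->submit_token(&chan->base); as gv100_fifo_gpfifo_new_() does later in this patch.
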
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 8e28ba6b2307..a14545d871d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -14,6 +14,8 @@ struct gk104_fifo_chan {
struct list_head head;
bool killed;
+ struct nvkm_memory *mthd;
+
struct {
struct nvkm_gpuobj *inst;
struct nvkm_vma *vma;
@@ -36,4 +38,15 @@ int gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *);
int gv100_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
void *data, u32 size, struct nvkm_object **);
+int gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *,
+ struct gk104_fifo *, u64 *, u16 *, u64, u64, u64,
+ u64 *, bool, u32 *, const struct nvkm_oclass *,
+ struct nvkm_object **);
+int gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *,
+ struct nvkm_engine *);
+int gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *,
+ struct nvkm_engine *, bool);
+
+int tu104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index f69576868164..10a2e7039a75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -346,10 +346,10 @@ gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
if (eu && eu->data2) {
switch (eu->data2) {
case NVKM_SUBDEV_BAR:
- nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+ nvkm_bar_bar1_reset(device);
break;
case NVKM_SUBDEV_INSTMEM:
- nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+ nvkm_bar_bar2_reset(device);
break;
case NVKM_ENGINE_IFB:
nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index afccf9721cf0..1053fe796466 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -149,16 +149,41 @@ gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
}
void
-gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
+gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
+ struct nvkm_memory *mem, int nr)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int target;
+
+ switch (nvkm_memory_target(mem)) {
+ case NVKM_MEM_TARGET_VRAM: target = 0; break;
+ case NVKM_MEM_TARGET_NCOH: target = 3; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
+ (target << 28));
+ nvkm_wr32(device, 0x002274, (runl << 20) | nr);
+
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
+ break;
+ ) < 0)
+ nvkm_error(subdev, "runlist %d update timeout\n", runl);
+}
+
+void
+gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{
const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
struct gk104_fifo_chan *chan;
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
struct nvkm_memory *mem;
struct nvkm_fifo_cgrp *cgrp;
int nr = 0;
- int target;
mutex_lock(&subdev->mutex);
mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
@@ -177,24 +202,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
}
nvkm_done(mem);
- switch (nvkm_memory_target(mem)) {
- case NVKM_MEM_TARGET_VRAM: target = 0; break;
- case NVKM_MEM_TARGET_NCOH: target = 3; break;
- default:
- WARN_ON(1);
- goto unlock;
- }
-
- nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
- (target << 28));
- nvkm_wr32(device, 0x002274, (runl << 20) | nr);
-
- if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
- break;
- ) < 0)
- nvkm_error(subdev, "runlist %d update timeout\n", runl);
-unlock:
+ func->commit(fifo, runl, mem, nr);
mutex_unlock(&subdev->mutex);
}
@@ -238,6 +246,29 @@ const struct gk104_fifo_runlist_func
gk104_fifo_runlist = {
.size = 8,
.chan = gk104_fifo_runlist_chan,
+ .commit = gk104_fifo_runlist_commit,
+};
+
+void
+gk104_fifo_pbdma_init(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
+}
+
+int
+gk104_fifo_pbdma_nr(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ /* Determine number of PBDMAs by checking valid enable bits. */
+ nvkm_wr32(device, 0x000204, 0xffffffff);
+ return hweight32(nvkm_rd32(device, 0x000204));
+}
+
+const struct gk104_fifo_pbdma_func
+gk104_fifo_pbdma = {
+ .nr = gk104_fifo_pbdma_nr,
+ .init = gk104_fifo_pbdma_init,
};
static void
@@ -267,7 +298,7 @@ gk104_fifo_recover_work(struct work_struct *w)
}
for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
- gk104_fifo_runlist_commit(fifo, runl);
+ gk104_fifo_runlist_update(fifo, runl);
nvkm_wr32(device, 0x00262c, runm);
nvkm_mask(device, 0x002630, runm, 0x00000000);
@@ -456,10 +487,10 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
if (ee && ee->data2) {
switch (ee->data2) {
case NVKM_SUBDEV_BAR:
- nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+ nvkm_bar_bar1_reset(device);
break;
case NVKM_SUBDEV_INSTMEM:
- nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+ nvkm_bar_bar2_reset(device);
break;
case NVKM_ENGINE_IFB:
nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
@@ -904,9 +935,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
enum nvkm_devidx engidx;
u32 *map;
- /* Determine number of PBDMAs by checking valid enable bits. */
- nvkm_wr32(device, 0x000204, 0xffffffff);
- fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
+ fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
/* Read PBDMA->runlist(s) mapping from HW. */
@@ -978,7 +1007,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
int i;
/* Enable PBDMAs. */
- nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
+ fifo->func->pbdma->init(fifo);
/* PBDMA[n] */
for (i = 0; i < fifo->pbdma_nr; i++) {
@@ -995,8 +1024,8 @@ gk104_fifo_init(struct nvkm_fifo *base)
nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
- if (fifo->func->init_pbdma_timeout)
- fifo->func->init_pbdma_timeout(fifo);
+ if (fifo->func->pbdma->init_timeout)
+ fifo->func->pbdma->init_timeout(fifo);
nvkm_wr32(device, 0x002100, 0xffffffff);
nvkm_wr32(device, 0x002140, 0x7fffffff);
@@ -1175,6 +1204,7 @@ gk104_fifo_fault_gpcclient[] = {
static const struct gk104_fifo_func
gk104_fifo = {
+ .pbdma = &gk104_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index d295b81e18d6..d4e565658f46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -45,7 +45,11 @@ struct gk104_fifo {
};
struct gk104_fifo_func {
- void (*init_pbdma_timeout)(struct gk104_fifo *);
+ const struct gk104_fifo_pbdma_func {
+ int (*nr)(struct gk104_fifo *);
+ void (*init)(struct gk104_fifo *);
+ void (*init_timeout)(struct gk104_fifo *);
+ } *pbdma;
struct {
const struct nvkm_enum *access;
@@ -61,6 +65,8 @@ struct gk104_fifo_func {
struct nvkm_memory *, u32 offset);
void (*chan)(struct gk104_fifo_chan *,
struct nvkm_memory *, u32 offset);
+ void (*commit)(struct gk104_fifo *, int runl,
+ struct nvkm_memory *, int entries);
} *runlist;
struct gk104_fifo_user_user {
@@ -81,8 +87,11 @@ int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
int index, int nr, struct nvkm_fifo **);
void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
-void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
+void gk104_fifo_runlist_update(struct gk104_fifo *, int runl);
+extern const struct gk104_fifo_pbdma_func gk104_fifo_pbdma;
+int gk104_fifo_pbdma_nr(struct gk104_fifo *);
+void gk104_fifo_pbdma_init(struct gk104_fifo *);
extern const struct nvkm_enum gk104_fifo_fault_access[];
extern const struct nvkm_enum gk104_fifo_fault_engine[];
extern const struct nvkm_enum gk104_fifo_fault_reason[];
@@ -91,15 +100,30 @@ extern const struct nvkm_enum gk104_fifo_fault_gpcclient[];
extern const struct gk104_fifo_runlist_func gk104_fifo_runlist;
void gk104_fifo_runlist_chan(struct gk104_fifo_chan *,
struct nvkm_memory *, u32);
+void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl,
+ struct nvkm_memory *, int);
extern const struct gk104_fifo_runlist_func gk110_fifo_runlist;
void gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
struct nvkm_memory *, u32);
-void gk208_fifo_init_pbdma_timeout(struct gk104_fifo *);
+extern const struct gk104_fifo_pbdma_func gk208_fifo_pbdma;
+void gk208_fifo_pbdma_init_timeout(struct gk104_fifo *);
extern const struct nvkm_enum gm107_fifo_fault_engine[];
extern const struct gk104_fifo_runlist_func gm107_fifo_runlist;
+extern const struct gk104_fifo_pbdma_func gm200_fifo_pbdma;
+int gm200_fifo_pbdma_nr(struct gk104_fifo *);
+
extern const struct nvkm_enum gp100_fifo_fault_engine[];
+
+extern const struct nvkm_enum gv100_fifo_fault_access[];
+extern const struct nvkm_enum gv100_fifo_fault_reason[];
+extern const struct nvkm_enum gv100_fifo_fault_hubclient[];
+extern const struct nvkm_enum gv100_fifo_fault_gpcclient[];
+void gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
+ struct nvkm_memory *, u32);
+void gv100_fifo_runlist_chan(struct gk104_fifo_chan *,
+ struct nvkm_memory *, u32);
#endif
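
Taken together, the gk104_fifo_func changes move the chip-specific PBDMA and runlist-submission behaviour behind small vtables: .pbdma provides nr/init/init_timeout (replacing the old init_pbdma_timeout hook), and .runlist gains a .commit callback so gk104_fifo_runlist_update() only assembles the list and delegates the hardware write. A sketch of what a chip definition now supplies, patterned on the gk104/gm200/tu104 instances in this patch (the example_* names are illustrative):

static const struct gk104_fifo_pbdma_func
example_fifo_pbdma = {
	.nr = gm200_fifo_pbdma_nr,		/* PBDMA count read from 0x002004 */
	.init = gk104_fifo_pbdma_init,		/* enable mask written to 0x000204 */
	.init_timeout = gk208_fifo_pbdma_init_timeout,
};

static const struct gk104_fifo_runlist_func
example_fifo_runlist = {
	.size = 16,
	.cgrp = gv100_fifo_runlist_cgrp,
	.chan = gv100_fifo_runlist_chan,
	.commit = gk104_fifo_runlist_commit,	/* or a chip-specific submit path */
};

static const struct gk104_fifo_func
example_fifo = {
	.pbdma = &example_fifo_pbdma,
	.runlist = &example_fifo_runlist,
	/* fault tables, .user, .chan, etc. as before */
};
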
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index ac7655a130fb..8adfa6b182cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -43,10 +43,12 @@ gk110_fifo_runlist = {
.size = 8,
.cgrp = gk110_fifo_runlist_cgrp,
.chan = gk104_fifo_runlist_chan,
+ .commit = gk104_fifo_runlist_commit,
};
static const struct gk104_fifo_func
gk110_fifo = {
+ .pbdma = &gk104_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 5ea7e452cc66..9553fb4af601 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -27,7 +27,7 @@
#include <nvif/class.h>
void
-gk208_fifo_init_pbdma_timeout(struct gk104_fifo *fifo)
+gk208_fifo_pbdma_init_timeout(struct gk104_fifo *fifo)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
int i;
@@ -36,9 +36,16 @@ gk208_fifo_init_pbdma_timeout(struct gk104_fifo *fifo)
nvkm_wr32(device, 0x04012c + (i * 0x2000), 0x0000ffff);
}
+const struct gk104_fifo_pbdma_func
+gk208_fifo_pbdma = {
+ .nr = gk104_fifo_pbdma_nr,
+ .init = gk104_fifo_pbdma_init,
+ .init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
static const struct gk104_fifo_func
gk208_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gk208_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index 535a0eb67a5f..a4c6ac3cd6c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -26,7 +26,7 @@
static const struct gk104_fifo_func
gk20a_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gk208_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index 79ae19b1db67..acf230764cb0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -41,6 +41,7 @@ gm107_fifo_runlist = {
.size = 8,
.cgrp = gk110_fifo_runlist_cgrp,
.chan = gm107_fifo_runlist_chan,
+ .commit = gk104_fifo_runlist_commit,
};
const struct nvkm_enum
@@ -68,7 +69,7 @@ gm107_fifo_fault_engine[] = {
static const struct gk104_fifo_func
gm107_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gk208_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gm107_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index 49565faa854d..b96c1c5d6577 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -26,9 +26,23 @@
#include <nvif/class.h>
+int
+gm200_fifo_pbdma_nr(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ return nvkm_rd32(device, 0x002004) & 0x000000ff;
+}
+
+const struct gk104_fifo_pbdma_func
+gm200_fifo_pbdma = {
+ .nr = gm200_fifo_pbdma_nr,
+ .init = gk104_fifo_pbdma_init,
+ .init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
static const struct gk104_fifo_func
gm200_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gm107_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 46736513bd11..a49539b9e4ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -26,7 +26,7 @@
static const struct gk104_fifo_func
gm20b_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gm107_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index e2f8f9087d7c..54377e0f6a88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -52,7 +52,7 @@ gp100_fifo_fault_engine[] = {
static const struct gk104_fifo_func
gp100_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gp100_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 7733bf7c6545..778ba7e46fb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -26,7 +26,7 @@
static const struct gk104_fifo_func
gp10b_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gp100_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 118b37aea318..728a1edbf98c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -85,7 +85,7 @@ gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
case NVKM_ENGINE_MSVLD : return 0x0270;
case NVKM_ENGINE_VIC : return 0x0280;
case NVKM_ENGINE_MSENC : return 0x0290;
- case NVKM_ENGINE_NVDEC : return 0x02100270;
+ case NVKM_ENGINE_NVDEC0: return 0x02100270;
case NVKM_ENGINE_NVENC0: return 0x02100290;
case NVKM_ENGINE_NVENC1: return 0x0210;
default:
@@ -192,7 +192,7 @@ gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
gk104_fifo_runlist_remove(fifo, chan);
nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
gk104_fifo_gpfifo_kick(chan);
- gk104_fifo_runlist_commit(fifo, chan->runl);
+ gk104_fifo_runlist_update(fifo, chan->runl);
}
nvkm_wr32(device, 0x800000 + coff, 0x00000000);
@@ -213,7 +213,7 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
if (list_empty(&chan->head) && !chan->killed) {
gk104_fifo_runlist_insert(fifo, chan);
nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
- gk104_fifo_runlist_commit(fifo, chan->runl);
+ gk104_fifo_runlist_update(fifo, chan->runl);
nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
}
}
@@ -222,6 +222,7 @@ void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ nvkm_memory_unref(&chan->mthd);
kfree(chan->cgrp);
return chan;
}
@@ -240,7 +241,7 @@ gk104_fifo_gpfifo_func = {
static int
gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
- u64 vmm, u64 ioffset, u64 ilength,
+ u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
@@ -279,6 +280,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
return ret;
*chid = chan->base.chid;
+ *inst = chan->base.inst->addr;
/* Hack to support GPUs where even individual channels should be
* part of a channel group.
@@ -315,6 +317,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+ nvkm_wo32(chan->base.inst, 0xe4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
@@ -337,15 +340,19 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
"ioffset %016llx ilength %08x "
- "runlist %016llx\n",
+ "runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
- args->v0.ilength, args->v0.runlist);
+ args->v0.ilength, args->v0.runlist, args->v0.priv);
+ if (args->v0.priv && !oclass->client->super)
+ return -EINVAL;
return gk104_fifo_gpfifo_new_(fifo,
&args->v0.runlist,
&args->v0.chid,
args->v0.vmm,
args->v0.ioffset,
args->v0.ilength,
+ &args->v0.inst,
+ args->v0.priv,
oclass, pobject);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
index 9598853ced56..a7462cf59d65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -25,9 +25,15 @@
#include <core/client.h>
#include <core/gpuobj.h>
-#include <nvif/cla06f.h>
+#include <nvif/clc36f.h>
#include <nvif/unpack.h>
+static u32
+gv100_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *chan)
+{
+ return chan->chid;
+}
+
static int
gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
{
@@ -56,7 +62,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
return ret;
}
-static int
+int
gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine, bool suspend)
{
@@ -79,7 +85,7 @@ gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
return ret;
}
-static int
+int
gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
@@ -100,8 +106,8 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
return gv100_fifo_gpfifo_engine_valid(chan, false, true);
}
-const struct nvkm_fifo_chan_func
-gv100_fifo_gpfifo_func = {
+static const struct nvkm_fifo_chan_func
+gv100_fifo_gpfifo = {
.dtor = gk104_fifo_gpfifo_dtor,
.init = gk104_fifo_gpfifo_init,
.fini = gk104_fifo_gpfifo_fini,
@@ -110,19 +116,23 @@ gv100_fifo_gpfifo_func = {
.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
.engine_init = gv100_fifo_gpfifo_engine_init,
.engine_fini = gv100_fifo_gpfifo_engine_fini,
+ .submit_token = gv100_fifo_gpfifo_submit_token,
};
-static int
-gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
- u64 vmm, u64 ioffset, u64 ilength,
- const struct nvkm_oclass *oclass,
+int
+gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
+ struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
+ u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
+ u32 *token, const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret, i;
unsigned long engm;
u64 subdevs = 0;
- u64 usermem;
+ u64 usermem, mthd;
+ u32 size;
if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
return -EINVAL;
@@ -142,14 +152,15 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
chan->runl = runlist;
INIT_LIST_HEAD(&chan->head);
- ret = nvkm_fifo_chan_ctor(&gv100_fifo_gpfifo_func, &fifo->base,
- 0x1000, 0x1000, true, vmm, 0, subdevs,
- 1, fifo->user.bar->addr, 0x200,
+ ret = nvkm_fifo_chan_ctor(func, &fifo->base, 0x1000, 0x1000, true, vmm,
+ 0, subdevs, 1, fifo->user.bar->addr, 0x200,
oclass, &chan->base);
if (ret)
return ret;
*chid = chan->base.chid;
+ *inst = chan->base.inst->addr;
+ *token = chan->base.func->submit_token(&chan->base);
/* Hack to support GPUs where even individual channels should be
* part of a channel group.
@@ -173,6 +184,20 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
nvkm_done(fifo->user.mem);
usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+ /* Allocate fault method buffer (magics come from nvgpu). */
+ size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
+ size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
+ size = roundup(size, PAGE_SIZE);
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
+ &chan->mthd);
+ if (ret)
+ return ret;
+
+ mthd = nvkm_memory_bar2(chan->mthd);
+ if (mthd == ~0ULL)
+ return -EFAULT;
+
/* RAMFC */
nvkm_kmap(chan->base.inst);
nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
@@ -184,13 +209,13 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
(ilength << 16));
nvkm_wo32(chan->base.inst, 0x084, 0x20400000);
nvkm_wo32(chan->base.inst, 0x094, 0x30000001);
- nvkm_wo32(chan->base.inst, 0x0e4, 0x00000020);
+ nvkm_wo32(chan->base.inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->base.inst, 0x0e8, chan->base.chid);
- nvkm_wo32(chan->base.inst, 0x0f4, 0x00001100);
+ nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
- nvkm_wo32(chan->base.inst, 0x220, 0x020a1000);
- nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
+ nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd));
+ nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
nvkm_done(chan->base.inst);
return gv100_fifo_gpfifo_engine_valid(chan, true, true);
}
@@ -201,7 +226,7 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
{
struct nvkm_object *parent = oclass->parent;
union {
- struct kepler_channel_gpfifo_a_v0 v0;
+ struct volta_channel_gpfifo_a_v0 v0;
} *args = data;
int ret = -ENOSYS;
@@ -209,15 +234,20 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
"ioffset %016llx ilength %08x "
- "runlist %016llx\n",
+ "runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
- args->v0.ilength, args->v0.runlist);
- return gv100_fifo_gpfifo_new_(fifo,
+ args->v0.ilength, args->v0.runlist, args->v0.priv);
+ if (args->v0.priv && !oclass->client->super)
+ return -EINVAL;
+ return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
&args->v0.runlist,
&args->v0.chid,
args->v0.vmm,
args->v0.ioffset,
args->v0.ilength,
+ &args->v0.inst,
+ args->v0.priv,
+ &args->v0.token,
oclass, pobject);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c
new file mode 100644
index 000000000000..ff70484dd01a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "changk104.h"
+#include "cgrp.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+
+#include <nvif/clc36f.h>
+#include <nvif/unpack.h>
+
+static u32
+tu104_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *base)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ return (chan->runl << 16) | chan->base.chid;
+}
+
+static const struct nvkm_fifo_chan_func
+tu104_fifo_gpfifo = {
+ .dtor = gk104_fifo_gpfifo_dtor,
+ .init = gk104_fifo_gpfifo_init,
+ .fini = gk104_fifo_gpfifo_fini,
+ .ntfy = gf100_fifo_chan_ntfy,
+ .engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+ .engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+ .engine_init = gv100_fifo_gpfifo_engine_init,
+ .engine_fini = gv100_fifo_gpfifo_engine_fini,
+ .submit_token = tu104_fifo_gpfifo_submit_token,
+};
+
+int
+tu104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct volta_channel_gpfifo_a_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
+ "ioffset %016llx ilength %08x "
+ "runlist %016llx priv %d\n",
+ args->v0.version, args->v0.vmm, args->v0.ioffset,
+ args->v0.ilength, args->v0.runlist, args->v0.priv);
+ if (args->v0.priv && !oclass->client->super)
+ return -EINVAL;
+ return gv100_fifo_gpfifo_new_(&tu104_fifo_gpfifo, fifo,
+ &args->v0.runlist,
+ &args->v0.chid,
+ args->v0.vmm,
+ args->v0.ioffset,
+ args->v0.ilength,
+ &args->v0.inst,
+ args->v0.priv,
+ &args->v0.token,
+ oclass, pobject);
+ }
+
+ return ret;
+}
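
Relative to the GV100 path, the only functional difference here is the token encoding: Volta's submit_token returns the bare channel ID, while Turing packs the runlist index into the upper half so the doorbell write also identifies which runlist to scan. A small sketch of that layout, matching tu104_fifo_gpfifo_submit_token() above (the helpers themselves are illustrative, not part of the patch):

/* Turing work-submission token:
 *   bits 31:16  runlist index
 *   bits 15:0   channel ID
 */
static inline u32
turing_submit_token_pack(u32 runl, u32 chid)
{
	return (runl << 16) | (chid & 0xffff);
}

static inline void
turing_submit_token_unpack(u32 token, u32 *runl, u32 *chid)
{
	*runl = token >> 16;
	*chid = token & 0xffff;
}
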
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 4e1d159c0ae7..6ee1bb32a071 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -28,7 +28,7 @@
#include <nvif/class.h>
-static void
+void
gv100_fifo_runlist_chan(struct gk104_fifo_chan *chan,
struct nvkm_memory *memory, u32 offset)
{
@@ -42,7 +42,7 @@ gv100_fifo_runlist_chan(struct gk104_fifo_chan *chan,
nvkm_wo32(memory, offset + 0xc, upper_32_bits(inst));
}
-static void
+void
gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
struct nvkm_memory *memory, u32 offset)
{
@@ -57,9 +57,10 @@ gv100_fifo_runlist = {
.size = 16,
.cgrp = gv100_fifo_runlist_cgrp,
.chan = gv100_fifo_runlist_chan,
+ .commit = gk104_fifo_runlist_commit,
};
-static const struct nvkm_enum
+const struct nvkm_enum
gv100_fifo_fault_gpcclient[] = {
{ 0x00, "T1_0" },
{ 0x01, "T1_1" },
@@ -161,7 +162,7 @@ gv100_fifo_fault_gpcclient[] = {
{}
};
-static const struct nvkm_enum
+const struct nvkm_enum
gv100_fifo_fault_hubclient[] = {
{ 0x00, "VIP" },
{ 0x01, "CE0" },
@@ -223,7 +224,7 @@ gv100_fifo_fault_hubclient[] = {
{}
};
-static const struct nvkm_enum
+const struct nvkm_enum
gv100_fifo_fault_reason[] = {
{ 0x00, "PDE" },
{ 0x01, "PDE_SIZE" },
@@ -271,7 +272,7 @@ gv100_fifo_fault_engine[] = {
{}
};
-static const struct nvkm_enum
+const struct nvkm_enum
gv100_fifo_fault_access[] = {
{ 0x0, "VIRT_READ" },
{ 0x1, "VIRT_WRITE" },
@@ -287,7 +288,7 @@ gv100_fifo_fault_access[] = {
static const struct gk104_fifo_func
gv100_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gm200_fifo_pbdma,
.fault.access = gv100_fifo_fault_access,
.fault.engine = gv100_fifo_fault_engine,
.fault.reason = gv100_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c
new file mode 100644
index 000000000000..98c80705bc61
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "cgrp.h"
+#include "changk104.h"
+#include "user.h"
+
+#include <core/gpuobj.h>
+
+#include <nvif/class.h>
+
+static void
+tu104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
+ struct nvkm_memory *mem, int nr)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u64 addr = nvkm_memory_addr(mem);
+ /*XXX: target? */
+
+ nvkm_wr32(device, 0x002b00 + (runl * 0x10), lower_32_bits(addr));
+ nvkm_wr32(device, 0x002b04 + (runl * 0x10), upper_32_bits(addr));
+ nvkm_wr32(device, 0x002b08 + (runl * 0x10), nr);
+
+ /*XXX: how to wait? can you even wait? */
+}
+
+const struct gk104_fifo_runlist_func
+tu104_fifo_runlist = {
+ .size = 16,
+ .cgrp = gv100_fifo_runlist_cgrp,
+ .chan = gv100_fifo_runlist_chan,
+ .commit = tu104_fifo_runlist_commit,
+};
+
+static const struct nvkm_enum
+tu104_fifo_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x03, "PTP" },
+ { 0x06, "PWR_PMU" },
+ { 0x08, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x09, "PERF" },
+ { 0x1f, "PHYSICAL" },
+ { 0x20, "HOST0" },
+ { 0x21, "HOST1" },
+ { 0x22, "HOST2" },
+ { 0x23, "HOST3" },
+ { 0x24, "HOST4" },
+ { 0x25, "HOST5" },
+ { 0x26, "HOST6" },
+ { 0x27, "HOST7" },
+ { 0x28, "HOST8" },
+ { 0x29, "HOST9" },
+ { 0x2a, "HOST10" },
+ { 0x2b, "HOST11" },
+ { 0x2c, "HOST12" },
+ { 0x2d, "HOST13" },
+ { 0x2e, "HOST14" },
+ { 0x80, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0xc0, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ {}
+};
+
+static void
+tu104_fifo_pbdma_init(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ const u32 mask = (1 << fifo->pbdma_nr) - 1;
+ /*XXX: this is a bit of a guess at this point in time. */
+ nvkm_mask(device, 0xb65000, 0x80000fff, 0x80000000 | mask);
+}
+
+static const struct gk104_fifo_pbdma_func
+tu104_fifo_pbdma = {
+ .nr = gm200_fifo_pbdma_nr,
+ .init = tu104_fifo_pbdma_init,
+ .init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
+static const struct gk104_fifo_func
+tu104_fifo = {
+ .pbdma = &tu104_fifo_pbdma,
+ .fault.access = gv100_fifo_fault_access,
+ .fault.engine = tu104_fifo_fault_engine,
+ .fault.reason = gv100_fifo_fault_reason,
+ .fault.hubclient = gv100_fifo_fault_hubclient,
+ .fault.gpcclient = gv100_fifo_fault_gpcclient,
+ .runlist = &tu104_fifo_runlist,
+ .user = {{-1,-1,VOLTA_USERMODE_A }, tu104_fifo_user_new },
+ .chan = {{ 0, 0,TURING_CHANNEL_GPFIFO_A}, tu104_fifo_gpfifo_new },
+ .cgrp_force = true,
+};
+
+int
+tu104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&tu104_fifo, device, index, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
index ed840921ebe8..14b0c6bde8eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
@@ -3,4 +3,6 @@
#include "priv.h"
int gv100_fifo_user_new(const struct nvkm_oclass *, void *, u32,
struct nvkm_object **);
+int tu104_fifo_user_new(const struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c
new file mode 100644
index 000000000000..8f98548a21f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "user.h"
+
+static int
+tu104_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct nvkm_device *device = object->engine->subdev.device;
+ *addr = 0xbb0000 + device->func->resource_addr(device, 0);
+ *size = 0x010000;
+ *type = NVKM_OBJECT_MAP_IO;
+ return 0;
+}
+
+static const struct nvkm_object_func
+tu104_fifo_user = {
+ .map = tu104_fifo_user_map,
+};
+
+int
+tu104_fifo_user_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ return nvkm_object_new_(&tu104_fifo_user, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 14be41f24155..427340153640 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -197,7 +197,7 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
case NVKM_SUBDEV_PMU:
debug_reg = 0xc08;
break;
- case NVKM_ENGINE_NVDEC:
+ case NVKM_ENGINE_NVDEC0:
debug_reg = 0xd00;
break;
case NVKM_ENGINE_SEC2:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index e5830453813d..ab0282dc0736 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -5,3 +5,4 @@ nvkm-y += nvkm/subdev/bar/gf100.o
nvkm-y += nvkm/subdev/bar/gk20a.o
nvkm-y += nvkm/subdev/bar/gm107.o
nvkm-y += nvkm/subdev/bar/gm20b.o
+nvkm-y += nvkm/subdev/bar/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 243f0a5c8a62..209a6a40834a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -36,6 +36,16 @@ nvkm_bar_bar1_vmm(struct nvkm_device *device)
return device->bar->func->bar1.vmm(device->bar);
}
+void
+nvkm_bar_bar1_reset(struct nvkm_device *device)
+{
+ struct nvkm_bar *bar = device->bar;
+ if (bar) {
+ bar->func->bar1.init(bar);
+ bar->func->bar1.wait(bar);
+ }
+}
+
struct nvkm_vmm *
nvkm_bar_bar2_vmm(struct nvkm_device *device)
{
@@ -49,6 +59,16 @@ nvkm_bar_bar2_vmm(struct nvkm_device *device)
}
void
+nvkm_bar_bar2_reset(struct nvkm_device *device)
+{
+ struct nvkm_bar *bar = device->bar;
+ if (bar && bar->bar2) {
+ bar->func->bar2.init(bar);
+ bar->func->bar2.wait(bar);
+ }
+}
+
+void
nvkm_bar_bar2_fini(struct nvkm_device *device)
{
struct nvkm_bar *bar = device->bar;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c
new file mode 100644
index 000000000000..ecaead156e9b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <core/memory.h>
+#include <subdev/timer.h>
+
+static void
+tu104_bar_bar2_wait(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0xb80f50) & 0x0000000c))
+ break;
+ );
+}
+
+static void
+tu104_bar_bar2_fini(struct nvkm_bar *bar)
+{
+ nvkm_mask(bar->subdev.device, 0xb80f48, 0x80000000, 0x00000000);
+}
+
+static void
+tu104_bar_bar2_init(struct nvkm_bar *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct gf100_bar *bar = gf100_bar(base);
+ u32 addr = nvkm_memory_addr(bar->bar[0].inst) >> 12;
+ if (bar->bar2_halve)
+ addr |= 0x40000000;
+ nvkm_wr32(device, 0xb80f48, 0x80000000 | addr);
+}
+
+static void
+tu104_bar_bar1_wait(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0xb80f50) & 0x00000003))
+ break;
+ );
+}
+
+static void
+tu104_bar_bar1_fini(struct nvkm_bar *bar)
+{
+ nvkm_mask(bar->subdev.device, 0xb80f40, 0x80000000, 0x00000000);
+}
+
+static void
+tu104_bar_bar1_init(struct nvkm_bar *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct gf100_bar *bar = gf100_bar(base);
+ const u32 addr = nvkm_memory_addr(bar->bar[1].inst) >> 12;
+ nvkm_wr32(device, 0xb80f40, 0x80000000 | addr);
+}
+
+static const struct nvkm_bar_func
+tu104_bar = {
+ .dtor = gf100_bar_dtor,
+ .oneinit = gf100_bar_oneinit,
+ .bar1.init = tu104_bar_bar1_init,
+ .bar1.fini = tu104_bar_bar1_fini,
+ .bar1.wait = tu104_bar_bar1_wait,
+ .bar1.vmm = gf100_bar_bar1_vmm,
+ .bar2.init = tu104_bar_bar2_init,
+ .bar2.fini = tu104_bar_bar2_fini,
+ .bar2.wait = tu104_bar_bar2_wait,
+ .bar2.vmm = gf100_bar_bar2_vmm,
+ .flush = g84_bar_flush,
+};
+
+int
+tu104_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+ return gf100_bar_new_(&tu104_bar, device, index, pbar);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index 50a436926484..3ef505a5c01b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -13,3 +13,4 @@ nvkm-y += nvkm/subdev/devinit/gf100.o
nvkm-y += nvkm/subdev/devinit/gm107.o
nvkm-y += nvkm/subdev/devinit/gm200.o
nvkm-y += nvkm/subdev/devinit/gv100.o
+nvkm-y += nvkm/subdev/devinit/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index 17235e940ca9..59940dacc2ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -105,6 +105,15 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
return pmu_exec(init, pmu.init_addr_pmu), 0;
}
+void
+gm200_devinit_preos(struct nv50_devinit *init, bool post)
+{
+ /* Optional: Execute PRE_OS application on PMU, which should at
+ * least take care of fans until a full PMU has been loaded.
+ */
+ pmu_load(init, 0x01, post, NULL, NULL);
+}
+
int
gm200_devinit_post(struct nvkm_devinit *base, bool post)
{
@@ -156,10 +165,7 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
return -ETIMEDOUT;
}
- /* Optional: Execute PRE_OS application on PMU, which should at
- * least take care of fans until a full PMU has been loaded.
- */
- pmu_load(init, 0x01, post, NULL, NULL);
+ gm200_devinit_preos(init, post);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 9b9f0dc1e192..72d130bb7f7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -26,4 +26,5 @@ void gf100_devinit_preinit(struct nvkm_devinit *);
u64 gm107_devinit_disable(struct nvkm_devinit *);
int gm200_devinit_post(struct nvkm_devinit *, bool);
+void gm200_devinit_preos(struct nv50_devinit *, bool);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c
new file mode 100644
index 000000000000..aae87b3fc429
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
+static int
+tu104_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+{
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvbios_pll info;
+ int head = type - PLL_VPLL0;
+ int N, fN, M, P;
+ int ret;
+
+ ret = nvbios_pll_parse(device->bios, type, &info);
+ if (ret)
+ return ret;
+
+ ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
+ if (ret < 0)
+ return ret;
+
+ switch (info.type) {
+ case PLL_VPLL0:
+ case PLL_VPLL1:
+ case PLL_VPLL2:
+ case PLL_VPLL3:
+ nvkm_wr32(device, 0x00ef10 + (head * 0x40), fN << 16);
+ nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) |
+ (N << 8) |
+ (M << 0));
+ /*XXX*/
+ nvkm_wr32(device, 0x00ef0c + (head * 0x40), 0x00000900);
+ nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02000014);
+ break;
+ default:
+ nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+tu104_devinit_post(struct nvkm_devinit *base, bool post)
+{
+ struct nv50_devinit *init = nv50_devinit(base);
+ gm200_devinit_preos(init, post);
+ return 0;
+}
+
+static const struct nvkm_devinit_func
+tu104_devinit = {
+ .init = nv50_devinit_init,
+ .post = tu104_devinit_post,
+ .pll_set = tu104_devinit_pll_set,
+ .disable = gm107_devinit_disable,
+};
+
+int
+tu104_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&tu104_devinit, device, index, pinit);
+}
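
tu104_devinit_pll_set() derives the head index from the VPLL type and addresses the per-head PLL registers with a 0x40 stride; the coefficients come from gt215_pll_calc(). A short addressing example (coefficient values are placeholders, not taken from hardware):

	int head = PLL_VPLL1 - PLL_VPLL0;                   /* head 1 */
	u32 frac = 0x00ef10 + (head * 0x40);                /* 0x00ef50 */
	u32 coef = 0x00ef04 + (head * 0x40);                /* 0x00ef44 */

	nvkm_wr32(device, frac, fN << 16);                  /* fractional N */
	nvkm_wr32(device, coef, (P << 16) | (N << 8) | M);  /* P/N/M from gt215_pll_calc() */
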
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index 45bb46fb0929..794eb1745b2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -1,3 +1,4 @@
nvkm-y += nvkm/subdev/fault/base.o
nvkm-y += nvkm/subdev/fault/gp100.o
nvkm-y += nvkm/subdev/fault/gv100.o
+nvkm-y += nvkm/subdev/fault/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index 16ad91c91a7b..4ba1e21e8fda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -23,21 +23,19 @@
#include <core/memory.h>
#include <core/notify.h>
-#include <subdev/bar.h>
-#include <subdev/mmu.h>
static void
nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
{
struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
- fault->func->buffer.fini(fault->buffer[index]);
+ fault->func->buffer.intr(fault->buffer[index], false);
}
static void
nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
{
struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
- fault->func->buffer.init(fault->buffer[index]);
+ fault->func->buffer.intr(fault->buffer[index], true);
}
static int
@@ -91,7 +89,6 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
{
struct nvkm_subdev *subdev = &fault->subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(device);
struct nvkm_fault_buffer *buffer;
int ret;
@@ -99,7 +96,7 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
return -ENOMEM;
buffer->fault = fault;
buffer->id = id;
- buffer->entries = fault->func->buffer.entries(buffer);
+ fault->func->buffer.info(buffer);
fault->buffer[id] = buffer;
nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);
@@ -110,12 +107,12 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
if (ret)
return ret;
- ret = nvkm_vmm_get(bar2, 12, nvkm_memory_size(buffer->mem),
- &buffer->vma);
- if (ret)
- return ret;
+ /* Pin fault buffer in BAR2. */
+ buffer->addr = nvkm_memory_bar2(buffer->mem);
+ if (buffer->addr == ~0ULL)
+ return -EFAULT;
- return nvkm_memory_map(buffer->mem, 0, bar2, buffer->vma, NULL, 0);
+ return 0;
}
static int
@@ -146,7 +143,6 @@ nvkm_fault_oneinit(struct nvkm_subdev *subdev)
static void *
nvkm_fault_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(subdev->device);
struct nvkm_fault *fault = nvkm_fault(subdev);
int i;
@@ -154,7 +150,6 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev)
for (i = 0; i < fault->buffer_nr; i++) {
if (fault->buffer[i]) {
- nvkm_vmm_put(bar2, &fault->buffer[i]->vma);
nvkm_memory_unref(&fault->buffer[i]->mem);
kfree(fault->buffer[i]);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
index 5e71db2e8d75..8fb96fe614f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -21,7 +21,14 @@
*/
#include "priv.h"
-#include <subdev/mmu.h>
+#include <subdev/mc.h>
+
+static void
+gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
+{
+ struct nvkm_device *device = buffer->fault->subdev.device;
+ nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
+}
static void
gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
@@ -34,15 +41,17 @@ static void
gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
- nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->vma->addr));
- nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->vma->addr));
+ nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->addr));
+ nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->addr));
nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
}
-static u32
-gp100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
+static void
+gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
- return nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
+ buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
+ buffer->get = 0x002a7c;
+ buffer->put = 0x002a80;
}
static void
@@ -56,9 +65,10 @@ gp100_fault = {
.intr = gp100_fault_intr,
.buffer.nr = 1,
.buffer.entry_size = 32,
- .buffer.entries = gp100_fault_buffer_entries,
+ .buffer.info = gp100_fault_buffer_info,
.buffer.init = gp100_fault_buffer_init,
.buffer.fini = gp100_fault_buffer_fini,
+ .buffer.intr = gp100_fault_buffer_intr,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 3cd610d7deb5..6fc54e17c935 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -30,9 +30,8 @@ gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
struct nvkm_memory *mem = buffer->mem;
- const u32 foff = buffer->id * 0x14;
- u32 get = nvkm_rd32(device, 0x100e2c + foff);
- u32 put = nvkm_rd32(device, 0x100e30 + foff);
+ u32 get = nvkm_rd32(device, buffer->get);
+ u32 put = nvkm_rd32(device, buffer->put);
if (put == get)
return;
@@ -51,7 +50,7 @@ gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
if (++get == buffer->entries)
get = 0;
- nvkm_wr32(device, 0x100e2c + foff, get);
+ nvkm_wr32(device, buffer->get, get);
info.addr = ((u64)addrhi << 32) | addrlo;
info.inst = ((u64)insthi << 32) | instlo;
@@ -70,13 +69,21 @@ gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
}
static void
-gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+gv100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
- const u32 foff = buffer->id * 0x14;
+ if (enable)
+ nvkm_mask(device, 0x100a2c, intr, intr);
+ else
+ nvkm_mask(device, 0x100a34, intr, intr);
+}
- nvkm_mask(device, 0x100a34, intr, intr);
+static void
+gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+{
+ struct nvkm_device *device = buffer->fault->subdev.device;
+ const u32 foff = buffer->id * 0x14;
nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x00000000);
}
@@ -84,23 +91,25 @@ static void
gv100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
- const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
const u32 foff = buffer->id * 0x14;
nvkm_mask(device, 0x100e34 + foff, 0xc0000000, 0x40000000);
- nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->vma->addr));
- nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->vma->addr));
+ nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->addr));
+ nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->addr));
nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x80000000);
- nvkm_mask(device, 0x100a2c, intr, intr);
}
-static u32
-gv100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
+static void
+gv100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 foff = buffer->id * 0x14;
+
nvkm_mask(device, 0x100e34 + foff, 0x40000000, 0x40000000);
- return nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
+
+ buffer->entries = nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
+ buffer->get = 0x100e2c + foff;
+ buffer->put = 0x100e30 + foff;
}
static int
@@ -166,6 +175,8 @@ static void
gv100_fault_fini(struct nvkm_fault *fault)
{
nvkm_notify_put(&fault->nrpfb);
+ if (fault->buffer[0])
+ fault->func->buffer.fini(fault->buffer[0]);
nvkm_mask(fault->subdev.device, 0x100a34, 0x80000000, 0x80000000);
}
@@ -173,14 +184,15 @@ static void
gv100_fault_init(struct nvkm_fault *fault)
{
nvkm_mask(fault->subdev.device, 0x100a2c, 0x80000000, 0x80000000);
+ fault->func->buffer.init(fault->buffer[0]);
nvkm_notify_get(&fault->nrpfb);
}
-static int
+int
gv100_fault_oneinit(struct nvkm_fault *fault)
{
return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
- gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
+ gv100_fault_ntfy_nrpfb, true, NULL, 0, 0,
&fault->nrpfb);
}
@@ -192,9 +204,10 @@ gv100_fault = {
.intr = gv100_fault_intr,
.buffer.nr = 2,
.buffer.entry_size = 32,
- .buffer.entries = gv100_fault_buffer_entries,
+ .buffer.info = gv100_fault_buffer_info,
.buffer.init = gv100_fault_buffer_init,
.buffer.fini = gv100_fault_buffer_fini,
+ .buffer.intr = gv100_fault_buffer_intr,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index e4d2f5234fd1..8ca8b2876dad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -12,8 +12,10 @@ struct nvkm_fault_buffer {
struct nvkm_fault *fault;
int id;
int entries;
+ u32 get;
+ u32 put;
struct nvkm_memory *mem;
- struct nvkm_vma *vma;
+ u64 addr;
};
int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
@@ -27,9 +29,12 @@ struct nvkm_fault_func {
struct {
int nr;
u32 entry_size;
- u32 (*entries)(struct nvkm_fault_buffer *);
+ void (*info)(struct nvkm_fault_buffer *);
void (*init)(struct nvkm_fault_buffer *);
void (*fini)(struct nvkm_fault_buffer *);
+ void (*intr)(struct nvkm_fault_buffer *, bool enable);
} buffer;
};
+
+int gv100_fault_oneinit(struct nvkm_fault *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c
new file mode 100644
index 000000000000..9c8a3adf99d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+
+#include <nvif/class.h>
+
+static void
+tu104_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
+{
+ /*XXX: Earlier versions of RM touched the old regs on Turing,
+ * which don't appear to actually work anymore, but newer
+	 * versions of RM don't appear to touch anything at all.
+ */
+}
+
+static void
+tu104_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+{
+ struct nvkm_device *device = buffer->fault->subdev.device;
+ const u32 foff = buffer->id * 0x20;
+ nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x00000000);
+}
+
+static void
+tu104_fault_buffer_init(struct nvkm_fault_buffer *buffer)
+{
+ struct nvkm_device *device = buffer->fault->subdev.device;
+ const u32 foff = buffer->id * 0x20;
+
+ nvkm_mask(device, 0xb83010 + foff, 0xc0000000, 0x40000000);
+ nvkm_wr32(device, 0xb83004 + foff, upper_32_bits(buffer->addr));
+ nvkm_wr32(device, 0xb83000 + foff, lower_32_bits(buffer->addr));
+ nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x80000000);
+}
+
+static void
+tu104_fault_buffer_info(struct nvkm_fault_buffer *buffer)
+{
+ struct nvkm_device *device = buffer->fault->subdev.device;
+ const u32 foff = buffer->id * 0x20;
+
+ nvkm_mask(device, 0xb83010 + foff, 0x40000000, 0x40000000);
+
+ buffer->entries = nvkm_rd32(device, 0xb83010 + foff) & 0x000fffff;
+ buffer->get = 0xb83008 + foff;
+ buffer->put = 0xb8300c + foff;
+}
+
+static void
+tu104_fault_intr_fault(struct nvkm_fault *fault)
+{
+ struct nvkm_subdev *subdev = &fault->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fault_data info;
+ const u32 addrlo = nvkm_rd32(device, 0xb83080);
+ const u32 addrhi = nvkm_rd32(device, 0xb83084);
+ const u32 info0 = nvkm_rd32(device, 0xb83088);
+ const u32 insthi = nvkm_rd32(device, 0xb8308c);
+ const u32 info1 = nvkm_rd32(device, 0xb83090);
+
+ info.addr = ((u64)addrhi << 32) | addrlo;
+ info.inst = ((u64)insthi << 32) | (info0 & 0xfffff000);
+ info.time = 0;
+ info.engine = (info0 & 0x000000ff);
+ info.valid = (info1 & 0x80000000) >> 31;
+ info.gpc = (info1 & 0x1f000000) >> 24;
+ info.hub = (info1 & 0x00100000) >> 20;
+ info.access = (info1 & 0x000f0000) >> 16;
+ info.client = (info1 & 0x00007f00) >> 8;
+ info.reason = (info1 & 0x0000001f);
+
+ nvkm_fifo_fault(device->fifo, &info);
+}
+
+static void
+tu104_fault_intr(struct nvkm_fault *fault)
+{
+ struct nvkm_subdev *subdev = &fault->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0xb83094);
+
+ if (stat & 0x80000000) {
+ tu104_fault_intr_fault(fault);
+ nvkm_wr32(device, 0xb83094, 0x80000000);
+ stat &= ~0x80000000;
+ }
+
+ if (stat & 0x00000200) {
+ if (fault->buffer[0]) {
+ nvkm_event_send(&fault->event, 1, 0, NULL, 0);
+ stat &= ~0x00000200;
+ }
+ }
+
+ /*XXX: guess, can't confirm until we get fw... */
+ if (stat & 0x00000100) {
+ if (fault->buffer[1]) {
+ nvkm_event_send(&fault->event, 1, 1, NULL, 0);
+ stat &= ~0x00000100;
+ }
+ }
+
+ if (stat) {
+ nvkm_debug(subdev, "intr %08x\n", stat);
+ }
+}
+
+static void
+tu104_fault_fini(struct nvkm_fault *fault)
+{
+ nvkm_notify_put(&fault->nrpfb);
+ if (fault->buffer[0])
+ fault->func->buffer.fini(fault->buffer[0]);
+ /*XXX: disable priv faults */
+}
+
+static void
+tu104_fault_init(struct nvkm_fault *fault)
+{
+ /*XXX: enable priv faults */
+ fault->func->buffer.init(fault->buffer[0]);
+ nvkm_notify_get(&fault->nrpfb);
+}
+
+static const struct nvkm_fault_func
+tu104_fault = {
+ .oneinit = gv100_fault_oneinit,
+ .init = tu104_fault_init,
+ .fini = tu104_fault_fini,
+ .intr = tu104_fault_intr,
+ .buffer.nr = 2,
+ .buffer.entry_size = 32,
+ .buffer.info = tu104_fault_buffer_info,
+ .buffer.init = tu104_fault_buffer_init,
+ .buffer.fini = tu104_fault_buffer_fini,
+ .buffer.intr = tu104_fault_buffer_intr,
+};
+
+int
+tu104_fault_new(struct nvkm_device *device, int index,
+ struct nvkm_fault **pfault)
+{
+ return nvkm_fault_new_(&tu104_fault, device, index, pfault);
+}
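
With the GET/PUT register offsets recorded by the per-chip info() hook, fault entries are drained with a plain ring-buffer walk; a minimal consumer sketch following gv100_fault_buffer_process() above (entry decoding elided):

	u32 get = nvkm_rd32(device, buffer->get);
	u32 put = nvkm_rd32(device, buffer->put);

	while (get != put) {
		const u32 base = get * buffer->fault->func->buffer.entry_size;

		/* ... decode one record at 'base' from buffer->mem ... */

		if (++get == buffer->entries)
			get = 0;                        /* wrap around */
		nvkm_wr32(device, buffer->get, get);    /* hand the slot back to hardware */
	}
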
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 434d2fc5bb1c..b2bb5a3ccb02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -68,10 +68,13 @@ nvkm_fb_bios_memtype(struct nvkm_bios *bios)
if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
switch (M0203E.type) {
- case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
- case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
- case M0203E_TYPE_GDDR3: return NVKM_RAM_TYPE_GDDR3;
- case M0203E_TYPE_GDDR5: return NVKM_RAM_TYPE_GDDR5;
+ case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
+ case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
+ case M0203E_TYPE_GDDR3 : return NVKM_RAM_TYPE_GDDR3;
+ case M0203E_TYPE_GDDR5 : return NVKM_RAM_TYPE_GDDR5;
+ case M0203E_TYPE_GDDR5X: return NVKM_RAM_TYPE_GDDR5X;
+ case M0203E_TYPE_GDDR6 : return NVKM_RAM_TYPE_GDDR6;
+ case M0203E_TYPE_HBM2 : return NVKM_RAM_TYPE_HBM2;
default:
nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
return NVKM_RAM_TYPE_UNKNOWN;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
index 24c7bd505731..b11867f682cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -184,6 +184,9 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
[NVKM_RAM_TYPE_GDDR3 ] = "GDDR3",
[NVKM_RAM_TYPE_GDDR4 ] = "GDDR4",
[NVKM_RAM_TYPE_GDDR5 ] = "GDDR5",
+ [NVKM_RAM_TYPE_GDDR5X ] = "GDDR5X",
+ [NVKM_RAM_TYPE_GDDR6 ] = "GDDR6",
+ [NVKM_RAM_TYPE_HBM2 ] = "HBM2",
};
struct nvkm_subdev *subdev = &fb->subdev;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index db48a1daca0c..02c4eb28cef4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -288,6 +288,19 @@ nv50_instobj_addr(struct nvkm_memory *memory)
return nvkm_memory_addr(nv50_instobj(memory)->ram);
}
+static u64
+nv50_instobj_bar2(struct nvkm_memory *memory)
+{
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ u64 addr = ~0ULL;
+ if (nv50_instobj_acquire(&iobj->base.memory)) {
+ iobj->lru.next = NULL; /* Exclude from eviction. */
+ addr = iobj->bar->addr;
+ }
+ nv50_instobj_release(&iobj->base.memory);
+ return addr;
+}
+
static enum nvkm_memory_target
nv50_instobj_target(struct nvkm_memory *memory)
{
@@ -325,8 +338,9 @@ static const struct nvkm_memory_func
nv50_instobj_func = {
.dtor = nv50_instobj_dtor,
.target = nv50_instobj_target,
- .size = nv50_instobj_size,
+ .bar2 = nv50_instobj_bar2,
.addr = nv50_instobj_addr,
+ .size = nv50_instobj_size,
.boot = nv50_instobj_boot,
.acquire = nv50_instobj_acquire,
.release = nv50_instobj_release,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 2befbe36dc28..f3b06329c338 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/mc/gk104.o
nvkm-y += nvkm/subdev/mc/gk20a.o
nvkm-y += nvkm/subdev/mc/gp100.o
nvkm-y += nvkm/subdev/mc/gp10b.o
+nvkm-y += nvkm/subdev/mc/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 09f669ac6630..0e57ab2a709f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -108,6 +108,9 @@ nvkm_mc_intr(struct nvkm_device *device, bool *handled)
if (stat)
nvkm_error(&mc->subdev, "intr %08x\n", stat);
*handled = intr != 0;
+
+ if (mc->func->intr_hack)
+ mc->func->intr_hack(mc, handled);
}
static u32
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index d9e3691d45b7..eb91a4cf452b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -26,6 +26,7 @@ struct nvkm_mc_func {
void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
/* retrieve pending interrupt mask (NV_PMC_INTR) */
u32 (*intr_stat)(struct nvkm_mc *);
+ void (*intr_hack)(struct nvkm_mc *, bool *handled);
const struct nvkm_mc_map *reset;
void (*unk260)(struct nvkm_mc *, u32);
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c
new file mode 100644
index 000000000000..b7165bd18999
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+tu104_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ u32 stat = nvkm_rd32(device, 0xb81010);
+ if (stat & 0x00000050) {
+ struct nvkm_subdev *subdev =
+ nvkm_device_subdev(device, NVKM_SUBDEV_FAULT);
+ nvkm_wr32(device, 0xb81010, stat & 0x00000050);
+ if (subdev)
+ nvkm_subdev_intr(subdev);
+ *handled = true;
+ }
+}
+
+static const struct nvkm_mc_func
+tu104_mc = {
+ .init = nv50_mc_init,
+ .intr = gp100_mc_intr,
+ .intr_unarm = gp100_mc_intr_unarm,
+ .intr_rearm = gp100_mc_intr_rearm,
+ .intr_mask = gp100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
+ .intr_hack = tu104_mc_intr_hack,
+ .reset = gk104_mc_reset,
+};
+
+int
+tu104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return gp100_mc_new_(&tu104_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 58a24e3a0598..8966180b36cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -13,6 +13,7 @@ nvkm-y += nvkm/subdev/mmu/gm20b.o
nvkm-y += nvkm/subdev/mmu/gp100.o
nvkm-y += nvkm/subdev/mmu/gp10b.o
nvkm-y += nvkm/subdev/mmu/gv100.o
+nvkm-y += nvkm/subdev/mmu/tu104.o
nvkm-y += nvkm/subdev/mmu/mem.o
nvkm-y += nvkm/subdev/mmu/memnv04.o
@@ -33,6 +34,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgm20b.o
nvkm-y += nvkm/subdev/mmu/vmmgp100.o
nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
nvkm-y += nvkm/subdev/mmu/vmmgv100.o
+nvkm-y += nvkm/subdev/mmu/vmmtu104.o
nvkm-y += nvkm/subdev/mmu/umem.o
nvkm-y += nvkm/subdev/mmu/ummu.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c
new file mode 100644
index 000000000000..8e6f4096170d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+tu104_mmu = {
+ .dma_bits = 47,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu104_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+tu104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&tu104_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index 37b201b95f15..6889076097ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -134,23 +134,10 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto fail;
}
- if (vma->addr != addr) {
- const u64 tail = vma->size + vma->addr - addr;
- if (ret = -ENOMEM, !(vma = nvkm_vma_tail(vma, tail)))
- goto fail;
- vma->part = true;
- nvkm_vmm_node_insert(vmm, vma);
- }
-
- if (vma->size != size) {
- const u64 tail = vma->size - size;
- struct nvkm_vma *tmp;
- if (ret = -ENOMEM, !(tmp = nvkm_vma_tail(vma, tail))) {
- nvkm_vmm_unmap_region(vmm, vma);
- goto fail;
- }
- tmp->part = true;
- nvkm_vmm_node_insert(vmm, tmp);
+ vma = nvkm_vmm_node_split(vmm, vma, addr, size);
+ if (!vma) {
+ ret = -ENOMEM;
+ goto fail;
}
}
vma->busy = true;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 7459def78d50..6b87fff014b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -767,6 +767,20 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
return new;
}
+static inline void
+nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ rb_erase(&vma->tree, &vmm->free);
+}
+
+static inline void
+nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ nvkm_vmm_free_remove(vmm, vma);
+ list_del(&vma->head);
+ kfree(vma);
+}
+
static void
nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
@@ -795,7 +809,21 @@ nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
rb_insert_color(&vma->tree, &vmm->free);
}
-void
+static inline void
+nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ rb_erase(&vma->tree, &vmm->root);
+}
+
+static inline void
+nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ nvkm_vmm_node_remove(vmm, vma);
+ list_del(&vma->head);
+ kfree(vma);
+}
+
+static void
nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
struct rb_node **ptr = &vmm->root.rb_node;
@@ -834,6 +862,78 @@ nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
return NULL;
}
+#define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL : \
+ list_entry((root)->head.dir, struct nvkm_vma, head))
+
+static struct nvkm_vma *
+nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
+ struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
+{
+ if (next) {
+ if (vma->size == size) {
+ vma->size += next->size;
+ nvkm_vmm_node_delete(vmm, next);
+ if (prev) {
+ prev->size += vma->size;
+ nvkm_vmm_node_delete(vmm, vma);
+ return prev;
+ }
+ return vma;
+ }
+ BUG_ON(prev);
+
+ nvkm_vmm_node_remove(vmm, next);
+ vma->size -= size;
+ next->addr -= size;
+ next->size += size;
+ nvkm_vmm_node_insert(vmm, next);
+ return next;
+ }
+
+ if (prev) {
+ if (vma->size != size) {
+ nvkm_vmm_node_remove(vmm, vma);
+ prev->size += size;
+ vma->addr += size;
+ vma->size -= size;
+ nvkm_vmm_node_insert(vmm, vma);
+ } else {
+ prev->size += vma->size;
+ nvkm_vmm_node_delete(vmm, vma);
+ }
+ return prev;
+ }
+
+ return vma;
+}
+
+struct nvkm_vma *
+nvkm_vmm_node_split(struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, u64 addr, u64 size)
+{
+ struct nvkm_vma *prev = NULL;
+
+ if (vma->addr != addr) {
+ prev = vma;
+ if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
+ return NULL;
+ vma->part = true;
+ nvkm_vmm_node_insert(vmm, vma);
+ }
+
+ if (vma->size != size) {
+ struct nvkm_vma *tmp;
+ if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
+ nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
+ return NULL;
+ }
+ tmp->part = true;
+ nvkm_vmm_node_insert(vmm, tmp);
+ }
+
+ return vma;
+}
+
static void
nvkm_vmm_dtor(struct nvkm_vmm *vmm)
{
@@ -954,37 +1054,20 @@ nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
}
-#define node(root, dir) ((root)->head.dir == &vmm->list) ? NULL : \
- list_entry((root)->head.dir, struct nvkm_vma, head)
-
void
nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
- struct nvkm_vma *next;
+ struct nvkm_vma *next = node(vma, next);
+ struct nvkm_vma *prev = NULL;
nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
nvkm_memory_unref(&vma->memory);
- if (vma->part) {
- struct nvkm_vma *prev = node(vma, prev);
- if (!prev->memory) {
- prev->size += vma->size;
- rb_erase(&vma->tree, &vmm->root);
- list_del(&vma->head);
- kfree(vma);
- vma = prev;
- }
- }
-
- next = node(vma, next);
- if (next && next->part) {
- if (!next->memory) {
- vma->size += next->size;
- rb_erase(&next->tree, &vmm->root);
- list_del(&next->head);
- kfree(next);
- }
- }
+ if (!vma->part || ((prev = node(vma, prev)), prev->memory))
+ prev = NULL;
+ if (!next->part || next->memory)
+ next = NULL;
+ nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
}
void
@@ -1163,18 +1246,14 @@ nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
struct nvkm_vma *prev, *next;
if ((prev = node(vma, prev)) && !prev->used) {
- rb_erase(&prev->tree, &vmm->free);
- list_del(&prev->head);
vma->addr = prev->addr;
vma->size += prev->size;
- kfree(prev);
+ nvkm_vmm_free_delete(vmm, prev);
}
if ((next = node(vma, next)) && !next->used) {
- rb_erase(&next->tree, &vmm->free);
- list_del(&next->head);
vma->size += next->size;
- kfree(next);
+ nvkm_vmm_free_delete(vmm, next);
}
nvkm_vmm_free_insert(vmm, vma);
@@ -1250,7 +1329,7 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
}
/* Remove VMA from the list of allocated nodes. */
- rb_erase(&vma->tree, &vmm->root);
+ nvkm_vmm_node_remove(vmm, vma);
/* Merge VMA back into the free list. */
vma->page = NVKM_VMA_PAGE_NONE;
@@ -1357,7 +1436,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
tail = ALIGN_DOWN(tail, vmm->func->page_block);
if (addr <= tail && tail - addr >= size) {
- rb_erase(&this->tree, &vmm->free);
+ nvkm_vmm_free_remove(vmm, this);
vma = this;
break;
}
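
nvkm_vmm_node_split() above replaces the open-coded splitting removed from uvmm.c: the allocated region is cut so that exactly [addr, addr + size) remains as the node to map, leftover head/tail pieces are reinserted as 'part' nodes, and nvkm_vmm_node_merge() undoes the head split if the second allocation fails. A small worked example (addresses are illustrative):

	/*
	 * Existing node:      0x100000 + 0x40000
	 * Requested mapping:  0x110000 + 0x08000
	 *
	 * 1) addr differs -> nvkm_vma_tail() at 0x110000:
	 *      prev: 0x100000 + 0x10000           (original node, shrunk)
	 *      vma : 0x110000 + 0x30000  (part)
	 * 2) size differs -> nvkm_vma_tail() at 0x118000:
	 *      vma : 0x110000 + 0x08000  (part)   <- returned to the caller
	 *      tmp : 0x118000 + 0x28000  (part)
	 */
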
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 1a3b0a3724ca..42ad326521a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -157,6 +157,8 @@ int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
const char *name, struct nvkm_vmm *);
struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
+struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
+ u64 addr, u64 size);
int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
bool sparse, u8 page, u8 align, u64 size,
struct nvkm_vma **pvma);
@@ -165,7 +167,6 @@ void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
-void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);
int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
u64, u64, void *, u32, struct lock_class_key *,
@@ -200,6 +201,8 @@ int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gp100_vmm_flush(struct nvkm_vmm *, int);
+int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+
int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
@@ -239,6 +242,9 @@ int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
int gv100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
+int tu104_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
#define VMM_PRINT(l,v,p,f,a...) do { \
struct nvkm_vmm *_vmm = (v); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c
new file mode 100644
index 000000000000..adaadd92110f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
+static void
+tu104_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
+ struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 type = depth << 24; /*XXX: not confirmed */
+
+ type = 0x00000001; /* PAGE_ALL */
+ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+ type |= 0x00000004; /* HUB_ONLY */
+
+ mutex_lock(&subdev->mutex);
+
+ nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
+ nvkm_wr32(device, 0xb830a4, 0x00000000);
+ nvkm_wr32(device, 0x100e68, 0x00000000);
+ nvkm_wr32(device, 0xb830b0, 0x80000000 | type);
+
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0xb830b0) & 0x80000000))
+ break;
+ );
+
+ mutex_unlock(&subdev->mutex);
+}
+
+static const struct nvkm_vmm_func
+tu104_vmm = {
+ .join = gv100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .flush = tu104_vmm_flush,
+ .page = {
+ { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+ { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+ { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+tu104_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&tu104_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
index 1f7a3c1a7f50..84a2f243ed9b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
@@ -59,10 +59,10 @@ gp102_run_secure_scrub(struct nvkm_secboot *sb)
nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n");
- engine = nvkm_engine_ref(&device->nvdec->engine);
+ engine = nvkm_engine_ref(&device->nvdec[0]->engine);
if (IS_ERR(engine))
return PTR_ERR(engine);
- falcon = device->nvdec->falcon;
+ falcon = device->nvdec[0]->falcon;
nvkm_falcon_get(falcon, &sb->subdev);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index 36de23d12ae4..dd922033628c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -23,6 +23,42 @@
*/
#include "priv.h"
+s64
+nvkm_timer_wait_test(struct nvkm_timer_wait *wait)
+{
+ struct nvkm_subdev *subdev = &wait->tmr->subdev;
+ u64 time = nvkm_timer_read(wait->tmr);
+
+ if (wait->reads == 0) {
+ wait->time0 = time;
+ wait->time1 = time;
+ }
+
+ if (wait->time1 == time) {
+ if (wait->reads++ == 16) {
+ nvkm_fatal(subdev, "stalled at %016llx\n", time);
+ return -ETIMEDOUT;
+ }
+ } else {
+ wait->time1 = time;
+ wait->reads = 1;
+ }
+
+ if (wait->time1 - wait->time0 > wait->limit)
+ return -ETIMEDOUT;
+
+ return wait->time1 - wait->time0;
+}
+
+void
+nvkm_timer_wait_init(struct nvkm_device *device, u64 nsec,
+ struct nvkm_timer_wait *wait)
+{
+ wait->tmr = device->timer;
+ wait->limit = nsec;
+ wait->reads = 0;
+}
+
u64
nvkm_timer_read(struct nvkm_timer *tmr)
{
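
nvkm_timer_wait_init()/nvkm_timer_wait_test() added above give callers a reusable nanosecond-budget poll helper, with a stall check that bails out after 16 identical timer reads. One plausible calling pattern, assuming a hypothetical condition_met() predicate:

	struct nvkm_timer_wait wait;
	int ret = -ETIMEDOUT;

	nvkm_timer_wait_init(device, 2000000000ULL, &wait);   /* 2 second budget, in ns */
	do {
		if (condition_met(device)) {                   /* placeholder predicate */
			ret = 0;
			break;
		}
	} while (nvkm_timer_wait_test(&wait) >= 0);
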
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index 4f1f3e890650..39081eadfd84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -86,7 +86,7 @@ gk104_top_oneinit(struct nvkm_top *top)
case 0x0000000d: A_(SEC2 ); break;
case 0x0000000e: B_(NVENC ); break;
case 0x0000000f: A_(NVENC1); break;
- case 0x00000010: A_(NVDEC ); break;
+ case 0x00000010: B_(NVDEC ); break;
case 0x00000013: B_(CE ); break;
break;
default:
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 1f8161b041be..465120809eb3 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -177,6 +177,7 @@ static int panel_dpi_probe(struct platform_device *pdev)
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags);
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 0a485c5b982e..00a9c2ab9e6c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5418,9 +5418,15 @@ static int dsi_probe(struct platform_device *pdev)
dsi->num_lanes_supported = 3;
}
+ r = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (r) {
+ DSSERR("Failed to populate DSI child devices: %d\n", r);
+ goto err_pm_disable;
+ }
+
r = dsi_init_output(dsi);
if (r)
- goto err_pm_disable;
+ goto err_of_depopulate;
r = dsi_probe_of(dsi);
if (r) {
@@ -5428,22 +5434,16 @@ static int dsi_probe(struct platform_device *pdev)
goto err_uninit_output;
}
- r = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (r) {
- DSSERR("Failed to populate DSI child devices: %d\n", r);
- goto err_uninit_output;
- }
-
r = component_add(&pdev->dev, &dsi_component_ops);
if (r)
- goto err_of_depopulate;
+ goto err_uninit_output;
return 0;
-err_of_depopulate:
- of_platform_depopulate(dev);
err_uninit_output:
dsi_uninit_output(dsi);
+err_of_depopulate:
+ of_platform_depopulate(dev);
err_pm_disable:
pm_runtime_disable(dev);
return r;
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 1f698a95a94a..33e15cb77efa 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -432,7 +432,7 @@ struct omap_dss_device {
const struct omap_dss_driver *driver;
const struct omap_dss_device_ops *ops;
unsigned long ops_flags;
- unsigned long bus_flags;
+ u32 bus_flags;
/* helper variable for driver suspend/resume */
bool activate_after_resume;
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 4566e0a75cb8..148b6b20274f 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -52,17 +52,44 @@ static const struct drm_encoder_funcs omap_encoder_funcs = {
.destroy = omap_encoder_destroy,
};
+static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *dssdev = omap_encoder->output;
+ struct drm_connector *connector;
+ bool hdmi_mode;
+
+ hdmi_mode = false;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ hdmi_mode = omap_connector_get_hdmi_mode(connector);
+ break;
+ }
+ }
+
+ if (dssdev->ops->hdmi.set_hdmi_mode)
+ dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
+
+ if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
+ struct hdmi_avi_infoframe avi;
+ int r;
+
+ r = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+ adjusted_mode);
+ if (r == 0)
+ dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
+ }
+}
+
static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct drm_connector *connector;
struct omap_dss_device *dssdev;
struct videomode vm = { 0 };
- bool hdmi_mode;
- int r;
drm_display_mode_to_videomode(adjusted_mode, &vm);
@@ -112,27 +139,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
}
/* Set the HDMI mode and HDMI infoframe if applicable. */
- hdmi_mode = false;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- hdmi_mode = omap_connector_get_hdmi_mode(connector);
- break;
- }
- }
-
- dssdev = omap_encoder->output;
-
- if (dssdev->ops->hdmi.set_hdmi_mode)
- dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
-
- if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
- struct hdmi_avi_infoframe avi;
-
- r = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
- adjusted_mode);
- if (r == 0)
- dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
- }
+ if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI)
+ omap_encoder_hdmi_mode_set(encoder, adjusted_mode);
}
static void omap_encoder_disable(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 6e828158bcb0..d410e2925162 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -163,8 +163,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
return -EINVAL;
- if (!access_ok(VERIFY_READ,
- u64_to_user_ptr(cmd->command),
+ if (!access_ok(u64_to_user_ptr(cmd->command),
cmd->command_size))
return -EFAULT;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 0a693fede05e..30f85f0130cb 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -217,7 +217,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
qxl_bo_ref(bo);
entry->tv.bo = &bo->tbo;
- entry->tv.shared = false;
+ entry->tv.num_shared = 0;
list_add_tail(&entry->tv.head, &release->bos);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1ae31dbc61c6..f43305329939 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -178,7 +178,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
- p->relocs[i].tv.shared = !r->write_domain;
+ p->relocs[i].tv.num_shared = !r->write_domain;
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
priority);
@@ -253,7 +253,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
resv = reloc->robj->tbo.resv;
r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
- reloc->tv.shared);
+ reloc->tv.num_shared);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 27d8e7dd2d06..44617dec8183 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -552,7 +552,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
INIT_LIST_HEAD(&list);
tv.bo = &bo_va->bo->tbo;
- tv.shared = true;
+ tv.num_shared = 1;
list_add(&tv.head, &list);
vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index f8b35df44c60..b3019505065a 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -119,40 +119,38 @@ static void radeon_mn_release(struct mmu_notifier *mn,
* unmap them by move them into system domain again.
*/
static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
struct ttm_operation_ctx ctx = { false, false };
struct interval_tree_node *it;
+ unsigned long end;
int ret = 0;
/* notification is exclusive, but interval is inclusive */
- end -= 1;
+ end = range->end - 1;
/* TODO we should be able to split locking for interval tree and
* the tear down.
*/
- if (blockable)
+ if (range->blockable)
mutex_lock(&rmn->lock);
else if (!mutex_trylock(&rmn->lock))
return -EAGAIN;
- it = interval_tree_iter_first(&rmn->objects, start, end);
+ it = interval_tree_iter_first(&rmn->objects, range->start, end);
while (it) {
struct radeon_mn_node *node;
struct radeon_bo *bo;
long r;
- if (!blockable) {
+ if (!range->blockable) {
ret = -EAGAIN;
goto out_unlock;
}
node = container_of(it, struct radeon_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
+ it = interval_tree_iter_next(it, range->start, end);
list_for_each_entry(bo, &node->bos, mn_list) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index fed11ece0de6..0d374211661c 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -142,7 +142,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
- list[0].tv.shared = true;
+ list[0].tv.num_shared = 1;
list[0].tiling_flags = 0;
list_add(&list[0].tv.head, head);
@@ -154,7 +154,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.shared = true;
+ list[idx].tv.num_shared = 1;
list[idx].tiling_flags = 0;
list_add(&list[idx++].tv.head, head);
}
@@ -946,7 +946,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
if (mem) {
- addr = mem->start << PAGE_SHIFT;
+ addr = (u64)mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
bo_va->flags |= RADEON_VM_PAGE_VALID;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index d85f0a1c1581..cebf313c6e1f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -202,10 +202,25 @@ void rcar_du_group_put(struct rcar_du_group *rgrp)
static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
{
- struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2];
+ struct rcar_du_device *rcdu = rgrp->dev;
+
+ /*
+ * Group start/stop is controlled by the DRES and DEN bits of DSYSR0
+ * for the first group and DSYSR2 for the second group. On most DU
+ * instances, this maps to the first CRTC of the group, and we can just
+ * use rcar_du_crtc_dsysr_clr_set() to access the correct DSYSR. On
+ * M3-N, however, DU2 doesn't exist, but DSYSR2 does. We thus need to
+ * access the register directly using group read/write.
+ */
+ if (rcdu->info->channels_mask & BIT(rgrp->index * 2)) {
+ struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2];
- rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN,
- start ? DSYSR_DEN : DSYSR_DRES);
+ rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN,
+ start ? DSYSR_DEN : DSYSR_DRES);
+ } else {
+ rcar_du_group_write(rgrp, DSYSR,
+ start ? DSYSR_DEN : DSYSR_DRES);
+ }
}
void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
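
The channels_mask test above simply checks whether the group's first CRTC (DU channel 2 * index) actually exists before using the per-CRTC DSYSR helper. A worked example for an M3-N-like part, assuming channels DU0, DU1 and DU3 are present and DU2 is not:

	/*
	 * channels_mask = BIT(0) | BIT(1) | BIT(3)           (assumed for illustration)
	 * group 0: BIT(0 * 2) = BIT(0) set   -> rcar_du_crtc_dsysr_clr_set() on DU0
	 * group 1: BIT(1 * 2) = BIT(2) clear -> rcar_du_group_write(rgrp, DSYSR, ...)
	 */
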
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 37f9a3b651ab..be6c2573039a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -448,11 +448,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
return 0;
}
-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
-{
- rockchip_drm_platform_remove(pdev);
-}
-
static const struct of_device_id rockchip_drm_dt_ids[] = {
{ .compatible = "rockchip,display-subsystem", },
{ /* sentinel */ },
@@ -462,7 +457,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
- .shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 18ebbb05762e..dbb69063b3d5 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,6 +60,8 @@
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
+static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job);
+
/**
* drm_sched_rq_init - initialize a given run queue struct
*
@@ -209,6 +211,62 @@ void drm_sched_fault(struct drm_gpu_scheduler *sched)
}
EXPORT_SYMBOL(drm_sched_fault);
+/**
+ * drm_sched_suspend_timeout - Suspend scheduler job timeout
+ *
+ * @sched: scheduler instance for which to suspend the timeout
+ *
+ * Suspend the delayed work timeout for the scheduler. This is done by
+ * modifying the delayed work timeout to an arbitrarily large value,
+ * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
+ * called from an IRQ context.
+ *
+ * Returns the timeout remaining
+ *
+ */
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
+{
+ unsigned long sched_timeout, now = jiffies;
+
+ sched_timeout = sched->work_tdr.timer.expires;
+
+ /*
+	 * Modify the timeout to an arbitrarily large value. This also prevents
+	 * the timeout from being restarted when new submissions arrive.
+ */
+ if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
+ && time_after(sched_timeout, now))
+ return sched_timeout - now;
+ else
+ return sched->timeout;
+}
+EXPORT_SYMBOL(drm_sched_suspend_timeout);
+
+/**
+ * drm_sched_resume_timeout - Resume scheduler job timeout
+ *
+ * @sched: scheduler instance for which to resume the timeout
+ * @remaining: remaining timeout
+ *
+ * Resume the delayed work timeout for the scheduler. Note that
+ * this function can be called from an IRQ context.
+ */
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+
+ if (list_empty(&sched->ring_mirror_list))
+ cancel_delayed_work(&sched->work_tdr);
+ else
+ mod_delayed_work(system_wq, &sched->work_tdr, remaining);
+
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+EXPORT_SYMBOL(drm_sched_resume_timeout);
+
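
The suspend/resume pair is intended to bracket work during which the TDR watchdog must not fire, carrying the remaining budget across; a minimal usage sketch, where hw_reset() is a placeholder for whatever the driver does while timeouts are parked:

	unsigned long remaining;

	remaining = drm_sched_suspend_timeout(sched);   /* park the TDR watchdog */
	hw_reset();                                     /* placeholder for driver work */
	drm_sched_resume_timeout(sched, remaining);     /* re-arm with the time left */
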
/* job_finish is called after hw fence signaled
*/
static void drm_sched_job_finish(struct work_struct *work)
@@ -216,6 +274,7 @@ static void drm_sched_job_finish(struct work_struct *work)
struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
finish_work);
struct drm_gpu_scheduler *sched = s_job->sched;
+ unsigned long flags;
/*
* Canceling the timeout without removing our job from the ring mirror
@@ -226,12 +285,12 @@ static void drm_sched_job_finish(struct work_struct *work)
*/
cancel_delayed_work_sync(&sched->work_tdr);
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
/* remove job from ring_mirror_list */
- list_del(&s_job->node);
+ list_del_init(&s_job->node);
/* queue TDR for next job */
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
sched->ops->free_job(s_job);
}
@@ -247,56 +306,33 @@ static void drm_sched_job_finish_cb(struct dma_fence *f,
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
struct drm_gpu_scheduler *sched = s_job->sched;
+ unsigned long flags;
dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
drm_sched_job_finish_cb);
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
- int r;
+ unsigned long flags;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
-
- spin_lock(&sched->job_list_lock);
- list_for_each_entry_reverse(job, &sched->ring_mirror_list, node) {
- struct drm_sched_fence *fence = job->s_fence;
-
- if (!dma_fence_remove_callback(fence->parent, &fence->cb))
- goto already_signaled;
- }
-
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
- spin_unlock(&sched->job_list_lock);
if (job)
- sched->ops->timedout_job(job);
-
- spin_lock(&sched->job_list_lock);
- list_for_each_entry(job, &sched->ring_mirror_list, node) {
- struct drm_sched_fence *fence = job->s_fence;
+ job->sched->ops->timedout_job(job);
- if (!fence->parent || !list_empty(&fence->cb.node))
- continue;
-
- r = dma_fence_add_callback(fence->parent, &fence->cb,
- drm_sched_process_job);
- if (r)
- drm_sched_process_job(fence->parent, &fence->cb);
-
-already_signaled:
- ;
- }
+ spin_lock_irqsave(&sched->job_list_lock, flags);
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
/**
@@ -310,9 +346,10 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
{
struct drm_sched_job *s_job;
struct drm_sched_entity *entity, *tmp;
+ unsigned long flags;
int i;
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
if (s_job->s_fence->parent &&
dma_fence_remove_callback(s_job->s_fence->parent,
@@ -322,7 +359,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
atomic_dec(&sched->hw_rq_count);
}
}
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
atomic_inc(&bad->karma);
@@ -360,9 +397,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *s_job, *tmp;
bool found_guilty = false;
+ unsigned long flags;
int r;
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *fence;
@@ -376,7 +414,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
dma_fence_set_error(&s_fence->finished, -ECANCELED);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
@@ -391,12 +429,14 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
r);
dma_fence_put(fence);
} else {
+ if (s_fence->finished.error < 0)
+ drm_sched_expel_job_unlocked(s_job);
drm_sched_process_job(NULL, &s_fence->cb);
}
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
}
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
EXPORT_SYMBOL(drm_sched_job_recovery);
@@ -595,6 +635,8 @@ static int drm_sched_main(void *param)
r);
dma_fence_put(fence);
} else {
+ if (s_fence->finished.error < 0)
+ drm_sched_expel_job_unlocked(sched_job);
drm_sched_process_job(NULL, &s_fence->cb);
}
@@ -603,6 +645,15 @@ static int drm_sched_main(void *param)
return 0;
}
+static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
+{
+ struct drm_gpu_scheduler *sched = s_job->sched;
+
+ spin_lock(&sched->job_list_lock);
+ list_del_init(&s_job->node);
+ spin_unlock(&sched->job_list_lock);
+}
+
/**
* drm_sched_init - Init a gpu scheduler instance
*
@@ -622,7 +673,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
long timeout,
const char *name)
{
- int i;
+ int i, ret;
sched->ops = ops;
sched->hw_submission_limit = hw_submission;
sched->name = name;
@@ -643,8 +694,10 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
/* Each scheduler will run on a separate kernel thread */
sched->thread = kthread_run(drm_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
+ ret = PTR_ERR(sched->thread);
+ sched->thread = NULL;
DRM_ERROR("Failed to create scheduler for %s.\n", name);
- return PTR_ERR(sched->thread);
+ return ret;
}
sched->ready = true;
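
The drm_sched_suspend_timeout()/drm_sched_resume_timeout() pair added above lets a driver park the job timeout around operations such as a GPU reset. A minimal usage sketch, assuming a driver-private scheduler pointer and a hypothetical my_gpu_reset() helper (neither is part of this patch):

	unsigned long remaining;

	/* Park the TDR and remember how much of the timeout was left. */
	remaining = drm_sched_suspend_timeout(sched);

	my_gpu_reset(adev);		/* hypothetical hardware reset */

	/* Re-arm the TDR with the remaining time (cancelled if no jobs remain). */
	drm_sched_resume_timeout(sched, remaining);
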
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index 383d8d6c5847..1bb73dc4c88c 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1,4 +1,5 @@
test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
- test-drm_format.o test-drm_framebuffer.o
+ test-drm_format.o test-drm_framebuffer.o \
+ test-drm_damage_helper.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
index 92601defd8f6..464753746013 100644
--- a/drivers/gpu/drm/selftests/drm_modeset_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
@@ -11,3 +11,24 @@ selftest(check_drm_format_block_width, igt_check_drm_format_block_width)
selftest(check_drm_format_block_height, igt_check_drm_format_block_height)
selftest(check_drm_format_min_pitch, igt_check_drm_format_min_pitch)
selftest(check_drm_framebuffer_create, igt_check_drm_framebuffer_create)
+selftest(damage_iter_no_damage, igt_damage_iter_no_damage)
+selftest(damage_iter_no_damage_fractional_src, igt_damage_iter_no_damage_fractional_src)
+selftest(damage_iter_no_damage_src_moved, igt_damage_iter_no_damage_src_moved)
+selftest(damage_iter_no_damage_fractional_src_moved, igt_damage_iter_no_damage_fractional_src_moved)
+selftest(damage_iter_no_damage_not_visible, igt_damage_iter_no_damage_not_visible)
+selftest(damage_iter_no_damage_no_crtc, igt_damage_iter_no_damage_no_crtc)
+selftest(damage_iter_no_damage_no_fb, igt_damage_iter_no_damage_no_fb)
+selftest(damage_iter_simple_damage, igt_damage_iter_simple_damage)
+selftest(damage_iter_single_damage, igt_damage_iter_single_damage)
+selftest(damage_iter_single_damage_intersect_src, igt_damage_iter_single_damage_intersect_src)
+selftest(damage_iter_single_damage_outside_src, igt_damage_iter_single_damage_outside_src)
+selftest(damage_iter_single_damage_fractional_src, igt_damage_iter_single_damage_fractional_src)
+selftest(damage_iter_single_damage_intersect_fractional_src, igt_damage_iter_single_damage_intersect_fractional_src)
+selftest(damage_iter_single_damage_outside_fractional_src, igt_damage_iter_single_damage_outside_fractional_src)
+selftest(damage_iter_single_damage_src_moved, igt_damage_iter_single_damage_src_moved)
+selftest(damage_iter_single_damage_fractional_src_moved, igt_damage_iter_single_damage_fractional_src_moved)
+selftest(damage_iter_damage, igt_damage_iter_damage)
+selftest(damage_iter_damage_one_intersect, igt_damage_iter_damage_one_intersect)
+selftest(damage_iter_damage_one_outside, igt_damage_iter_damage_one_outside)
+selftest(damage_iter_damage_src_moved, igt_damage_iter_damage_src_moved)
+selftest(damage_iter_damage_not_visible, igt_damage_iter_damage_not_visible)
diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
new file mode 100644
index 000000000000..9d2bcdf8bc29
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test case for drm_damage_helper functions
+ */
+
+#define pr_fmt(fmt) "drm_damage_helper: " fmt
+
+#include <drm/drm_damage_helper.h>
+
+#include "test-drm_modeset_common.h"
+
+static void set_plane_src(struct drm_plane_state *state, int x1, int y1, int x2,
+ int y2)
+{
+ state->src.x1 = x1;
+ state->src.y1 = y1;
+ state->src.x2 = x2;
+ state->src.y2 = y2;
+}
+
+static void set_damage_clip(struct drm_mode_rect *r, int x1, int y1, int x2,
+ int y2)
+{
+ r->x1 = x1;
+ r->y1 = y1;
+ r->x2 = x2;
+ r->y2 = y2;
+}
+
+static void set_damage_blob(struct drm_property_blob *damage_blob,
+ struct drm_mode_rect *r, uint32_t size)
+{
+ damage_blob->length = size;
+ damage_blob->data = r;
+}
+
+static void set_plane_damage(struct drm_plane_state *state,
+ struct drm_property_blob *damage_blob)
+{
+ state->fb_damage_clips = damage_blob;
+}
+
+static bool check_damage_clip(struct drm_plane_state *state, struct drm_rect *r,
+ int x1, int y1, int x2, int y2)
+{
+ /*
+ * Round down x1/y1 and round up x2/y2. Damage is not in 16.16 fixed
+ * point, so round outward to catch all affected pixels.
+ */
+ int src_x1 = state->src.x1 >> 16;
+ int src_y1 = state->src.y1 >> 16;
+ int src_x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
+ int src_y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
+
+ if (x1 >= x2 || y1 >= y2) {
+ pr_err("Cannot have damage clip with no dimension.\n");
+ return false;
+ }
+
+ if (x1 < src_x1 || y1 < src_y1 || x2 > src_x2 || y2 > src_y2) {
+ pr_err("Damage cannot be outside rounded plane src.\n");
+ return false;
+ }
+
+ if (r->x1 != x1 || r->y1 != y1 || r->x2 != x2 || r->y2 != y2) {
+ pr_err("Damage = %d %d %d %d\n", r->x1, r->y1, r->x2, r->y2);
+ return false;
+ }
+
+ return true;
+}
+
+int igt_damage_iter_no_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src same as fb size. */
+ set_plane_src(&old_state, 0, 0, fb.width << 16, fb.height << 16);
+ set_plane_src(&state, 0, 0, fb.width << 16, fb.height << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 2048, 2048));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_fractional_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part and it moved since old plane state. */
+ set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_not_visible(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = false,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_no_crtc(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = 0,
+ .fb = &fb,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_no_fb(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = 0,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_simple_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage set to plane src */
+ set_damage_clip(&damage, 0, 0, 1024, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 1024, 768));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ set_damage_clip(&damage, 256, 192, 768, 576);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 768, 576));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_intersect_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage intersects with the plane src. */
+ set_damage_clip(&damage, 256, 192, 1360, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage clipped to src.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 1024, 768));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_outside_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage clip outside plane src */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_damage_clip(&damage, 10, 10, 256, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 256, 330));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage intersects with the plane src. */
+ set_damage_clip(&damage, 10, 1, 1360, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage clipped to rounded off src.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 4, 1029, 330));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_outside_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage clip outside plane src */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ set_damage_clip(&damage, 20, 30, 256, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_fractional_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src with fractional part moved since old plane state. */
+ set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage intersects with the plane src. */
+ set_damage_clip(&damage, 20, 30, 1360, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+
+ return 0;
+}
+
+int igt_damage_iter_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
+ if (num_hits == 1)
+ FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
+ num_hits++;
+ }
+
+ FAIL(num_hits != 2, "Should return damage when set.");
+
+ return 0;
+}
+
+int igt_damage_iter_damage_one_intersect(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* 2 damage clips, one intersects the plane src. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 2, 2, 1360, 1360);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
+ if (num_hits == 1)
+ FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+ num_hits++;
+ }
+
+ FAIL(num_hits != 2, "Should return damage when set.");
+
+ return 0;
+}
+
+int igt_damage_iter_damage_one_outside(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
+
+ return 0;
+}
+
+int igt_damage_iter_damage_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return round off plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
+
+ return 0;
+}
+
+int igt_damage_iter_damage_not_visible(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = false,
+ };
+
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should not return any damage.");
+
+ return 0;
+}
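
The check_damage_clip() helper above converts the plane src from 16.16 fixed point by rounding x1/y1 down and x2/y2 up. As a worked example of that arithmetic, using the fractional src value 0x3fffe that the tests above rely on (illustration only, not additional test code):

	/* 0x3fffe is just under 4.0 in 16.16 fixed point */
	int x1 = 0x3fffe >> 16;				/* = 3, rounded down */
	int raw_x2 = 0x3fffe + (1024 << 16);		/* just under 1028.0 */
	int x2 = (raw_x2 >> 16) + !!(raw_x2 & 0xFFFF);	/* = 1028, rounded up */
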
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
index d5df5bd51b05..8c76f09c12d1 100644
--- a/drivers/gpu/drm/selftests/test-drm_modeset_common.h
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
@@ -18,5 +18,26 @@ int igt_check_drm_format_block_width(void *ignored);
int igt_check_drm_format_block_height(void *ignored);
int igt_check_drm_format_min_pitch(void *ignored);
int igt_check_drm_framebuffer_create(void *ignored);
+int igt_damage_iter_no_damage(void *ignored);
+int igt_damage_iter_no_damage_fractional_src(void *ignored);
+int igt_damage_iter_no_damage_src_moved(void *ignored);
+int igt_damage_iter_no_damage_fractional_src_moved(void *ignored);
+int igt_damage_iter_no_damage_not_visible(void *ignored);
+int igt_damage_iter_no_damage_no_crtc(void *ignored);
+int igt_damage_iter_no_damage_no_fb(void *ignored);
+int igt_damage_iter_simple_damage(void *ignored);
+int igt_damage_iter_single_damage(void *ignored);
+int igt_damage_iter_single_damage_intersect_src(void *ignored);
+int igt_damage_iter_single_damage_outside_src(void *ignored);
+int igt_damage_iter_single_damage_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_outside_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_src_moved(void *ignored);
+int igt_damage_iter_single_damage_fractional_src_moved(void *ignored);
+int igt_damage_iter_damage(void *ignored);
+int igt_damage_iter_damage_one_intersect(void *ignored);
+int igt_damage_iter_damage_one_outside(void *ignored);
+int igt_damage_iter_damage_src_moved(void *ignored);
+int igt_damage_iter_damage_not_visible(void *ignored);
#endif
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index f80e82e16475..607a6ea17ecc 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1978,6 +1978,23 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
+{
+ unsigned int i;
+
+ if (!dc->soc->wgrps)
+ return true;
+
+ for (i = 0; i < dc->soc->num_wgrps; i++) {
+ const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
+
+ if (wgrp->dc == dc->pipe && wgrp->num_windows > 0)
+ return true;
+ }
+
+ return false;
+}
+
static int tegra_dc_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
@@ -1993,22 +2010,8 @@ static int tegra_dc_init(struct host1x_client *client)
* assign a primary plane to them, which in turn will cause KMS to
* crash.
*/
- if (dc->soc->wgrps) {
- bool has_wgrps = false;
- unsigned int i;
-
- for (i = 0; i < dc->soc->num_wgrps; i++) {
- const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
-
- if (wgrp->dc == dc->pipe && wgrp->num_windows > 0) {
- has_wgrps = true;
- break;
- }
- }
-
- if (!has_wgrps)
- return 0;
- }
+ if (!tegra_dc_has_window_groups(dc))
+ return 0;
dc->syncpt = host1x_syncpt_request(client, flags);
if (!dc->syncpt)
@@ -2094,6 +2097,9 @@ static int tegra_dc_exit(struct host1x_client *client)
struct tegra_dc *dc = host1x_client_to_dc(client);
int err;
+ if (!tegra_dc_has_window_groups(dc))
+ return 0;
+
devm_free_irq(dc->dev, dc->irq, dc);
err = tegra_dc_rgb_exit(dc);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 65ea4988b332..4b70ce664c41 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1274,6 +1274,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra194-display", },
{ .compatible = "nvidia,tegra194-dc", },
{ .compatible = "nvidia,tegra194-sor", },
+ { .compatible = "nvidia,tegra194-vic", },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index f685e72949d1..352d05feabb0 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -141,9 +141,9 @@ int falcon_load_firmware(struct falcon *falcon)
/* allocate iova space for the firmware */
falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
&falcon->firmware.paddr);
- if (!falcon->firmware.vaddr) {
- dev_err(falcon->dev, "dma memory mapping failed\n");
- return -ENOMEM;
+ if (IS_ERR(falcon->firmware.vaddr)) {
+ dev_err(falcon->dev, "DMA memory mapping failed\n");
+ return PTR_ERR(falcon->firmware.vaddr);
}
/* copy firmware image into local area. this also ensures endianness */
@@ -197,11 +197,19 @@ void falcon_exit(struct falcon *falcon)
int falcon_boot(struct falcon *falcon)
{
unsigned long offset;
+ u32 value;
int err;
if (!falcon->firmware.vaddr)
return -EINVAL;
+ err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
+ (value & (FALCON_DMACTL_IMEM_SCRUBBING |
+ FALCON_DMACTL_DMEM_SCRUBBING)) == 0,
+ 10, 10000);
+ if (err < 0)
+ return err;
+
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 3e26be5359cf..71cc3cf60066 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -742,7 +742,9 @@ static const struct host1x_client_ops tegra_display_hub_ops = {
static int tegra_display_hub_probe(struct platform_device *pdev)
{
+ struct device_node *child = NULL;
struct tegra_display_hub *hub;
+ struct clk *clk;
unsigned int i;
int err;
@@ -801,6 +803,34 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
return err;
}
+ hub->num_heads = of_get_child_count(pdev->dev.of_node);
+
+ hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
+ GFP_KERNEL);
+ if (!hub->clk_heads)
+ return -ENOMEM;
+
+ for (i = 0; i < hub->num_heads; i++) {
+ child = of_get_next_child(pdev->dev.of_node, child);
+ if (!child) {
+ dev_err(&pdev->dev, "failed to find node for head %u\n",
+ i);
+ return -ENODEV;
+ }
+
+ clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock for head %u\n",
+ i);
+ of_node_put(child);
+ return PTR_ERR(clk);
+ }
+
+ hub->clk_heads[i] = clk;
+ }
+
+ of_node_put(child);
+
/* XXX: enable clock across reset? */
err = reset_control_assert(hub->rst);
if (err < 0)
@@ -840,12 +870,16 @@ static int tegra_display_hub_remove(struct platform_device *pdev)
static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
{
struct tegra_display_hub *hub = dev_get_drvdata(dev);
+ unsigned int i = hub->num_heads;
int err;
err = reset_control_assert(hub->rst);
if (err < 0)
return err;
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
clk_disable_unprepare(hub->clk_hub);
clk_disable_unprepare(hub->clk_dsc);
clk_disable_unprepare(hub->clk_disp);
@@ -856,6 +890,7 @@ static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
static int __maybe_unused tegra_display_hub_resume(struct device *dev)
{
struct tegra_display_hub *hub = dev_get_drvdata(dev);
+ unsigned int i;
int err;
err = clk_prepare_enable(hub->clk_disp);
@@ -870,13 +905,22 @@ static int __maybe_unused tegra_display_hub_resume(struct device *dev)
if (err < 0)
goto disable_dsc;
+ for (i = 0; i < hub->num_heads; i++) {
+ err = clk_prepare_enable(hub->clk_heads[i]);
+ if (err < 0)
+ goto disable_heads;
+ }
+
err = reset_control_deassert(hub->rst);
if (err < 0)
- goto disable_hub;
+ goto disable_heads;
return 0;
-disable_hub:
+disable_heads:
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
clk_disable_unprepare(hub->clk_hub);
disable_dsc:
clk_disable_unprepare(hub->clk_dsc);
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 6696a85fc1f2..479087c0705a 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -49,6 +49,9 @@ struct tegra_display_hub {
struct clk *clk_hub;
struct reset_control *rst;
+ unsigned int num_heads;
+ struct clk **clk_heads;
+
const struct tegra_display_hub_soc *soc;
struct tegra_windowgroup *wgrps;
};
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index d90bf5f6a67a..44feac2a0359 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -19,6 +19,8 @@
#include <soc/tegra/pmc.h>
+#include <sound/hda_verbs.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
@@ -29,14 +31,6 @@
#include "sor.h"
#include "trace.h"
-/*
- * XXX Remove this after the commit adding it to soc/tegra/pmc.h has been
- * merged. Having this around after the commit is merged should be safe since
- * the preprocessor will effectively replace all occurrences and therefore no
- * duplicate will be defined.
- */
-#define TEGRA_IO_PAD_HDMI_DP0 26
-
#define SOR_REKEY 0x38
struct tegra_sor_hdmi_settings {
@@ -407,6 +401,7 @@ struct tegra_sor {
const struct tegra_sor_soc *soc;
void __iomem *regs;
unsigned int index;
+ unsigned int irq;
struct reset_control *rst;
struct clk *clk_parent;
@@ -433,6 +428,11 @@ struct tegra_sor {
struct delayed_work scdc;
bool scdc_enabled;
+
+ struct {
+ unsigned int sample_rate;
+ unsigned int channels;
+ } audio;
};
struct tegra_sor_state {
@@ -2140,6 +2140,144 @@ tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
return 0;
}
+static void tegra_sor_write_eld(struct tegra_sor *sor)
+{
+ size_t length = drm_eld_size(sor->output.connector.eld), i;
+
+ for (i = 0; i < length; i++)
+ tegra_sor_writel(sor, i << 8 | sor->output.connector.eld[i],
+ SOR_AUDIO_HDA_ELD_BUFWR);
+
+ /*
+ * The HDA codec will always report an ELD buffer size of 96 bytes and
+ * the HDA codec driver will check that each byte read from the buffer
+ * is valid. Therefore every byte must be written, even if fewer than 96
+ * bytes were parsed from the EDID.
+ */
+ for (i = length; i < 96; i++)
+ tegra_sor_writel(sor, i << 8 | 0, SOR_AUDIO_HDA_ELD_BUFWR);
+}
+
+static void tegra_sor_audio_prepare(struct tegra_sor *sor)
+{
+ u32 value;
+
+ tegra_sor_write_eld(sor);
+
+ value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
+ tegra_sor_writel(sor, value, SOR_AUDIO_HDA_PRESENSE);
+}
+
+static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
+{
+ tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
+}
+
+static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
+{
+ u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
+ struct hdmi_audio_infoframe frame;
+ u32 value;
+ int err;
+
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to setup audio infoframe: %d\n", err);
+ return err;
+ }
+
+ frame.channels = sor->audio.channels;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(sor->dev, "failed to pack audio infoframe: %d\n", err);
+ return err;
+ }
+
+ tegra_sor_hdmi_write_infopack(sor, buffer, err);
+
+ value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+
+ return 0;
+}
+
+static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
+
+ /* select HDA audio input */
+ value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
+ value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
+
+ /* inject null samples */
+ if (sor->audio.channels != 2)
+ value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+ else
+ value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+
+ value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
+
+ tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
+
+ /* enable advertising HBR capability */
+ tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
+
+ tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
+
+ value = SOR_HDMI_SPARE_ACR_PRIORITY_HIGH |
+ SOR_HDMI_SPARE_CTS_RESET(1) |
+ SOR_HDMI_SPARE_HW_CTS_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_SPARE);
+
+ /* enable HW CTS */
+ value = SOR_HDMI_ACR_SUBPACK_LOW_SB1(0);
+ tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_LOW);
+
+ /* allow packet to be sent */
+ value = SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_HIGH);
+
+ /* reset N counter and enable lookup */
+ value = SOR_HDMI_AUDIO_N_RESET | SOR_HDMI_AUDIO_N_LOOKUP;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
+
+ value = (24000 * 4096) / (128 * sor->audio.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0320);
+ tegra_sor_writel(sor, 4096, SOR_AUDIO_NVAL_0320);
+
+ tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0441);
+ tegra_sor_writel(sor, 4704, SOR_AUDIO_NVAL_0441);
+
+ tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0882);
+ tegra_sor_writel(sor, 9408, SOR_AUDIO_NVAL_0882);
+
+ tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_1764);
+ tegra_sor_writel(sor, 18816, SOR_AUDIO_NVAL_1764);
+
+ value = (24000 * 6144) / (128 * sor->audio.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0480);
+ tegra_sor_writel(sor, 6144, SOR_AUDIO_NVAL_0480);
+
+ value = (24000 * 12288) / (128 * sor->audio.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0960);
+ tegra_sor_writel(sor, 12288, SOR_AUDIO_NVAL_0960);
+
+ value = (24000 * 24576) / (128 * sor->audio.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_1920);
+ tegra_sor_writel(sor, 24576, SOR_AUDIO_NVAL_1920);
+
+ value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_N);
+ value &= ~SOR_HDMI_AUDIO_N_RESET;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
+
+ tegra_sor_hdmi_enable_audio_infoframe(sor);
+}
+
static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
{
u32 value;
@@ -2149,6 +2287,11 @@ static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
}
+static void tegra_sor_hdmi_audio_disable(struct tegra_sor *sor)
+{
+ tegra_sor_hdmi_disable_audio_infoframe(sor);
+}
+
static struct tegra_sor_hdmi_settings *
tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
{
@@ -2244,6 +2387,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
u32 value;
int err;
+ tegra_sor_audio_unprepare(sor);
tegra_sor_hdmi_scdc_stop(sor);
err = tegra_sor_detach(sor);
@@ -2652,6 +2796,7 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
tegra_sor_hdmi_scdc_start(sor);
+ tegra_sor_audio_prepare(sor);
}
static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
@@ -2667,6 +2812,7 @@ static int tegra_sor_init(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int connector = DRM_MODE_CONNECTOR_Unknown;
int encoder = DRM_MODE_ENCODER_NONE;
+ u32 value;
int err;
if (!sor->aux) {
@@ -2760,6 +2906,15 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0)
return err;
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+ * is used for interoperability between the HDA codec driver and the
+ * HDMI/DP driver.
+ */
+ value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
+ tegra_sor_writel(sor, value, SOR_INT_ENABLE);
+ tegra_sor_writel(sor, value, SOR_INT_MASK);
+
return 0;
}
@@ -2768,6 +2923,9 @@ static int tegra_sor_exit(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int err;
+ tegra_sor_writel(sor, 0, SOR_INT_MASK);
+ tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
+
tegra_output_exit(&sor->output);
if (sor->aux) {
@@ -3038,6 +3196,54 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
return 0;
}
+static void tegra_hda_parse_format(unsigned int format, unsigned int *rate,
+ unsigned int *channels)
+{
+ unsigned int mul, div;
+
+ if (format & AC_FMT_BASE_44K)
+ *rate = 44100;
+ else
+ *rate = 48000;
+
+ mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
+ div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
+
+ *rate = *rate * (mul + 1) / (div + 1);
+
+ *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+}
+
+static irqreturn_t tegra_sor_irq(int irq, void *data)
+{
+ struct tegra_sor *sor = data;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_INT_STATUS);
+ tegra_sor_writel(sor, value, SOR_INT_STATUS);
+
+ if (value & SOR_INT_CODEC_SCRATCH0) {
+ value = tegra_sor_readl(sor, SOR_AUDIO_HDA_CODEC_SCRATCH0);
+
+ if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
+ unsigned int format, sample_rate, channels;
+
+ format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
+
+ tegra_hda_parse_format(format, &sample_rate, &channels);
+
+ sor->audio.sample_rate = sample_rate;
+ sor->audio.channels = channels;
+
+ tegra_sor_hdmi_audio_enable(sor);
+ } else {
+ tegra_sor_hdmi_audio_disable(sor);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static int tegra_sor_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -3120,14 +3326,38 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- if (!pdev->dev.pm_domain) {
- sor->rst = devm_reset_control_get(&pdev->dev, "sor");
- if (IS_ERR(sor->rst)) {
- err = PTR_ERR(sor->rst);
+ err = platform_get_irq(pdev, 0);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ goto remove;
+ }
+
+ sor->irq = err;
+
+ err = devm_request_irq(sor->dev, sor->irq, tegra_sor_irq, 0,
+ dev_name(sor->dev), sor);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto remove;
+ }
+
+ sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+ if (IS_ERR(sor->rst)) {
+ err = PTR_ERR(sor->rst);
+
+ if (err != -EBUSY || WARN_ON(!pdev->dev.pm_domain)) {
dev_err(&pdev->dev, "failed to get reset control: %d\n",
err);
goto remove;
}
+
+ /*
+ * At this point, the reset control is most likely being used
+ * by the generic power domain implementation. With any luck
+ * the power domain will have taken care of resetting the SOR
+ * and we don't have to do anything.
+ */
+ sor->rst = NULL;
}
sor->clk = devm_clk_get(&pdev->dev, NULL);
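
The SOR_AUDIO_AVAL_* values programmed in tegra_sor_hdmi_audio_enable() follow the audio clock regeneration relation aval = 24000 * nval / (128 * fs / 1000); the 24000 factor suggests a 24 MHz audio reference clock (an assumption here, not stated in the patch). A quick sanity check for the 48 kHz register:

	u32 fs = 48000;					/* sample rate in Hz */
	u32 nval = 6144;				/* SOR_AUDIO_NVAL_0480 value */
	u32 aval = (24000 * nval) / (128 * fs / 1000);	/* = 24000 */
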
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index fb0854d92a27..13f7e68bec42 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -364,12 +364,28 @@
#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+#define SOR_HDMI_ACR_CTRL 0xb1
+
+#define SOR_HDMI_ACR_0320_SUBPACK_LOW 0xb2
+#define SOR_HDMI_ACR_SUBPACK_LOW_SB1(x) (((x) & 0xff) << 24)
+
+#define SOR_HDMI_ACR_0320_SUBPACK_HIGH 0xb3
+#define SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE (1 << 31)
+
+#define SOR_HDMI_ACR_0441_SUBPACK_LOW 0xb4
+#define SOR_HDMI_ACR_0441_SUBPACK_HIGH 0xb5
+
#define SOR_HDMI_CTRL 0xc0
#define SOR_HDMI_CTRL_ENABLE (1 << 30)
#define SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
#define SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10)
#define SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+#define SOR_HDMI_SPARE 0xcb
+#define SOR_HDMI_SPARE_ACR_PRIORITY_HIGH (1 << 31)
+#define SOR_HDMI_SPARE_CTS_RESET(x) (((x) & 0x7) << 16)
+#define SOR_HDMI_SPARE_HW_CTS_ENABLE (1 << 0)
+
#define SOR_REFCLK 0xe6
#define SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8)
#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
@@ -378,10 +394,62 @@
#define SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1)
#define SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0)
+#define SOR_AUDIO_CNTRL 0xfc
+#define SOR_AUDIO_CNTRL_INJECT_NULLSMPL (1 << 29)
+#define SOR_AUDIO_CNTRL_SOURCE_SELECT(x) (((x) & 0x3) << 20)
+#define SOURCE_SELECT_MASK 0x3
+#define SOURCE_SELECT_HDA 0x2
+#define SOURCE_SELECT_SPDIF 0x1
+#define SOURCE_SELECT_AUTO 0x0
+#define SOR_AUDIO_CNTRL_AFIFO_FLUSH (1 << 12)
+
+#define SOR_AUDIO_SPARE 0xfe
+#define SOR_AUDIO_SPARE_HBR_ENABLE (1 << 27)
+
+#define SOR_AUDIO_NVAL_0320 0xff
+#define SOR_AUDIO_NVAL_0441 0x100
+#define SOR_AUDIO_NVAL_0882 0x101
+#define SOR_AUDIO_NVAL_1764 0x102
+#define SOR_AUDIO_NVAL_0480 0x103
+#define SOR_AUDIO_NVAL_0960 0x104
+#define SOR_AUDIO_NVAL_1920 0x105
+
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0 0x10a
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID (1 << 30)
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
+
+#define SOR_AUDIO_HDA_ELD_BUFWR 0x10c
+#define SOR_AUDIO_HDA_ELD_BUFWR_INDEX(x) (((x) & 0xff) << 8)
+#define SOR_AUDIO_HDA_ELD_BUFWR_DATA(x) (((x) & 0xff) << 0)
+
+#define SOR_AUDIO_HDA_PRESENSE 0x10d
+#define SOR_AUDIO_HDA_PRESENSE_ELDV (1 << 1)
+#define SOR_AUDIO_HDA_PRESENSE_PD (1 << 0)
+
+#define SOR_AUDIO_AVAL_0320 0x10f
+#define SOR_AUDIO_AVAL_0441 0x110
+#define SOR_AUDIO_AVAL_0882 0x111
+#define SOR_AUDIO_AVAL_1764 0x112
+#define SOR_AUDIO_AVAL_0480 0x113
+#define SOR_AUDIO_AVAL_0960 0x114
+#define SOR_AUDIO_AVAL_1920 0x115
+
+#define SOR_INT_STATUS 0x11c
+#define SOR_INT_CODEC_CP_REQUEST (1 << 2)
+#define SOR_INT_CODEC_SCRATCH1 (1 << 1)
+#define SOR_INT_CODEC_SCRATCH0 (1 << 0)
+
+#define SOR_INT_MASK 0x11d
+#define SOR_INT_ENABLE 0x11e
+
#define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123
#define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
#define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
+#define SOR_HDMI_AUDIO_N 0x13c
+#define SOR_HDMI_AUDIO_N_LOOKUP (1 << 28)
+#define SOR_HDMI_AUDIO_N_RESET (1 << 20)
+
#define SOR_HDMI2_CTRL 0x13e
#define SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4 (1 << 1)
#define SOR_HDMI2_CTRL_SCRAMBLE (1 << 0)
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 9f657a63b0bb..d47983deb1cf 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -38,6 +38,7 @@ struct vic {
struct iommu_domain *domain;
struct device *dev;
struct clk *clk;
+ struct reset_control *rst;
/* Platform configuration */
const struct vic_config *config;
@@ -56,13 +57,37 @@ static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
static int vic_runtime_resume(struct device *dev)
{
struct vic *vic = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(vic->clk);
+ if (err < 0)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = reset_control_deassert(vic->rst);
+ if (err < 0)
+ goto disable;
+
+ usleep_range(10, 20);
+
+ return 0;
- return clk_prepare_enable(vic->clk);
+disable:
+ clk_disable_unprepare(vic->clk);
+ return err;
}
static int vic_runtime_suspend(struct device *dev)
{
struct vic *vic = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_assert(vic->rst);
+ if (err < 0)
+ return err;
+
+ usleep_range(2000, 4000);
clk_disable_unprepare(vic->clk);
@@ -282,10 +307,18 @@ static const struct vic_config vic_t186_config = {
.version = 0x18,
};
+#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"
+
+static const struct vic_config vic_t194_config = {
+ .firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
+ .version = 0x19,
+};
+
static const struct of_device_id vic_match[] = {
{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
+ { .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
{ },
};
@@ -323,6 +356,14 @@ static int vic_probe(struct platform_device *pdev)
return PTR_ERR(vic->clk);
}
+ if (!dev->pm_domain) {
+ vic->rst = devm_reset_control_get(dev, "vic");
+ if (IS_ERR(vic->rst)) {
+ dev_err(&pdev->dev, "failed to get reset\n");
+ return PTR_ERR(vic->rst);
+ }
+ }
+
vic->falcon.dev = dev;
vic->falcon.regs = vic->regs;
vic->falcon.ops = &vic_falcon_ops;
@@ -418,3 +459,6 @@ MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
+#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 3dac08b24140..337e86a1d5ea 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -183,6 +183,12 @@ static void tilcdc_fini(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
+#ifdef CONFIG_CPU_FREQ
+ if (priv->freq_transition.notifier_call)
+ cpufreq_unregister_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
if (priv->crtc)
tilcdc_crtc_shutdown(priv->crtc);
@@ -194,12 +200,6 @@ static void tilcdc_fini(struct drm_device *dev)
drm_mode_config_cleanup(dev);
tilcdc_remove_external_device(dev);
-#ifdef CONFIG_CPU_FREQ
- if (priv->freq_transition.notifier_call)
- cpufreq_unregister_notifier(&priv->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-#endif
-
if (priv->clk)
clk_put(priv->clk);
@@ -270,17 +270,6 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
goto init_failed;
}
-#ifdef CONFIG_CPU_FREQ
- priv->freq_transition.notifier_call = cpufreq_transition;
- ret = cpufreq_register_notifier(&priv->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
- if (ret) {
- dev_err(dev, "failed to register cpufreq notifier\n");
- priv->freq_transition.notifier_call = NULL;
- goto init_failed;
- }
-#endif
-
if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH;
@@ -357,6 +346,17 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
}
modeset_init(ddev);
+#ifdef CONFIG_CPU_FREQ
+ priv->freq_transition.notifier_call = cpufreq_transition;
+ ret = cpufreq_register_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
+ dev_err(dev, "failed to register cpufreq notifier\n");
+ priv->freq_transition.notifier_call = NULL;
+ goto init_failed;
+ }
+#endif
+
if (priv->is_componentized) {
ret = component_bind_all(dev, ddev);
if (ret < 0)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d87935bf8e30..0ec08394e17a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -77,38 +77,39 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place,
return 0;
}
-static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p,
+ int mem_type)
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct drm_printer p = drm_debug_printer(TTM_PFX);
- pr_err(" has_type: %d\n", man->has_type);
- pr_err(" use_type: %d\n", man->use_type);
- pr_err(" flags: 0x%08X\n", man->flags);
- pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset);
- pr_err(" size: %llu\n", man->size);
- pr_err(" available_caching: 0x%08X\n", man->available_caching);
- pr_err(" default_caching: 0x%08X\n", man->default_caching);
+ drm_printf(p, " has_type: %d\n", man->has_type);
+ drm_printf(p, " use_type: %d\n", man->use_type);
+ drm_printf(p, " flags: 0x%08X\n", man->flags);
+ drm_printf(p, " gpu_offset: 0x%08llX\n", man->gpu_offset);
+ drm_printf(p, " size: %llu\n", man->size);
+ drm_printf(p, " available_caching: 0x%08X\n", man->available_caching);
+ drm_printf(p, " default_caching: 0x%08X\n", man->default_caching);
if (mem_type != TTM_PL_SYSTEM)
- (*man->func->debug)(man, &p);
+ (*man->func->debug)(man, p);
}
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
+ struct drm_printer p = drm_debug_printer(TTM_PFX);
int i, ret, mem_type;
- pr_err("No space for %p (%lu pages, %luK, %luM)\n",
- bo, bo->mem.num_pages, bo->mem.size >> 10,
- bo->mem.size >> 20);
+ drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
+ bo, bo->mem.num_pages, bo->mem.size >> 10,
+ bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
ret = ttm_mem_type_from_place(&placement->placement[i],
&mem_type);
if (ret)
return;
- pr_err(" placement[%d]=0x%08X (%d)\n",
- i, placement->placement[i].flags, mem_type);
- ttm_mem_type_debug(bo->bdev, mem_type);
+ drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
+ i, placement->placement[i].flags, mem_type);
+ ttm_mem_type_debug(bo->bdev, &p, mem_type);
}
}
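
For illustration (not part of the patch): the hunk above creates a single drm_printer at the top level and threads it through the debug helpers instead of calling pr_err() directly. A minimal sketch of that pattern, assuming only <drm/drm_print.h>; the dump_* names and the "[TTM] " prefix literal are placeholders:

#include <drm/drm_print.h>

static void dump_one(struct drm_printer *p, int idx)
{
	/* Output goes through the caller's printer rather than pr_err(). */
	drm_printf(p, "  entry[%d]\n", idx);
}

static void dump_all(void)
{
	/* One printer, created once and passed down, as in ttm_bo_mem_space_debug(). */
	struct drm_printer p = drm_debug_printer("[TTM] ");
	int i;

	for (i = 0; i < 4; i++)
		dump_one(&p, i);
}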
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ba80150d1052..895d77d799e4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (!fbo)
return -ENOMEM;
- ttm_bo_get(bo);
fbo->base = *bo;
+ fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
+
+ ttm_bo_get(bo);
fbo->bo = bo;
/**
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index efa005a1c1b7..93860346c426 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -126,10 +126,11 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
}
if (!ret) {
- if (!entry->shared)
+ if (!entry->num_shared)
continue;
- ret = reservation_object_reserve_shared(bo->resv, 1);
+ ret = reservation_object_reserve_shared(bo->resv,
+ entry->num_shared);
if (!ret)
continue;
}
@@ -150,8 +151,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
}
}
- if (!ret && entry->shared)
- ret = reservation_object_reserve_shared(bo->resv, 1);
+ if (!ret && entry->num_shared)
+ ret = reservation_object_reserve_shared(bo->resv,
+ entry->num_shared);
if (unlikely(ret != 0)) {
if (ret == -EINTR)
@@ -199,7 +201,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
bo = entry->bo;
- if (entry->shared)
+ if (entry->num_shared)
reservation_object_add_shared_fence(bo->resv, fence);
else
reservation_object_add_excl_fence(bo->resv, fence);
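
For illustration (not part of the patch): with this change a caller states how many shared fence slots it needs per buffer instead of passing a bool. A minimal reservation sketch under that assumption; example_reserve() is a placeholder name, and the real vmwgfx callers appear further down in this diff:

#include <drm/ttm/ttm_execbuf_util.h>

static int example_reserve(struct ww_acquire_ctx *ticket,
			   struct ttm_buffer_object *bo)
{
	struct ttm_validate_buffer val_buf;
	struct list_head val_list;

	INIT_LIST_HEAD(&val_list);
	val_buf.bo = bo;
	/* 0 asks for an exclusive slot only; a positive count pre-reserves
	 * that many shared fence slots via reservation_object_reserve_shared(). */
	val_buf.num_shared = 0;
	list_add_tail(&val_buf.head, &val_list);

	return ttm_eu_reserve_buffers(ticket, &val_list, true, NULL);
}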
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index b9c078860a7c..25afb1d594e3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -34,7 +34,7 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
-#include <linux/dma_remapping.h>
+#include <linux/intel-iommu.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
@@ -49,6 +49,8 @@
#define VMWGFX_REPO "In Tree"
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
/**
* Fully encoded drm commands. Might move to vmw_drm.h
@@ -581,7 +583,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
dev_priv->map_mode = vmw_dma_map_populate;
- if (dma_ops->sync_single_for_cpu)
+ if (dma_ops && dma_ops->sync_single_for_cpu)
dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl() == 0)
@@ -665,7 +667,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
- mutex_init(&dev_priv->requested_layout_mutex);
mutex_init(&dev_priv->global_kms_state_mutex);
ttm_lock_init(&dev_priv->reservation_sem);
spin_lock_init(&dev_priv->resource_lock);
@@ -912,7 +913,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_unlock(&dev_priv->cap_lock);
}
-
+ vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 28df788da44e..cd607ba9c2fe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -466,15 +466,6 @@ struct vmw_private {
uint32_t num_displays;
/*
- * Currently requested_layout_mutex is used to protect the gui
- * positionig state in display unit. With that use case currently this
- * mutex is only taken during layout ioctl and atomic check_modeset.
- * Other display unit state can be protected with this mutex but that
- * needs careful consideration.
- */
- struct mutex requested_layout_mutex;
-
- /*
* Framebuffer info.
*/
@@ -484,8 +475,6 @@ struct vmw_private {
struct vmw_overlay *overlay_priv;
struct drm_property *hotplug_mode_update_property;
struct drm_property *implicit_placement_property;
- unsigned num_implicit;
- struct vmw_framebuffer *implicit_fb;
struct mutex global_kms_state_mutex;
spinlock_t cursor_lock;
struct drm_atomic_state *suspend_state;
@@ -604,6 +593,9 @@ struct vmw_private {
struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
+
+ /* Validation memory reservation */
+ struct vmw_validation_mem vvm;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -842,6 +834,8 @@ extern int vmw_fifo_flush(struct vmw_private *dev_priv,
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
+ size_t gran);
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5a6b70ba137a..f2d13a72c05d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1738,7 +1738,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
void *buf)
{
struct vmw_buffer_object *vmw_bo;
- int ret;
struct {
uint32_t header;
@@ -1748,7 +1747,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.ptr,
&vmw_bo);
- return ret;
}
@@ -3837,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
+ vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
+
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index f87261545f2c..301260e23e52 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -906,13 +906,10 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
container_of(action, struct vmw_event_fence_action, action);
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
- struct drm_file *file_priv;
-
if (unlikely(event == NULL))
return;
- file_priv = event->file_priv;
spin_lock_irq(&dev->event_lock);
if (likely(eaction->tv_sec != NULL)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e6b11f6ae2e4..b351fb5214d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_rect.h>
+#include <drm/drm_damage_helper.h>
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
@@ -456,21 +457,8 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc *crtc = state->crtc;
struct vmw_connector_state *vcs;
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
vcs = vmw_connector_state_to_vcs(du->connector.state);
-
- /* Only one active implicit framebuffer at a time. */
- mutex_lock(&dev_priv->global_kms_state_mutex);
- if (vcs->is_implicit && dev_priv->implicit_fb &&
- !(dev_priv->num_implicit == 1 && du->active_implicit)
- && dev_priv->implicit_fb != vfb) {
- DRM_ERROR("Multiple implicit framebuffers "
- "not supported.\n");
- ret = -EINVAL;
- }
- mutex_unlock(&dev_priv->global_kms_state_mutex);
}
@@ -846,58 +834,6 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
kfree(vfbs);
}
-static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
- struct drm_file *file_priv,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
-{
- struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- struct vmw_framebuffer_surface *vfbs =
- vmw_framebuffer_to_vfbs(framebuffer);
- struct drm_clip_rect norect;
- int ret, inc = 1;
-
- /* Legacy Display Unit does not support 3D */
- if (dev_priv->active_display_unit == vmw_du_legacy)
- return -EINVAL;
-
- drm_modeset_lock_all(dev_priv->dev);
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0)) {
- drm_modeset_unlock_all(dev_priv->dev);
- return ret;
- }
-
- if (!num_clips) {
- num_clips = 1;
- clips = &norect;
- norect.x1 = norect.y1 = 0;
- norect.x2 = framebuffer->width;
- norect.y2 = framebuffer->height;
- } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
- num_clips /= 2;
- inc = 2; /* skip source rects */
- }
-
- if (dev_priv->active_display_unit == vmw_du_screen_object)
- ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
- clips, NULL, NULL, 0, 0,
- num_clips, inc, NULL, NULL);
- else
- ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
- clips, NULL, NULL, 0, 0,
- num_clips, inc, NULL, NULL);
-
- vmw_fifo_flush(dev_priv, false);
- ttm_read_unlock(&dev_priv->reservation_sem);
-
- drm_modeset_unlock_all(dev_priv->dev);
-
- return 0;
-}
-
/**
* vmw_kms_readback - Perform a readback from the screen system to
* a buffer-object backed framebuffer.
@@ -941,7 +877,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
- .dirty = vmw_framebuffer_surface_dirty,
+ .dirty = drm_atomic_helper_dirtyfb,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
@@ -1084,16 +1020,6 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
}
switch (dev_priv->active_display_unit) {
- case vmw_du_screen_target:
- ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
- clips, NULL, num_clips, increment,
- true, true, NULL);
- break;
- case vmw_du_screen_object:
- ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
- clips, NULL, num_clips,
- increment, true, NULL, NULL);
- break;
case vmw_du_legacy:
ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
clips, num_clips, increment);
@@ -1112,9 +1038,25 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
return ret;
}
+static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+
+ if (dev_priv->active_display_unit == vmw_du_legacy)
+ return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
+ color, clips, num_clips);
+
+ return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
+ clips, num_clips);
+}
+
static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.destroy = vmw_framebuffer_bo_destroy,
- .dirty = vmw_framebuffer_bo_dirty,
+ .dirty = vmw_framebuffer_bo_dirty_ext,
};
/**
@@ -1565,6 +1507,88 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
}
/**
+ * vmw_crtc_state_and_lock - Return new or current crtc state with locked
+ * crtc mutex
+ * @state: The atomic state pointer containing the new atomic state
+ * @crtc: The crtc
+ *
+ * This function returns the new crtc state if it's part of the state update.
+ * Otherwise returns the current crtc state. It also makes sure that the
+ * crtc mutex is locked.
+ *
+ * Returns: A valid crtc state pointer or NULL. It may also return an
+ * error pointer, in particular -EDEADLK if locking needs to be rerun.
+ */
+static struct drm_crtc_state *
+vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (crtc_state) {
+ lockdep_assert_held(&crtc->mutex.mutex.base);
+ } else {
+ int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
+
+ if (ret != 0 && ret != -EALREADY)
+ return ERR_PTR(ret);
+
+ crtc_state = crtc->state;
+ }
+
+ return crtc_state;
+}
+
+/**
+ * vmw_kms_check_implicit - Verify that all implicit display units scan out
+ * from the same fb after the new state is committed.
+ * @dev: The drm_device.
+ * @state: The new state to be checked.
+ *
+ * Returns:
+ * Zero on success,
+ * -EINVAL on invalid state,
+ * -EDEADLK if modeset locking needs to be rerun.
+ */
+static int vmw_kms_check_implicit(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_framebuffer *implicit_fb = NULL;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ drm_for_each_crtc(crtc, dev) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ if (!du->is_implicit)
+ continue;
+
+ crtc_state = vmw_crtc_state_and_lock(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state || !crtc_state->enable)
+ continue;
+
+ /*
+ * Can't move primary planes across crtcs, so this is OK.
+ * It also means we don't need to take the plane mutex.
+ */
+ plane_state = du->primary.state;
+ if (plane_state->crtc != crtc)
+ continue;
+
+ if (!implicit_fb)
+ implicit_fb = plane_state->fb;
+ else if (implicit_fb != plane_state->fb)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* vmw_kms_check_topology - Validates topology in drm_atomic_state
* @dev: DRM device
* @state: the driver state object
@@ -1575,7 +1599,6 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_rect *rects;
struct drm_crtc *crtc;
@@ -1587,19 +1610,31 @@ static int vmw_kms_check_topology(struct drm_device *dev,
if (!rects)
return -ENOMEM;
- mutex_lock(&dev_priv->requested_layout_mutex);
-
drm_for_each_crtc(crtc, dev) {
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_crtc_state *crtc_state;
i = drm_crtc_index(crtc);
- if (crtc_state && crtc_state->enable) {
+ crtc_state = vmw_crtc_state_and_lock(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto clean;
+ }
+
+ if (!crtc_state)
+ continue;
+
+ if (crtc_state->enable) {
rects[i].x1 = du->gui_x;
rects[i].y1 = du->gui_y;
rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
+ } else {
+ rects[i].x1 = 0;
+ rects[i].y1 = 0;
+ rects[i].x2 = 0;
+ rects[i].y2 = 0;
}
}
@@ -1611,14 +1646,6 @@ static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
- if (!new_crtc_state->enable) {
- rects[i].x1 = 0;
- rects[i].y1 = 0;
- rects[i].x2 = 0;
- rects[i].y2 = 0;
- continue;
- }
-
if (!du->pref_active) {
ret = -EINVAL;
goto clean;
@@ -1639,18 +1666,12 @@ static int vmw_kms_check_topology(struct drm_device *dev,
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
vmw_conn_state->gui_x = du->gui_x;
vmw_conn_state->gui_y = du->gui_y;
-
- rects[i].x1 = du->gui_x;
- rects[i].y1 = du->gui_y;
- rects[i].x2 = du->gui_x + new_crtc_state->mode.hdisplay;
- rects[i].y2 = du->gui_y + new_crtc_state->mode.vdisplay;
}
ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
rects);
clean:
- mutex_unlock(&dev_priv->requested_layout_mutex);
kfree(rects);
return ret;
}
@@ -1681,6 +1702,10 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
if (ret)
return ret;
+ ret = vmw_kms_check_implicit(dev, state);
+ if (ret)
+ return ret;
+
if (!state->allow_modeset)
return ret;
@@ -2003,11 +2028,25 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
struct vmw_display_unit *du;
struct drm_connector *con;
struct drm_connector_list_iter conn_iter;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc *crtc;
+ int ret;
+
+ /* Currently gui_x/y is protected with the crtc mutex */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_acquire_init(&ctx, 0);
+retry:
+ drm_for_each_crtc(crtc, dev) {
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret < 0) {
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+ goto out_fini;
+ }
+ }
- /*
- * Currently only gui_x/y is protected with requested_layout_mutex.
- */
- mutex_lock(&dev_priv->requested_layout_mutex);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(con, &conn_iter) {
du = vmw_connector_to_du(con);
@@ -2026,9 +2065,7 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
}
}
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev_priv->requested_layout_mutex);
- mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(con, &dev->mode_config.connector_list, head) {
du = vmw_connector_to_du(con);
if (num_rects > du->unit) {
@@ -2048,10 +2085,13 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
}
con->status = vmw_du_connector_detect(con, true);
}
- mutex_unlock(&dev->mode_config.mutex);
drm_sysfs_hotplug_event(dev);
-
+out_fini:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ mutex_unlock(&dev->mode_config.mutex);
+
return 0;
}
@@ -2275,84 +2315,6 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
return 1;
}
-int vmw_du_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- struct vmw_display_unit *du = vmw_connector_to_du(connector);
- struct vmw_private *dev_priv = vmw_priv(connector->dev);
-
- if (property == dev_priv->implicit_placement_property)
- du->is_implicit = val;
-
- return 0;
-}
-
-
-
-/**
- * vmw_du_connector_atomic_set_property - Atomic version of get property
- *
- * @crtc - crtc the property is associated with
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int
-vmw_du_connector_atomic_set_property(struct drm_connector *connector,
- struct drm_connector_state *state,
- struct drm_property *property,
- uint64_t val)
-{
- struct vmw_private *dev_priv = vmw_priv(connector->dev);
- struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
- struct vmw_display_unit *du = vmw_connector_to_du(connector);
-
-
- if (property == dev_priv->implicit_placement_property) {
- vcs->is_implicit = val;
-
- /*
- * We should really be doing a drm_atomic_commit() to
- * commit the new state, but since this doesn't cause
- * an immedate state change, this is probably ok
- */
- du->is_implicit = vcs->is_implicit;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-/**
- * vmw_du_connector_atomic_get_property - Atomic version of get property
- *
- * @connector - connector the property is associated with
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int
-vmw_du_connector_atomic_get_property(struct drm_connector *connector,
- const struct drm_connector_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- struct vmw_private *dev_priv = vmw_priv(connector->dev);
- struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
-
- if (property == dev_priv->implicit_placement_property)
- *val = vcs->is_implicit;
- else {
- DRM_ERROR("Invalid Property %s\n", property->name);
- return -EINVAL;
- }
-
- return 0;
-}
-
/**
* vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
* @dev: drm device for the ioctl
@@ -2742,143 +2704,25 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
}
/**
- * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
- *
- * @dev_priv: Pointer to a device private struct.
- * @du: The display unit of the crtc.
- */
-void vmw_kms_del_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du)
-{
- mutex_lock(&dev_priv->global_kms_state_mutex);
- if (du->active_implicit) {
- if (--(dev_priv->num_implicit) == 0)
- dev_priv->implicit_fb = NULL;
- du->active_implicit = false;
- }
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
- * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
- *
- * @vmw_priv: Pointer to a device private struct.
- * @du: The display unit of the crtc.
- * @vfb: The implicit framebuffer
- *
- * Registers a binding to an implicit framebuffer.
- */
-void vmw_kms_add_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du,
- struct vmw_framebuffer *vfb)
-{
- mutex_lock(&dev_priv->global_kms_state_mutex);
- WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);
-
- if (!du->active_implicit && du->is_implicit) {
- dev_priv->implicit_fb = vfb;
- du->active_implicit = true;
- dev_priv->num_implicit++;
- }
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
- * vmw_kms_screen_object_flippable - Check whether we can page-flip a crtc.
- *
- * @dev_priv: Pointer to device-private struct.
- * @crtc: The crtc we want to flip.
- *
- * Returns true or false depending whether it's OK to flip this crtc
- * based on the criterion that we must not have more than one implicit
- * frame-buffer at any one time.
- */
-bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- bool ret;
-
- mutex_lock(&dev_priv->global_kms_state_mutex);
- ret = !du->is_implicit || dev_priv->num_implicit == 1;
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-
- return ret;
-}
-
-/**
- * vmw_kms_update_implicit_fb - Update the implicit fb.
- *
- * @dev_priv: Pointer to device-private struct.
- * @crtc: The crtc the new implicit frame-buffer is bound to.
- */
-void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct drm_plane *plane = crtc->primary;
- struct vmw_framebuffer *vfb;
-
- mutex_lock(&dev_priv->global_kms_state_mutex);
-
- if (!du->is_implicit)
- goto out_unlock;
-
- vfb = vmw_framebuffer_to_vfb(plane->state->fb);
- WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
- dev_priv->implicit_fb != vfb);
-
- dev_priv->implicit_fb = vfb;
-out_unlock:
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
* vmw_kms_create_implicit_placement_proparty - Set up the implicit placement
* property.
*
* @dev_priv: Pointer to a device private struct.
- * @immutable: Whether the property is immutable.
*
* Sets up the implicit placement property unless it's already set up.
*/
void
-vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
- bool immutable)
+vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
if (dev_priv->implicit_placement_property)
return;
dev_priv->implicit_placement_property =
drm_property_create_range(dev_priv->dev,
- immutable ?
- DRM_MODE_PROP_IMMUTABLE : 0,
+ DRM_MODE_PROP_IMMUTABLE,
"implicit_placement", 0, 1);
-
-}
-
-
-/**
- * vmw_kms_set_config - Wrapper around drm_atomic_helper_set_config
- *
- * @set: The configuration to set.
- *
- * The vmwgfx Xorg driver doesn't assign the mode::type member, which
- * when drm_mode_set_crtcinfo is called as part of the configuration setting
- * causes it to return incorrect crtc dimensions causing severe problems in
- * the vmwgfx modesetting. So explicitly clear that member before calling
- * into drm_atomic_helper_set_config.
- */
-int vmw_kms_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx)
-{
- if (set && set->mode)
- set->mode->type = 0;
-
- return drm_atomic_helper_set_config(set, ctx);
}
-
/**
* vmw_kms_suspend - Save modesetting state and turn modesetting off.
*
@@ -2935,3 +2779,124 @@ void vmw_kms_lost_device(struct drm_device *dev)
{
drm_atomic_helper_shutdown(dev);
}
+
+/**
+ * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
+ * @update: The closure structure.
+ *
+ * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
+ * update on display unit.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
+{
+ struct drm_plane_state *state = update->plane->state;
+ struct drm_plane_state *old_state = update->old_state;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ struct drm_rect bb;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+ uint32_t reserved_size = 0;
+ uint32_t submit_size = 0;
+ uint32_t curr_size = 0;
+ uint32_t num_hits = 0;
+ void *cmd_start;
+ char *cmd_next;
+ int ret;
+
+ /*
+ * Iterate in advance to check whether a plane update is really needed and
+ * to find the number of clips that actually lie in the plane src, for FIFO
+ * allocation.
+ */
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ if (num_hits == 0)
+ return 0;
+
+ if (update->vfb->bo) {
+ struct vmw_framebuffer_bo *vfbbo =
+ container_of(update->vfb, typeof(*vfbbo), base);
+
+ ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
+ update->cpu_blit);
+ } else {
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(update->vfb, typeof(*vfbs), base);
+
+ ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
+ 0, NULL, NULL);
+ }
+
+ if (ret)
+ return ret;
+
+ ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
+ if (ret)
+ goto out_unref;
+
+ reserved_size = update->calc_fifo_size(update, num_hits);
+ cmd_start = vmw_fifo_reserve(update->dev_priv, reserved_size);
+ if (!cmd_start) {
+ ret = -ENOMEM;
+ goto out_revert;
+ }
+
+ cmd_next = cmd_start;
+
+ if (update->post_prepare) {
+ curr_size = update->post_prepare(update, cmd_next);
+ cmd_next += curr_size;
+ submit_size += curr_size;
+ }
+
+ if (update->pre_clip) {
+ curr_size = update->pre_clip(update, cmd_next, num_hits);
+ cmd_next += curr_size;
+ submit_size += curr_size;
+ }
+
+ bb.x1 = INT_MAX;
+ bb.y1 = INT_MAX;
+ bb.x2 = INT_MIN;
+ bb.y2 = INT_MIN;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ uint32_t fb_x = clip.x1;
+ uint32_t fb_y = clip.y1;
+
+ vmw_du_translate_to_crtc(state, &clip);
+ if (update->clip) {
+ curr_size = update->clip(update, cmd_next, &clip, fb_x,
+ fb_y);
+ cmd_next += curr_size;
+ submit_size += curr_size;
+ }
+ bb.x1 = min_t(int, bb.x1, clip.x1);
+ bb.y1 = min_t(int, bb.y1, clip.y1);
+ bb.x2 = max_t(int, bb.x2, clip.x2);
+ bb.y2 = max_t(int, bb.y2, clip.y2);
+ }
+
+ curr_size = update->post_clip(update, cmd_next, &bb);
+ submit_size += curr_size;
+
+ if (reserved_size < submit_size)
+ submit_size = 0;
+
+ vmw_fifo_commit(update->dev_priv, submit_size);
+
+ vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
+ update->out_fence, NULL);
+ return ret;
+
+out_revert:
+ vmw_validation_revert(&val_ctx);
+
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
+ return ret;
+}
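
For illustration (not part of the patch): vmw_crtc_state_and_lock() above can hand back an error pointer, so its callers are written to bail out and let the atomic core back off on -EDEADLK. A compressed caller sketch; example_check() is a placeholder name:

static int example_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;

	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			/* -EDEADLK propagates; the atomic core drops locks and retries. */
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/* ... inspect crtc_state here ... */
	}

	return 0;
}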
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 76ec570c0684..655abbcd4058 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -33,7 +33,123 @@
#include <drm/drm_encoder.h>
#include "vmwgfx_drv.h"
+/**
+ * struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
+ * @plane: Plane which is being updated.
+ * @old_state: Old state of plane.
+ * @dev_priv: Device private.
+ * @du: Display unit on which to update the plane.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: Out fence for resource finish.
+ * @mutex: The mutex used to protect resource reservation.
+ * @cpu_blit: True if a CPU blit is needed.
+ * @intr: Whether to perform waits interruptibly if possible.
+ *
+ * This structure loosely represents the set of operations needed to perform a
+ * plane update on a display unit. The implementer defines that functionality
+ * through the function callbacks of this structure. In brief, it involves
+ * surface/buffer object validation, populating FIFO commands and submitting
+ * them to the device.
+ */
+struct vmw_du_update_plane {
+ /**
+ * @calc_fifo_size: Calculate fifo size.
+ *
+ * Determine the FIFO size needed for the update commands. The number of
+ * damage clips on the display unit, @num_hits, is passed so that
+ * sufficient FIFO space can be allocated.
+ *
+ * Return: Fifo size needed
+ */
+ uint32_t (*calc_fifo_size)(struct vmw_du_update_plane *update,
+ uint32_t num_hits);
+
+ /**
+ * @post_prepare: Populate fifo for resource preparation.
+ *
+ * Some surface resources or buffer objects need extra command submission,
+ * such as updating the GB image for a proxy surface or defining a GMRFB for
+ * a screen object. That should be done here, as this callback is called
+ * after FIFO allocation with the address of the command buffer.
+ *
+ * This callback is optional.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*post_prepare)(struct vmw_du_update_plane *update, void *cmd);
+
+ /**
+ * @pre_clip: Populate fifo before clip.
+ *
+ * This is where pre-clip related commands, such as surface copy/DMA,
+ * should be populated.
+ *
+ * This callback is optional.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*pre_clip)(struct vmw_du_update_plane *update, void *cmd,
+ uint32_t num_hits);
+
+ /**
+ * @clip: Populate fifo for clip.
+ *
+ * This is where to populate clips for surface copy/DMA or blit commands
+ * if needed. It is called once per damage clip on the display unit, which
+ * is once overall when doing a full update. @clip is the damage rectangle
+ * in destination (crtc/DU) coordinates, and @src_x, @src_y give the damage
+ * clip origin in framebuffer coordinates.
+ *
+ * This callback is optional.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*clip)(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *clip, uint32_t src_x, uint32_t src_y);
+
+ /**
+ * @post_clip: Populate fifo after clip.
+ *
+ * This is where to populate display unit update commands or blit
+ * commands.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*post_clip)(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *bb);
+
+ struct drm_plane *plane;
+ struct drm_plane_state *old_state;
+ struct vmw_private *dev_priv;
+ struct vmw_display_unit *du;
+ struct vmw_framebuffer *vfb;
+ struct vmw_fence_obj **out_fence;
+ struct mutex *mutex;
+ bool cpu_blit;
+ bool intr;
+};
+
+/**
+ * struct vmw_du_update_plane_surface - closure structure for surface
+ * @base: base closure structure.
+ * @cmd_start: FIFO command start address (used by SOU only).
+ */
+struct vmw_du_update_plane_surface {
+ struct vmw_du_update_plane base;
+ /* This member is to handle special case SOU surface update */
+ void *cmd_start;
+};
+/**
+ * struct vmw_du_update_plane_buffer - Closure structure for buffer object
+ * @base: Base closure structure.
+ * @fb_left: x1 for fb damage bounding box.
+ * @fb_top: y1 for fb damage bounding box.
+ */
+struct vmw_du_update_plane_buffer {
+ struct vmw_du_update_plane base;
+ int fb_left, fb_top;
+};
/**
* struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
@@ -191,8 +307,6 @@ struct vmw_plane_state {
struct vmw_connector_state {
struct drm_connector_state base;
- bool is_implicit;
-
/**
* @gui_x:
*
@@ -254,7 +368,6 @@ struct vmw_display_unit {
int gui_x;
int gui_y;
bool is_implicit;
- bool active_implicit;
int set_gui_x;
int set_gui_y;
};
@@ -334,17 +447,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
struct drm_crtc **p_crtc,
struct drm_display_mode **p_mode);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
-void vmw_kms_del_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du);
-void vmw_kms_add_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du,
- struct vmw_framebuffer *vfb);
-bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
-void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
-void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
- bool immutable);
+void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
+void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
@@ -456,6 +560,20 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
bool interruptible,
struct drm_crtc *crtc);
-int vmw_kms_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx);
+int vmw_du_helper_plane_update(struct vmw_du_update_plane *update);
+
+/**
+ * vmw_du_translate_to_crtc - Translate a rect from framebuffer to crtc
+ * @state: Plane state.
+ * @r: Rectangle to translate.
+ */
+static inline void vmw_du_translate_to_crtc(struct drm_plane_state *state,
+ struct drm_rect *r)
+{
+ int translate_crtc_x = -((state->src_x >> 16) - state->crtc_x);
+ int translate_crtc_y = -((state->src_y >> 16) - state->crtc_y);
+
+ drm_rect_translate(r, translate_crtc_x, translate_crtc_y);
+}
+
#endif
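
For illustration (not part of the patch): a display unit uses the closure above by filling in its FIFO callbacks and handing the base structure to vmw_du_helper_plane_update(); the SOU and STDU hunks below do exactly this. A compressed sketch in which the my_* callbacks are placeholders:

static int example_plane_update(struct vmw_private *dev_priv,
				struct drm_plane *plane,
				struct drm_plane_state *old_state,
				struct vmw_framebuffer *vfb)
{
	struct vmw_du_update_plane_buffer upd;

	memset(&upd, 0, sizeof(upd));
	upd.base.plane = plane;
	upd.base.old_state = old_state;
	upd.base.dev_priv = dev_priv;
	upd.base.du = vmw_crtc_to_du(plane->state->crtc);
	upd.base.vfb = vfb;
	upd.base.intr = true;

	/* calc_fifo_size and post_clip are required by the helper;
	 * post_prepare, pre_clip and clip are optional. */
	upd.base.calc_fifo_size = my_calc_fifo_size;
	upd.base.post_prepare = my_define_gmrfb;
	upd.base.clip = my_populate_clip;
	upd.base.post_clip = my_post_clip;

	return vmw_du_helper_plane_update(&upd.base);
}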
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 4b5378495eea..16be515c4c0f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -233,7 +233,7 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
- .set_config = vmw_kms_set_config,
+ .set_config = drm_atomic_helper_set_config,
};
@@ -263,13 +263,10 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
.destroy = vmw_ldu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
- .atomic_set_property = vmw_du_connector_atomic_set_property,
- .atomic_get_property = vmw_du_connector_atomic_get_property,
};
static const struct
@@ -416,7 +413,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
-
vmw_du_connector_reset(connector);
ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -427,8 +423,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_helper_add(connector, &vmw_ldu_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, true);
- vmw_connector_state_to_vcs(connector->state)->is_implicit = true;
-
ret = drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -447,7 +441,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_encoder;
}
-
vmw_du_crtc_reset(crtc);
ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
&ldu->base.cursor,
@@ -513,7 +506,7 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
if (ret != 0)
goto err_free;
- vmw_kms_create_implicit_placement_property(dev_priv, true);
+ vmw_kms_create_implicit_placement_property(dev_priv);
if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8a029bade32a..3025bfc001a1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -85,7 +85,7 @@ static void vmw_resource_release(struct kref *kref)
struct ttm_validate_buffer val_buf;
val_buf.bo = bo;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
res->func->unbind(res, false, &val_buf);
}
res->backup_dirty = false;
@@ -462,7 +462,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
- val_buf->shared = false;
+ val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
@@ -565,7 +565,7 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -614,7 +614,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
return 0;
val_buf.bo = NULL;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
if (res->backup)
val_buf.bo = &res->backup->base;
do {
@@ -685,7 +685,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
struct vmw_resource *res, *next;
struct ttm_validate_buffer val_buf = {
.bo = &vbo->base,
- .shared = false
+ .num_shared = 0
};
lockdep_assert_held(&vbo->base.resv->lock.base);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 333418dc259f..cd586c52af7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -29,6 +29,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
#define vmw_crtc_to_sou(x) \
@@ -76,6 +77,11 @@ struct vmw_kms_sou_dirty_cmd {
SVGA3dCmdBlitSurfaceToScreen body;
};
+struct vmw_kms_sou_define_gmrfb {
+ uint32_t header;
+ SVGAFifoCmdDefineGMRFB body;
+};
+
/**
* Display unit using screen objects.
*/
@@ -241,28 +247,20 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
sou->buffer = vps->bo;
sou->buffer_size = vps->bo_size;
- if (sou->base.is_implicit) {
- x = crtc->x;
- y = crtc->y;
- } else {
- conn_state = sou->base.connector.state;
- vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+ conn_state = sou->base.connector.state;
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
- x = vmw_conn_state->gui_x;
- y = vmw_conn_state->gui_y;
- }
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
if (ret)
DRM_ERROR("Failed to define Screen Object %dx%d\n",
crtc->x, crtc->y);
- vmw_kms_add_active(dev_priv, &sou->base, vfb);
} else {
sou->buffer = NULL;
sou->buffer_size = 0;
-
- vmw_kms_del_active(dev_priv, &sou->base);
}
}
@@ -317,38 +315,14 @@ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
}
}
-static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *new_fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- int ret;
-
- if (!vmw_kms_crtc_flippable(dev_priv, crtc))
- return -EINVAL;
-
- ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
- if (ret) {
- DRM_ERROR("Page flip error %d.\n", ret);
- return ret;
- }
-
- if (vmw_crtc_to_du(crtc)->is_implicit)
- vmw_kms_update_implicit_fb(dev_priv, crtc);
-
- return ret;
-}
-
static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_sou_crtc_destroy,
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
- .set_config = vmw_kms_set_config,
- .page_flip = vmw_sou_crtc_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
};
/*
@@ -377,13 +351,10 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
.destroy = vmw_sou_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
- .atomic_set_property = vmw_du_connector_atomic_set_property,
- .atomic_get_property = vmw_du_connector_atomic_get_property,
};
@@ -498,6 +469,263 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
}
+static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_kms_sou_define_gmrfb) +
+ sizeof(struct vmw_kms_sou_bo_blit) * num_hits;
+}
+
+static uint32_t vmw_sou_bo_define_gmrfb(struct vmw_du_update_plane *update,
+ void *cmd)
+{
+ struct vmw_framebuffer_bo *vfbbo =
+ container_of(update->vfb, typeof(*vfbbo), base);
+ struct vmw_kms_sou_define_gmrfb *gmr = cmd;
+ int depth = update->vfb->base.format->depth;
+
+ /* Emulate RGBA support: contrary to svga_reg.h, it is not
+ * supported by hosts. This is only a problem if we later read
+ * this value back and expect what we uploaded.
+ */
+ if (depth == 32)
+ depth = 24;
+
+ gmr->header = SVGA_CMD_DEFINE_GMRFB;
+
+ gmr->body.format.bitsPerPixel = update->vfb->base.format->cpp[0] * 8;
+ gmr->body.format.colorDepth = depth;
+ gmr->body.format.reserved = 0;
+ gmr->body.bytesPerLine = update->vfb->base.pitches[0];
+ vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &gmr->body.ptr);
+
+ return sizeof(*gmr);
+}
+
+static uint32_t vmw_sou_bo_populate_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t fb_x, uint32_t fb_y)
+{
+ struct vmw_kms_sou_bo_blit *blit = cmd;
+
+ blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+ blit->body.destScreenId = update->du->unit;
+ blit->body.srcOrigin.x = fb_x;
+ blit->body.srcOrigin.y = fb_y;
+ blit->body.destRect.left = clip->x1;
+ blit->body.destRect.top = clip->y1;
+ blit->body.destRect.right = clip->x2;
+ blit->body.destRect.bottom = clip->y2;
+
+ return sizeof(*blit);
+}
+
+static uint32_t vmw_stud_bo_post_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *bb)
+{
+ return 0;
+}
+
+/**
+ * vmw_sou_plane_update_bo - Update display unit for bo backed fb.
+ * @dev_priv: Device private.
+ * @plane: Plane which is being updated.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_sou_plane_update_bo(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane_buffer bo_update;
+
+ memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer));
+ bo_update.base.plane = plane;
+ bo_update.base.old_state = old_state;
+ bo_update.base.dev_priv = dev_priv;
+ bo_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+ bo_update.base.vfb = vfb;
+ bo_update.base.out_fence = out_fence;
+ bo_update.base.mutex = NULL;
+ bo_update.base.cpu_blit = false;
+ bo_update.base.intr = true;
+
+ bo_update.base.calc_fifo_size = vmw_sou_bo_fifo_size;
+ bo_update.base.post_prepare = vmw_sou_bo_define_gmrfb;
+ bo_update.base.clip = vmw_sou_bo_populate_clip;
+ bo_update.base.post_clip = vmw_stud_bo_post_clip;
+
+ return vmw_du_helper_plane_update(&bo_update.base);
+}
+
+static uint32_t vmw_sou_surface_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_kms_sou_dirty_cmd) + sizeof(SVGASignedRect) *
+ num_hits;
+}
+
+static uint32_t vmw_sou_surface_post_prepare(struct vmw_du_update_plane *update,
+ void *cmd)
+{
+ struct vmw_du_update_plane_surface *srf_update;
+
+ srf_update = container_of(update, typeof(*srf_update), base);
+
+ /*
+ * SOU SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN is special in the sense that
+ * its bounding box is filled before iterating over all the clips. So
+ * store the FIFO start address and revisit to fill the details.
+ */
+ srf_update->cmd_start = cmd;
+
+ return 0;
+}
+
+static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
+ void *cmd, uint32_t num_hits)
+{
+ struct vmw_kms_sou_dirty_cmd *blit = cmd;
+ struct vmw_framebuffer_surface *vfbs;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ blit->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+ blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) *
+ num_hits;
+
+ blit->body.srcImage.sid = vfbs->surface->res.id;
+ blit->body.destScreenId = update->du->unit;
+
+ /* Update the source and destination bounding box later in post_clip */
+ blit->body.srcRect.left = 0;
+ blit->body.srcRect.top = 0;
+ blit->body.srcRect.right = 0;
+ blit->body.srcRect.bottom = 0;
+
+ blit->body.destRect.left = 0;
+ blit->body.destRect.top = 0;
+ blit->body.destRect.right = 0;
+ blit->body.destRect.bottom = 0;
+
+ return sizeof(*blit);
+}
+
+static uint32_t vmw_sou_surface_clip_rect(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t src_x, uint32_t src_y)
+{
+ SVGASignedRect *rect = cmd;
+
+ /*
+ * rects are relative to dest bounding box rect on screen object, so
+ * translate to it later in post_clip
+ */
+ rect->left = clip->x1;
+ rect->top = clip->y1;
+ rect->right = clip->x2;
+ rect->bottom = clip->y2;
+
+ return sizeof(*rect);
+}
+
+static uint32_t vmw_sou_surface_post_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *bb)
+{
+ struct vmw_du_update_plane_surface *srf_update;
+ struct drm_plane_state *state = update->plane->state;
+ struct drm_rect src_bb;
+ struct vmw_kms_sou_dirty_cmd *blit;
+ SVGASignedRect *rect;
+ uint32_t num_hits;
+ int translate_src_x;
+ int translate_src_y;
+ int i;
+
+ srf_update = container_of(update, typeof(*srf_update), base);
+
+ blit = srf_update->cmd_start;
+ rect = (SVGASignedRect *)&blit[1];
+
+ num_hits = (blit->header.size - sizeof(blit->body))/
+ sizeof(SVGASignedRect);
+
+ src_bb = *bb;
+
+ /* To translate bb back to fb src coord */
+ translate_src_x = (state->src_x >> 16) - state->crtc_x;
+ translate_src_y = (state->src_y >> 16) - state->crtc_y;
+
+ drm_rect_translate(&src_bb, translate_src_x, translate_src_y);
+
+ blit->body.srcRect.left = src_bb.x1;
+ blit->body.srcRect.top = src_bb.y1;
+ blit->body.srcRect.right = src_bb.x2;
+ blit->body.srcRect.bottom = src_bb.y2;
+
+ blit->body.destRect.left = bb->x1;
+ blit->body.destRect.top = bb->y1;
+ blit->body.destRect.right = bb->x2;
+ blit->body.destRect.bottom = bb->y2;
+
+ /* rects are relative to dest bb rect */
+ for (i = 0; i < num_hits; i++) {
+ rect->left -= bb->x1;
+ rect->top -= bb->y1;
+ rect->right -= bb->x1;
+ rect->bottom -= bb->y1;
+ rect++;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_sou_plane_update_surface - Update display unit for surface backed fb.
+ * @dev_priv: Device private.
+ * @plane: Plane which is being updated.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane_surface srf_update;
+
+ memset(&srf_update, 0, sizeof(struct vmw_du_update_plane_surface));
+ srf_update.base.plane = plane;
+ srf_update.base.old_state = old_state;
+ srf_update.base.dev_priv = dev_priv;
+ srf_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+ srf_update.base.vfb = vfb;
+ srf_update.base.out_fence = out_fence;
+ srf_update.base.mutex = &dev_priv->cmdbuf_mutex;
+ srf_update.base.cpu_blit = false;
+ srf_update.base.intr = true;
+
+ srf_update.base.calc_fifo_size = vmw_sou_surface_fifo_size;
+ srf_update.base.post_prepare = vmw_sou_surface_post_prepare;
+ srf_update.base.pre_clip = vmw_sou_surface_pre_clip;
+ srf_update.base.clip = vmw_sou_surface_clip_rect;
+ srf_update.base.post_clip = vmw_sou_surface_post_clip;
+
+ return vmw_du_helper_plane_update(&srf_update.base);
+}
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
@@ -508,47 +736,28 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
struct vmw_fence_obj *fence = NULL;
int ret;
+ /* In case of device error, maintain consistent atomic state */
if (crtc && plane->state->fb) {
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_framebuffer *vfb =
vmw_framebuffer_to_vfb(plane->state->fb);
- struct drm_vmw_rect vclips;
-
- vclips.x = crtc->x;
- vclips.y = crtc->y;
- vclips.w = crtc->mode.hdisplay;
- vclips.h = crtc->mode.vdisplay;
if (vfb->bo)
- ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
- &vclips, 1, 1, true,
- &fence, crtc);
+ ret = vmw_sou_plane_update_bo(dev_priv, plane,
+ old_state, vfb, &fence);
else
- ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
- &vclips, NULL, 0, 0,
- 1, 1, &fence, crtc);
-
- /*
- * We cannot really fail this function, so if we do, then output
- * an error and maintain consistent atomic state.
- */
+ ret = vmw_sou_plane_update_surface(dev_priv, plane,
+ old_state, vfb,
+ &fence);
if (ret != 0)
DRM_ERROR("Failed to update screen.\n");
} else {
- /*
- * When disabling a plane, CRTC and FB should always be NULL
- * together, otherwise it's an error.
- * Here primary plane is being disable so should really blank
- * the screen object display unit, if not already done.
- */
+ /* Do nothing when fb and crtc are NULL (blank crtc) */
return;
}
+ /* In the error case the vblank event is sent from vmw_du_crtc_atomic_flush */
event = crtc->state->event;
- /*
- * In case of failure and other cases, vblank event will be sent in
- * vmw_du_crtc_atomic_flush.
- */
if (event && fence) {
struct drm_file *file_priv = event->base.file_priv;
@@ -639,7 +848,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
primary = &sou->base.primary;
cursor = &sou->base.cursor;
- sou->base.active_implicit = false;
sou->base.pref_active = (unit == 0);
sou->base.pref_width = dev_priv->initial_width;
sou->base.pref_height = dev_priv->initial_height;
@@ -665,6 +873,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary);
/* Initialize cursor plane */
vmw_du_plane_reset(cursor);
@@ -692,8 +901,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, true);
- vmw_connector_state_to_vcs(connector->state)->is_implicit = false;
-
ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -732,12 +939,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
- if (dev_priv->implicit_placement_property)
- drm_object_attach_property
- (&connector->base,
- dev_priv->implicit_placement_property,
- sou->base.is_implicit);
-
return 0;
err_free_unregister:
@@ -763,15 +964,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
}
ret = -ENOMEM;
- dev_priv->num_implicit = 0;
- dev_priv->implicit_fb = NULL;
ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
if (unlikely(ret != 0))
return ret;
- vmw_kms_create_implicit_placement_property(dev_priv, false);
-
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_sou_init(dev_priv, i);
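
For illustration (not part of the patch): drm_plane_enable_fb_damage_clips() lets userspace attach FB_DAMAGE_CLIPS to this plane, and vmw_du_helper_plane_update() walks those clips with the damage iterator from <drm/drm_damage_helper.h>. A minimal, vmwgfx-independent sketch of that loop; example_count_damage() is a placeholder name:

#include <drm/drm_damage_helper.h>

static unsigned int example_count_damage(struct drm_plane_state *old_state,
					 struct drm_plane_state *state)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	unsigned int hits = 0;

	/* Yields damaged rects in framebuffer (src) coordinates; a single
	 * full-plane rect is returned when no damage clips were supplied. */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		hits++;

	return hits;
}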
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index c3e435f444c1..096c2941a8e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -30,7 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-
+#include <drm/drm_damage_helper.h>
#define vmw_crtc_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.crtc)
@@ -92,6 +92,10 @@ struct vmw_stdu_surface_copy {
SVGA3dCmdSurfaceCopy body;
};
+struct vmw_stdu_update_gb_image {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+};
/**
* struct vmw_screen_target_display_unit
@@ -396,13 +400,8 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (!crtc->state->enable)
return;
- if (stdu->base.is_implicit) {
- x = crtc->x;
- y = crtc->y;
- } else {
- x = vmw_conn_state->gui_x;
- y = vmw_conn_state->gui_y;
- }
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
vmw_svga_enable(dev_priv);
ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y);
@@ -417,27 +416,9 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
{
}
-
static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct drm_plane_state *plane_state = crtc->primary->state;
- struct vmw_private *dev_priv;
- struct vmw_screen_target_display_unit *stdu;
- struct vmw_framebuffer *vfb;
- struct drm_framebuffer *fb;
-
-
- stdu = vmw_crtc_to_stdu(crtc);
- dev_priv = vmw_priv(crtc->dev);
- fb = plane_state->fb;
-
- vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
-
- if (vfb)
- vmw_kms_add_active(dev_priv, &stdu->base, vfb);
- else
- vmw_kms_del_active(dev_priv, &stdu->base);
}
static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -472,49 +453,6 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
}
/**
- * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
- *
- * @crtc: CRTC to attach FB to
- * @fb: FB to attach
- * @event: Event to be posted. This event should've been alloced
- * using k[mz]alloc, and should've been completely initialized.
- * @page_flip_flags: Input flags.
- *
- * If the STDU uses the same display and content buffers, i.e. a true flip,
- * this function will replace the existing display buffer with the new content
- * buffer.
- *
- * If the STDU uses different display and content buffers, i.e. a blit, then
- * only the content buffer will be updated.
- *
- * RETURNS:
- * 0 on success, error code on failure
- */
-static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *new_fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags,
- struct drm_modeset_acquire_ctx *ctx)
-
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
- int ret;
-
- if (!stdu->defined || !vmw_kms_crtc_flippable(dev_priv, crtc))
- return -EINVAL;
-
- ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
- if (ret) {
- DRM_ERROR("Page flip error %d.\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-
-/**
* vmw_stdu_bo_clip - Callback to encode a suface DMA command cliprect
*
* @dirty: The closure structure.
@@ -986,8 +924,8 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
- .set_config = vmw_kms_set_config,
- .page_flip = vmw_stdu_crtc_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
};
@@ -1042,13 +980,10 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
.destroy = vmw_stdu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
- .atomic_set_property = vmw_du_connector_atomic_set_property,
- .atomic_get_property = vmw_du_connector_atomic_get_property,
};
@@ -1256,11 +1191,402 @@ out_srf_unref:
return ret;
}
+static uint32_t vmw_stdu_bo_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_stdu_dma) + sizeof(SVGA3dCopyBox) * num_hits +
+ sizeof(SVGA3dCmdSurfaceDMASuffix) +
+ sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_stdu_update_gb_image) +
+ sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_populate_dma(struct vmw_du_update_plane *update,
+ void *cmd, uint32_t num_hits)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_bo *vfbbo;
+ struct vmw_stdu_dma *cmd_dma = cmd;
+
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+ cmd_dma->header.id = SVGA_3D_CMD_SURFACE_DMA;
+ cmd_dma->header.size = sizeof(cmd_dma->body) +
+ sizeof(struct SVGA3dCopyBox) * num_hits +
+ sizeof(SVGA3dCmdSurfaceDMASuffix);
+ vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &cmd_dma->body.guest.ptr);
+ cmd_dma->body.guest.pitch = update->vfb->base.pitches[0];
+ cmd_dma->body.host.sid = stdu->display_srf->res.id;
+ cmd_dma->body.host.face = 0;
+ cmd_dma->body.host.mipmap = 0;
+ cmd_dma->body.transfer = SVGA3D_WRITE_HOST_VRAM;
+
+ return sizeof(*cmd_dma);
+}
+
+static uint32_t vmw_stdu_bo_populate_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t fb_x, uint32_t fb_y)
+{
+ struct SVGA3dCopyBox *box = cmd;
+
+ box->srcx = fb_x;
+ box->srcy = fb_y;
+ box->srcz = 0;
+ box->x = clip->x1;
+ box->y = clip->y1;
+ box->z = 0;
+ box->w = drm_rect_width(clip);
+ box->h = drm_rect_height(clip);
+ box->d = 1;
+
+ return sizeof(*box);
+}
+
+static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *bb)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_bo *vfbbo;
+ SVGA3dCmdSurfaceDMASuffix *suffix = cmd;
+
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+
+ vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
+ bb->y1, bb->y2);
+
+ return sizeof(*suffix) + sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_pre_clip_cpu(struct vmw_du_update_plane *update,
+ void *cmd, uint32_t num_hits)
+{
+ struct vmw_du_update_plane_buffer *bo_update =
+ container_of(update, typeof(*bo_update), base);
+
+ bo_update->fb_left = INT_MAX;
+ bo_update->fb_top = INT_MAX;
+
+ return 0;
+}
+
+static uint32_t vmw_stdu_bo_clip_cpu(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t fb_x, uint32_t fb_y)
+{
+ struct vmw_du_update_plane_buffer *bo_update =
+ container_of(update, typeof(*bo_update), base);
+
+ bo_update->fb_left = min_t(int, bo_update->fb_left, fb_x);
+ bo_update->fb_top = min_t(int, bo_update->fb_top, fb_y);
+
+ return 0;
+}
+
+static uint32_t
+vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *bb)
+{
+ struct vmw_du_update_plane_buffer *bo_update;
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_bo *vfbbo;
+ struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
+ struct vmw_stdu_update_gb_image *cmd_img = cmd;
+ struct vmw_stdu_update *cmd_update;
+ struct ttm_buffer_object *src_bo, *dst_bo;
+ u32 src_offset, dst_offset;
+ s32 src_pitch, dst_pitch;
+ s32 width, height;
+
+ bo_update = container_of(update, typeof(*bo_update), base);
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+ width = bb->x2 - bb->x1;
+ height = bb->y2 - bb->y1;
+
+ diff.cpp = stdu->cpp;
+
+ dst_bo = &stdu->display_srf->res.backup->base;
+ dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+ dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
+
+ src_bo = &vfbbo->buffer->base;
+ src_pitch = update->vfb->base.pitches[0];
+ src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
+ stdu->cpp;
+
+ (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch, src_bo,
+ src_offset, src_pitch, width * stdu->cpp, height,
+ &diff);
+
+ if (drm_rect_visible(&diff.rect)) {
+ SVGA3dBox *box = &cmd_img->body.box;
+
+ cmd_img->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd_img->header.size = sizeof(cmd_img->body);
+ cmd_img->body.image.sid = stdu->display_srf->res.id;
+ cmd_img->body.image.face = 0;
+ cmd_img->body.image.mipmap = 0;
+
+ box->x = diff.rect.x1;
+ box->y = diff.rect.y1;
+ box->z = 0;
+ box->w = drm_rect_width(&diff.rect);
+ box->h = drm_rect_height(&diff.rect);
+ box->d = 1;
+
+ cmd_update = (struct vmw_stdu_update *)&cmd_img[1];
+ vmw_stdu_populate_update(cmd_update, stdu->base.unit,
+ diff.rect.x1, diff.rect.x2,
+ diff.rect.y1, diff.rect.y2);
+
+ return sizeof(*cmd_img) + sizeof(*cmd_update);
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_stdu_plane_update_bo - Update display unit for bo backed fb.
+ * @dev_priv: Device private.
+ * @plane: Plane to be updated.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to the display unit.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL, in which case the device
+ * has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_stdu_plane_update_bo(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane_buffer bo_update;
+
+ memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer));
+ bo_update.base.plane = plane;
+ bo_update.base.old_state = old_state;
+ bo_update.base.dev_priv = dev_priv;
+ bo_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+ bo_update.base.vfb = vfb;
+ bo_update.base.out_fence = out_fence;
+ bo_update.base.mutex = NULL;
+ bo_update.base.cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+ bo_update.base.intr = false;
+
+ /*
+	 * VMs without 3D support don't have the surface DMA command, and the
+	 * framebuffer should be moved out of VRAM.
+ */
+ if (bo_update.base.cpu_blit) {
+ bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu;
+ bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu;
+ bo_update.base.clip = vmw_stdu_bo_clip_cpu;
+ bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu;
+ } else {
+ bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size;
+ bo_update.base.pre_clip = vmw_stdu_bo_populate_dma;
+ bo_update.base.clip = vmw_stdu_bo_populate_clip;
+ bo_update.base.post_clip = vmw_stdu_bo_populate_update;
+ }
+
+ return vmw_du_helper_plane_update(&bo_update.base);
+}
+
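The calc_fifo_size/pre_clip/clip/post_clip callbacks selected above (together with post_prepare, used later by the surface path) drive one damage-based update of the screen target. The sketch below is a comment-only illustration of the order in which vmw_du_helper_plane_update() is assumed to invoke them, inferred from how the command sizes and layouts above fit together; it is not the helper's actual code:

/*
 * fifo_size = calc_fifo_size(update, num_hits);
 * cmd = <reserve fifo_size bytes of command space>;
 * cmd += post_prepare(update, cmd);                     (only if set)
 * cmd += pre_clip(update, cmd, num_hits);
 * for each damage clip:
 *         cmd += clip(update, cmd, &clip, fb_x, fb_y);
 * cmd += post_clip(update, cmd, &bounding_box_of_all_clips);
 * <commit the commands, optionally returning a fence via out_fence>
 */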
+static uint32_t
+vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ struct vmw_framebuffer_surface *vfbs;
+ uint32_t size = 0;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ if (vfbs->is_bo_proxy)
+ size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+
+ size += sizeof(struct vmw_stdu_update);
+
+ return size;
+}
+
+static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ struct vmw_framebuffer_surface *vfbs;
+ uint32_t size = 0;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ if (vfbs->is_bo_proxy)
+ size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+
+ size += sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) *
+ num_hits + sizeof(struct vmw_stdu_update);
+
+ return size;
+}
+
+static uint32_t
+vmw_stdu_surface_update_proxy(struct vmw_du_update_plane *update, void *cmd)
+{
+ struct vmw_framebuffer_surface *vfbs;
+ struct drm_plane_state *state = update->plane->state;
+ struct drm_plane_state *old_state = update->old_state;
+ struct vmw_stdu_update_gb_image *cmd_update = cmd;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ uint32_t copy_size = 0;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ /*
+	 * A proxy surface is special: a buffer-object backed fb is wrapped in
+	 * a surface and needs an update GB image command to sync with the device.
+ */
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ SVGA3dBox *box = &cmd_update->body.box;
+
+ cmd_update->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd_update->header.size = sizeof(cmd_update->body);
+ cmd_update->body.image.sid = vfbs->surface->res.id;
+ cmd_update->body.image.face = 0;
+ cmd_update->body.image.mipmap = 0;
+
+ box->x = clip.x1;
+ box->y = clip.y1;
+ box->z = 0;
+ box->w = drm_rect_width(&clip);
+ box->h = drm_rect_height(&clip);
+ box->d = 1;
+
+ copy_size += sizeof(*cmd_update);
+ cmd_update++;
+ }
+
+ return copy_size;
+}
+
+static uint32_t
+vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd,
+ uint32_t num_hits)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_surface *vfbs;
+ struct vmw_stdu_surface_copy *cmd_copy = cmd;
+
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ cmd_copy->header.id = SVGA_3D_CMD_SURFACE_COPY;
+ cmd_copy->header.size = sizeof(cmd_copy->body) + sizeof(SVGA3dCopyBox) *
+ num_hits;
+ cmd_copy->body.src.sid = vfbs->surface->res.id;
+ cmd_copy->body.dest.sid = stdu->display_srf->res.id;
+
+ return sizeof(*cmd_copy);
+}
+
+static uint32_t
+vmw_stdu_surface_populate_clip(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *clip, uint32_t fb_x,
+ uint32_t fb_y)
+{
+ struct SVGA3dCopyBox *box = cmd;
+
+ box->srcx = fb_x;
+ box->srcy = fb_y;
+ box->srcz = 0;
+ box->x = clip->x1;
+ box->y = clip->y1;
+ box->z = 0;
+ box->w = drm_rect_width(clip);
+ box->h = drm_rect_height(clip);
+ box->d = 1;
+
+ return sizeof(*box);
+}
+
+static uint32_t
+vmw_stdu_surface_populate_update(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *bb)
+{
+ vmw_stdu_populate_update(cmd, update->du->unit, bb->x1, bb->x2, bb->y1,
+ bb->y2);
+ return sizeof(struct vmw_stdu_update);
+}
/**
- * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
+ * vmw_stdu_plane_update_surface - Update display unit for surface backed fb
+ * @dev_priv: Device private
+ * @plane: Plane to be updated
+ * @old_state: Old plane state
+ * @vfb: Framebuffer which is blitted to the display unit
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL, in which case the device
+ * has already synchronized.
*
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane srf_update;
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_surface *vfbs;
+
+ stdu = vmw_crtc_to_stdu(plane->state->crtc);
+ vfbs = container_of(vfb, typeof(*vfbs), base);
+
+ memset(&srf_update, 0, sizeof(struct vmw_du_update_plane));
+ srf_update.plane = plane;
+ srf_update.old_state = old_state;
+ srf_update.dev_priv = dev_priv;
+ srf_update.du = vmw_crtc_to_du(plane->state->crtc);
+ srf_update.vfb = vfb;
+ srf_update.out_fence = out_fence;
+ srf_update.mutex = &dev_priv->cmdbuf_mutex;
+ srf_update.cpu_blit = false;
+ srf_update.intr = true;
+
+ if (vfbs->is_bo_proxy)
+ srf_update.post_prepare = vmw_stdu_surface_update_proxy;
+
+ if (vfbs->surface->res.id != stdu->display_srf->res.id) {
+ srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size;
+ srf_update.pre_clip = vmw_stdu_surface_populate_copy;
+ srf_update.clip = vmw_stdu_surface_populate_clip;
+ } else {
+ srf_update.calc_fifo_size =
+ vmw_stdu_surface_fifo_size_same_display;
+ }
+
+ srf_update.post_clip = vmw_stdu_surface_populate_update;
+
+ return vmw_du_helper_plane_update(&srf_update);
+}
+
+/**
+ * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
* @plane: display plane
* @old_state: Only used to get crtc info
*
@@ -1277,17 +1603,14 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_crtc *crtc = plane->state->crtc;
struct vmw_screen_target_display_unit *stdu;
struct drm_pending_vblank_event *event;
+ struct vmw_fence_obj *fence = NULL;
struct vmw_private *dev_priv;
int ret;
- /*
- * We cannot really fail this function, so if we do, then output an
- * error and maintain consistent atomic state.
- */
+	/* In case of a device error, maintain a consistent atomic state */
if (crtc && plane->state->fb) {
struct vmw_framebuffer *vfb =
vmw_framebuffer_to_vfb(plane->state->fb);
- struct drm_vmw_rect vclips;
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
@@ -1295,23 +1618,17 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu->content_fb_type = vps->content_fb_type;
stdu->cpp = vps->cpp;
- vclips.x = crtc->x;
- vclips.y = crtc->y;
- vclips.w = crtc->mode.hdisplay;
- vclips.h = crtc->mode.vdisplay;
-
ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
if (ret)
DRM_ERROR("Failed to bind surface to STDU.\n");
if (vfb->bo)
- ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
- &vclips, 1, 1, true, false,
- crtc);
+ ret = vmw_stdu_plane_update_bo(dev_priv, plane,
+ old_state, vfb, &fence);
else
- ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL,
- &vclips, NULL, 0, 0,
- 1, 1, NULL, crtc);
+ ret = vmw_stdu_plane_update_surface(dev_priv, plane,
+ old_state, vfb,
+ &fence);
if (ret)
DRM_ERROR("Failed to update STDU.\n");
} else {
@@ -1319,12 +1636,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
- /*
- * When disabling a plane, CRTC and FB should always be NULL
- * together, otherwise it's an error.
- * Here primary plane is being disable so blank the screen
- * target display unit, if not already done.
- */
+ /* Blank STDU when fb and crtc are NULL */
if (!stdu->defined)
return;
@@ -1339,36 +1651,25 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
return;
}
+	/* In case of an error, the vblank event is sent in vmw_du_crtc_atomic_flush */
event = crtc->state->event;
- /*
- * In case of failure and other cases, vblank event will be sent in
- * vmw_du_crtc_atomic_flush.
- */
- if (event && (ret == 0)) {
- struct vmw_fence_obj *fence = NULL;
+ if (event && fence) {
struct drm_file *file_priv = event->base.file_priv;
- vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-
- /*
- * If fence is NULL, then already sync.
- */
- if (fence) {
- ret = vmw_event_fence_action_queue(
- file_priv, fence, &event->base,
- &event->event.vbl.tv_sec,
- &event->event.vbl.tv_usec,
- true);
- if (ret)
- DRM_ERROR("Failed to queue event on fence.\n");
- else
- crtc->state->event = NULL;
-
- vmw_fence_obj_unreference(&fence);
- }
- } else {
- (void) vmw_fifo_flush(dev_priv, false);
+ ret = vmw_event_fence_action_queue(file_priv,
+ fence,
+ &event->base,
+ &event->event.vbl.tv_sec,
+ &event->event.vbl.tv_usec,
+ true);
+ if (ret)
+ DRM_ERROR("Failed to queue event on fence.\n");
+ else
+ crtc->state->event = NULL;
}
+
+ if (fence)
+ vmw_fence_obj_unreference(&fence);
}
@@ -1456,11 +1757,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
stdu->base.pref_active = (unit == 0);
stdu->base.pref_width = dev_priv->initial_width;
stdu->base.pref_height = dev_priv->initial_height;
-
- /*
- * Remove this after enabling atomic because property values can
- * only exist in a state object
- */
stdu->base.is_implicit = false;
/* Initialize primary plane */
@@ -1477,6 +1773,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(primary, &vmw_stdu_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary);
/* Initialize cursor plane */
vmw_du_plane_reset(cursor);
@@ -1505,7 +1802,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_helper_add(connector, &vmw_stdu_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, false);
- vmw_connector_state_to_vcs(connector->state)->is_implicit = false;
ret = drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -1543,11 +1839,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
- if (dev_priv->implicit_placement_property)
- drm_object_attach_property
- (&connector->base,
- dev_priv->implicit_placement_property,
- stdu->base.is_implicit);
return 0;
err_free_unregister:
@@ -1616,8 +1907,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
dev_priv->active_display_unit = vmw_du_screen_target;
- vmw_kms_create_implicit_placement_property(dev_priv, false);
-
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
ret = vmw_stdu_init(dev_priv, i);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 154eb09aa91e..e6d75e377dd8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -42,3 +42,39 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
dev_priv = vmw_priv(file_priv->minor->dev);
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
+
+/* struct vmw_validation_mem callback */
+static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
+{
+ static struct ttm_operation_ctx ctx = {.interruptible = false,
+ .no_wait_gpu = false};
+ struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+ return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
+}
+
+/* struct vmw_validation_mem callback */
+static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
+{
+ struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+ return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_validation_mem_init_ttm - Interface the validation memory tracker
+ * to ttm.
+ * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
+ * rather than a struct vmw_validation_mem is to make sure the assumption in the
+ * callbacks that struct vmw_private derives from struct vmw_validation_mem
+ * holds true.
+ * @gran: The recommended allocation granularity
+ */
+void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
+{
+ struct vmw_validation_mem *vvm = &dev_priv->vvm;
+
+ vvm->reserve_mem = vmw_vmt_reserve;
+ vvm->unreserve_mem = vmw_vmt_unreserve;
+ vvm->gran = gran;
+}
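The callbacks above route validation-metadata reservations to the TTM global memory accounting. They are meant to be installed once at driver load, before any command submission can allocate validation memory. A minimal sketch, where both the call site and the 16-page granularity are illustrative rather than mandated by this patch:

	/* Driver load path: reserve validation metadata in 16-page batches. */
	vmw_validation_mem_init_ttm(dev_priv, 16 * PAGE_SIZE);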
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index 184025fa938e..b3f547fc5d3d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
return NULL;
if (ctx->mem_size_left < size) {
- struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ struct page *page;
+ if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
+ int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
+
+ if (ret)
+ return NULL;
+
+ ctx->vm_size_left += ctx->vm->gran;
+ ctx->total_mem += ctx->vm->gran;
+ }
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
+ if (ctx->vm)
+ ctx->vm_size_left -= PAGE_SIZE;
+
list_add_tail(&page->lru, &ctx->page_list);
ctx->page_address = page_address(page);
ctx->mem_size_left = PAGE_SIZE;
@@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
}
ctx->mem_size_left = 0;
+ if (ctx->vm && ctx->total_mem) {
+ ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
+ ctx->total_mem = 0;
+ ctx->vm_size_left = 0;
+ }
}
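vmw_validation_mem_alloc() above tops up its reserved headroom in @gran-sized batches whenever less than a page of it remains, each freshly allocated page consumes PAGE_SIZE of that headroom, and vmw_validation_mem_free() hands everything back in a single unreserve_mem() call. A stand-alone model of that accounting, for illustration only (the function below is not part of the driver):

	/* With gran == 4 * PAGE_SIZE, six metadata pages cost two reservations
	 * (8 pages reserved in total), all returned by one unreserve_mem().
	 */
	static size_t model_reserved_bytes(size_t gran, unsigned int pages_needed)
	{
		size_t reserved = 0, headroom = 0;
		unsigned int i;

		for (i = 0; i < pages_needed; i++) {
			if (headroom < PAGE_SIZE) {	/* mirrors the vm_size_left check */
				reserved += gran;	/* one reserve_mem(gran) call */
				headroom += gran;
			}
			headroom -= PAGE_SIZE;		/* one page handed out */
		}
		return reserved;
	}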
/**
@@ -266,7 +285,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
if (!val_buf->bo)
return -ESRCH;
- val_buf->shared = false;
+ val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
bo_node->as_mob = as_mob;
bo_node->cpu_blit = cpu_blit;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index b57e3292c386..3b396fea40d7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -34,6 +34,21 @@
#include <drm/ttm/ttm_execbuf_util.h>
/**
+ * struct vmw_validation_mem - Custom interface to provide memory reservations
+ * for the validation code.
+ * @reserve_mem: Callback to reserve memory
+ * @unreserve_mem: Callback to unreserve memory
+ * @gran: Reservation granularity. Contains a hint of how much memory should
+ * be reserved in each call to @reserve_mem(). A slow implementation may want
+ * reservation to be done in large batches.
+ */
+struct vmw_validation_mem {
+ int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
+ void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
+ size_t gran;
+};
+
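The interface is deliberately small so that back-ends other than the TTM global memory accounting could plug in. A minimal hypothetical implementation that merely enforces a fixed per-context budget (the names below are illustrative and not part of the driver):

	/* Hypothetical budget-only implementation of the interface above. */
	struct example_budget_mem {
		struct vmw_validation_mem base;
		size_t budget;			/* bytes still allowed */
	};

	static int example_reserve(struct vmw_validation_mem *m, size_t size)
	{
		struct example_budget_mem *b = container_of(m, typeof(*b), base);

		if (size > b->budget)
			return -ENOMEM;
		b->budget -= size;
		return 0;
	}

	static void example_unreserve(struct vmw_validation_mem *m, size_t size)
	{
		struct example_budget_mem *b = container_of(m, typeof(*b), base);

		b->budget += size;
	}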
+/**
* struct vmw_validation_context - Per command submission validation context
* @ht: Hash table used to find resource- or buffer object duplicates
* @resource_list: List head for resource validation metadata
@@ -47,6 +62,10 @@
* buffer objects
* @mem_size_left: Free memory left in the last page in @page_list
* @page_address: Kernel virtual address of the last page in @page_list
+ * @vm: A pointer to the memory reservation interface or NULL if no
+ * memory reservation is needed.
+ * @vm_size_left: Amount of reserved memory that has not yet been allocated.
+ * @total_mem: Total amount of memory reserved for this context.
*/
struct vmw_validation_context {
struct drm_open_hash *ht;
@@ -59,6 +78,9 @@ struct vmw_validation_context {
unsigned int merge_dups;
unsigned int mem_size_left;
u8 *page_address;
+ struct vmw_validation_mem *vm;
+ size_t vm_size_left;
+ size_t total_mem;
};
struct vmw_buffer_object;
@@ -102,6 +124,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
}
/**
+ * vmw_validation_set_val_mem - Register a validation mem object for
+ * validation memory reservation
+ * @ctx: The validation context
+ * @vm: Pointer to a struct vmw_validation_mem
+ *
+ * Must be set before the first attempt to allocate validation memory.
+ */
+static inline void
+vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
+ struct vmw_validation_mem *vm)
+{
+ ctx->vm = vm;
+}
+
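Each command submission is then expected to register the tracker on its validation context before the first metadata allocation, so the alloc/free paths above can charge and release the reservations. A minimal sketch, assuming a context val_ctx that has already been initialized by the submission code (not shown in this excerpt):

	/* Before any vmw_validation_mem_alloc() on this context. */
	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);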
+/**
* vmw_validation_set_ht - Register a hash table for duplicate finding
* @ctx: The validation context
* @ht: Pointer to a hash table to use for duplicate finding
diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig
index 4cca160782ab..f969d486855d 100644
--- a/drivers/gpu/drm/xen/Kconfig
+++ b/drivers/gpu/drm/xen/Kconfig
@@ -12,6 +12,7 @@ config DRM_XEN_FRONTEND
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select XEN_XENBUS_FRONTEND
+ select XEN_FRONT_PGDIR_SHBUF
help
Choose this option if you want to enable a para-virtualized
frontend DRM/KMS driver for Xen guest OSes.
diff --git a/drivers/gpu/drm/xen/Makefile b/drivers/gpu/drm/xen/Makefile
index 712afff5ffc3..825905f67faa 100644
--- a/drivers/gpu/drm/xen/Makefile
+++ b/drivers/gpu/drm/xen/Makefile
@@ -4,7 +4,6 @@ drm_xen_front-objs := xen_drm_front.o \
xen_drm_front_kms.o \
xen_drm_front_conn.o \
xen_drm_front_evtchnl.o \
- xen_drm_front_shbuf.o \
xen_drm_front_cfg.o \
xen_drm_front_gem.o
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 6b6d5ab82ec3..4d3d36fc3a5d 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -19,6 +19,7 @@
#include <xen/xen.h>
#include <xen/xenbus.h>
+#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>
#include "xen_drm_front.h"
@@ -26,28 +27,20 @@
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"
-#include "xen_drm_front_shbuf.h"
struct xen_drm_front_dbuf {
struct list_head list;
u64 dbuf_cookie;
u64 fb_cookie;
- struct xen_drm_front_shbuf *shbuf;
+
+ struct xen_front_pgdir_shbuf shbuf;
};
-static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
- struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
+static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
+ struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
- struct xen_drm_front_dbuf *dbuf;
-
- dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
- if (!dbuf)
- return -ENOMEM;
-
dbuf->dbuf_cookie = dbuf_cookie;
- dbuf->shbuf = shbuf;
list_add(&dbuf->list, &front_info->dbuf_list);
- return 0;
}
static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
@@ -62,15 +55,6 @@ static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
return NULL;
}
-static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
-{
- struct xen_drm_front_dbuf *buf, *q;
-
- list_for_each_entry_safe(buf, q, dbuf_list, list)
- if (buf->fb_cookie == fb_cookie)
- xen_drm_front_shbuf_flush(buf->shbuf);
-}
-
static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
struct xen_drm_front_dbuf *buf, *q;
@@ -78,8 +62,8 @@ static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
list_for_each_entry_safe(buf, q, dbuf_list, list)
if (buf->dbuf_cookie == dbuf_cookie) {
list_del(&buf->list);
- xen_drm_front_shbuf_unmap(buf->shbuf);
- xen_drm_front_shbuf_free(buf->shbuf);
+ xen_front_pgdir_shbuf_unmap(&buf->shbuf);
+ xen_front_pgdir_shbuf_free(&buf->shbuf);
kfree(buf);
break;
}
@@ -91,8 +75,8 @@ static void dbuf_free_all(struct list_head *dbuf_list)
list_for_each_entry_safe(buf, q, dbuf_list, list) {
list_del(&buf->list);
- xen_drm_front_shbuf_unmap(buf->shbuf);
- xen_drm_front_shbuf_free(buf->shbuf);
+ xen_front_pgdir_shbuf_unmap(&buf->shbuf);
+ xen_front_pgdir_shbuf_free(&buf->shbuf);
kfree(buf);
}
}
@@ -171,9 +155,9 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
u32 bpp, u64 size, struct page **pages)
{
struct xen_drm_front_evtchnl *evtchnl;
- struct xen_drm_front_shbuf *shbuf;
+ struct xen_drm_front_dbuf *dbuf;
struct xendispl_req *req;
- struct xen_drm_front_shbuf_cfg buf_cfg;
+ struct xen_front_pgdir_shbuf_cfg buf_cfg;
unsigned long flags;
int ret;
@@ -181,28 +165,29 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
if (unlikely(!evtchnl))
return -EIO;
+ dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
+ if (!dbuf)
+ return -ENOMEM;
+
+ dbuf_add_to_list(front_info, dbuf, dbuf_cookie);
+
memset(&buf_cfg, 0, sizeof(buf_cfg));
buf_cfg.xb_dev = front_info->xb_dev;
+ buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
buf_cfg.pages = pages;
- buf_cfg.size = size;
+ buf_cfg.pgdir = &dbuf->shbuf;
buf_cfg.be_alloc = front_info->cfg.be_alloc;
- shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
- if (IS_ERR(shbuf))
- return PTR_ERR(shbuf);
-
- ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
- if (ret < 0) {
- xen_drm_front_shbuf_free(shbuf);
- return ret;
- }
+ ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
+ if (ret < 0)
+ goto fail_shbuf_alloc;
mutex_lock(&evtchnl->u.req.req_io_lock);
spin_lock_irqsave(&front_info->io_lock, flags);
req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
req->op.dbuf_create.gref_directory =
- xen_drm_front_shbuf_get_dir_start(shbuf);
+ xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
req->op.dbuf_create.buffer_sz = size;
req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
req->op.dbuf_create.width = width;
@@ -221,7 +206,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
if (ret < 0)
goto fail;
- ret = xen_drm_front_shbuf_map(shbuf);
+ ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
if (ret < 0)
goto fail;
@@ -230,6 +215,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
fail:
mutex_unlock(&evtchnl->u.req.req_io_lock);
+fail_shbuf_alloc:
dbuf_free(&front_info->dbuf_list, dbuf_cookie);
return ret;
}
@@ -358,7 +344,6 @@ int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
if (unlikely(conn_idx >= front_info->num_evt_pairs))
return -EINVAL;
- dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
evtchnl = &front_info->evt_pairs[conn_idx].req;
mutex_lock(&evtchnl->u.req.req_io_lock);
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 47ff019d3aef..28bc501af450 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -22,7 +22,6 @@
#include <xen/balloon.h>
#include "xen_drm_front.h"
-#include "xen_drm_front_shbuf.h"
struct xen_gem_object {
struct drm_gem_object base;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
deleted file mode 100644
index d333b67cc1a0..000000000000
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
+++ /dev/null
@@ -1,414 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-
-/*
- * Xen para-virtual DRM device
- *
- * Copyright (C) 2016-2018 EPAM Systems Inc.
- *
- * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
- */
-
-#include <drm/drmP.h>
-
-#if defined(CONFIG_X86)
-#include <drm/drm_cache.h>
-#endif
-#include <linux/errno.h>
-#include <linux/mm.h>
-
-#include <asm/xen/hypervisor.h>
-#include <xen/balloon.h>
-#include <xen/xen.h>
-#include <xen/xenbus.h>
-#include <xen/interface/io/ring.h>
-#include <xen/interface/io/displif.h>
-
-#include "xen_drm_front.h"
-#include "xen_drm_front_shbuf.h"
-
-struct xen_drm_front_shbuf_ops {
- /*
- * Calculate number of grefs required to handle this buffer,
- * e.g. if grefs are required for page directory only or the buffer
- * pages as well.
- */
- void (*calc_num_grefs)(struct xen_drm_front_shbuf *buf);
- /* Fill page directory according to para-virtual display protocol. */
- void (*fill_page_dir)(struct xen_drm_front_shbuf *buf);
- /* Claim grant references for the pages of the buffer. */
- int (*grant_refs_for_buffer)(struct xen_drm_front_shbuf *buf,
- grant_ref_t *priv_gref_head, int gref_idx);
- /* Map grant references of the buffer. */
- int (*map)(struct xen_drm_front_shbuf *buf);
- /* Unmap grant references of the buffer. */
- int (*unmap)(struct xen_drm_front_shbuf *buf);
-};
-
-grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf)
-{
- if (!buf->grefs)
- return GRANT_INVALID_REF;
-
- return buf->grefs[0];
-}
-
-int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf)
-{
- if (buf->ops->map)
- return buf->ops->map(buf);
-
- /* no need to map own grant references */
- return 0;
-}
-
-int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf)
-{
- if (buf->ops->unmap)
- return buf->ops->unmap(buf);
-
- /* no need to unmap own grant references */
- return 0;
-}
-
-void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf)
-{
-#if defined(CONFIG_X86)
- drm_clflush_pages(buf->pages, buf->num_pages);
-#endif
-}
-
-void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
-{
- if (buf->grefs) {
- int i;
-
- for (i = 0; i < buf->num_grefs; i++)
- if (buf->grefs[i] != GRANT_INVALID_REF)
- gnttab_end_foreign_access(buf->grefs[i],
- 0, 0UL);
- }
- kfree(buf->grefs);
- kfree(buf->directory);
- kfree(buf);
-}
-
-/*
- * number of grefs a page can hold with respect to the
- * struct xendispl_page_directory header
- */
-#define XEN_DRM_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
- offsetof(struct xendispl_page_directory, gref)) / \
- sizeof(grant_ref_t))
-
-static int get_num_pages_dir(struct xen_drm_front_shbuf *buf)
-{
- /* number of pages the page directory consumes itself */
- return DIV_ROUND_UP(buf->num_pages, XEN_DRM_NUM_GREFS_PER_PAGE);
-}
-
-static void backend_calc_num_grefs(struct xen_drm_front_shbuf *buf)
-{
- /* only for pages the page directory consumes itself */
- buf->num_grefs = get_num_pages_dir(buf);
-}
-
-static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
-{
- /*
- * number of pages the page directory consumes itself
- * plus grefs for the buffer pages
- */
- buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
-}
-
-#define xen_page_to_vaddr(page) \
- ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
-
-static int backend_unmap(struct xen_drm_front_shbuf *buf)
-{
- struct gnttab_unmap_grant_ref *unmap_ops;
- int i, ret;
-
- if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
- return 0;
-
- unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
- GFP_KERNEL);
- if (!unmap_ops) {
- DRM_ERROR("Failed to get memory while unmapping\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < buf->num_pages; i++) {
- phys_addr_t addr;
-
- addr = xen_page_to_vaddr(buf->pages[i]);
- gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
- buf->backend_map_handles[i]);
- }
-
- ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
- buf->num_pages);
-
- for (i = 0; i < buf->num_pages; i++) {
- if (unlikely(unmap_ops[i].status != GNTST_okay))
- DRM_ERROR("Failed to unmap page %d: %d\n",
- i, unmap_ops[i].status);
- }
-
- if (ret)
- DRM_ERROR("Failed to unmap grant references, ret %d", ret);
-
- kfree(unmap_ops);
- kfree(buf->backend_map_handles);
- buf->backend_map_handles = NULL;
- return ret;
-}
-
-static int backend_map(struct xen_drm_front_shbuf *buf)
-{
- struct gnttab_map_grant_ref *map_ops = NULL;
- unsigned char *ptr;
- int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
-
- map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
- if (!map_ops)
- return -ENOMEM;
-
- buf->backend_map_handles = kcalloc(buf->num_pages,
- sizeof(*buf->backend_map_handles),
- GFP_KERNEL);
- if (!buf->backend_map_handles) {
- kfree(map_ops);
- return -ENOMEM;
- }
-
- /*
- * read page directory to get grefs from the backend: for external
- * buffer we only allocate buf->grefs for the page directory,
- * so buf->num_grefs has number of pages in the page directory itself
- */
- ptr = buf->directory;
- grefs_left = buf->num_pages;
- cur_page = 0;
- for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
- struct xendispl_page_directory *page_dir =
- (struct xendispl_page_directory *)ptr;
- int to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
-
- if (to_copy > grefs_left)
- to_copy = grefs_left;
-
- for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
- phys_addr_t addr;
-
- addr = xen_page_to_vaddr(buf->pages[cur_page]);
- gnttab_set_map_op(&map_ops[cur_page], addr,
- GNTMAP_host_map,
- page_dir->gref[cur_gref],
- buf->xb_dev->otherend_id);
- cur_page++;
- }
-
- grefs_left -= to_copy;
- ptr += PAGE_SIZE;
- }
- ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
-
- /* save handles even if error, so we can unmap */
- for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
- buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
- if (unlikely(map_ops[cur_page].status != GNTST_okay))
- DRM_ERROR("Failed to map page %d: %d\n",
- cur_page, map_ops[cur_page].status);
- }
-
- if (ret) {
- DRM_ERROR("Failed to map grant references, ret %d", ret);
- backend_unmap(buf);
- }
-
- kfree(map_ops);
- return ret;
-}
-
-static void backend_fill_page_dir(struct xen_drm_front_shbuf *buf)
-{
- struct xendispl_page_directory *page_dir;
- unsigned char *ptr;
- int i, num_pages_dir;
-
- ptr = buf->directory;
- num_pages_dir = get_num_pages_dir(buf);
-
- /* fill only grefs for the page directory itself */
- for (i = 0; i < num_pages_dir - 1; i++) {
- page_dir = (struct xendispl_page_directory *)ptr;
-
- page_dir->gref_dir_next_page = buf->grefs[i + 1];
- ptr += PAGE_SIZE;
- }
- /* last page must say there is no more pages */
- page_dir = (struct xendispl_page_directory *)ptr;
- page_dir->gref_dir_next_page = GRANT_INVALID_REF;
-}
-
-static void guest_fill_page_dir(struct xen_drm_front_shbuf *buf)
-{
- unsigned char *ptr;
- int cur_gref, grefs_left, to_copy, i, num_pages_dir;
-
- ptr = buf->directory;
- num_pages_dir = get_num_pages_dir(buf);
-
- /*
- * while copying, skip grefs at start, they are for pages
- * granted for the page directory itself
- */
- cur_gref = num_pages_dir;
- grefs_left = buf->num_pages;
- for (i = 0; i < num_pages_dir; i++) {
- struct xendispl_page_directory *page_dir =
- (struct xendispl_page_directory *)ptr;
-
- if (grefs_left <= XEN_DRM_NUM_GREFS_PER_PAGE) {
- to_copy = grefs_left;
- page_dir->gref_dir_next_page = GRANT_INVALID_REF;
- } else {
- to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
- page_dir->gref_dir_next_page = buf->grefs[i + 1];
- }
- memcpy(&page_dir->gref, &buf->grefs[cur_gref],
- to_copy * sizeof(grant_ref_t));
- ptr += PAGE_SIZE;
- grefs_left -= to_copy;
- cur_gref += to_copy;
- }
-}
-
-static int guest_grant_refs_for_buffer(struct xen_drm_front_shbuf *buf,
- grant_ref_t *priv_gref_head,
- int gref_idx)
-{
- int i, cur_ref, otherend_id;
-
- otherend_id = buf->xb_dev->otherend_id;
- for (i = 0; i < buf->num_pages; i++) {
- cur_ref = gnttab_claim_grant_reference(priv_gref_head);
- if (cur_ref < 0)
- return cur_ref;
-
- gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
- xen_page_to_gfn(buf->pages[i]),
- 0);
- buf->grefs[gref_idx++] = cur_ref;
- }
- return 0;
-}
-
-static int grant_references(struct xen_drm_front_shbuf *buf)
-{
- grant_ref_t priv_gref_head;
- int ret, i, j, cur_ref;
- int otherend_id, num_pages_dir;
-
- ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
- if (ret < 0) {
- DRM_ERROR("Cannot allocate grant references\n");
- return ret;
- }
-
- otherend_id = buf->xb_dev->otherend_id;
- j = 0;
- num_pages_dir = get_num_pages_dir(buf);
- for (i = 0; i < num_pages_dir; i++) {
- unsigned long frame;
-
- cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
- if (cur_ref < 0)
- return cur_ref;
-
- frame = xen_page_to_gfn(virt_to_page(buf->directory +
- PAGE_SIZE * i));
- gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
- buf->grefs[j++] = cur_ref;
- }
-
- if (buf->ops->grant_refs_for_buffer) {
- ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
- if (ret)
- return ret;
- }
-
- gnttab_free_grant_references(priv_gref_head);
- return 0;
-}
-
-static int alloc_storage(struct xen_drm_front_shbuf *buf)
-{
- buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
- if (!buf->grefs)
- return -ENOMEM;
-
- buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
- if (!buf->directory)
- return -ENOMEM;
-
- return 0;
-}
-
-/*
- * For be allocated buffers we don't need grant_refs_for_buffer as those
- * grant references are allocated at backend side
- */
-static const struct xen_drm_front_shbuf_ops backend_ops = {
- .calc_num_grefs = backend_calc_num_grefs,
- .fill_page_dir = backend_fill_page_dir,
- .map = backend_map,
- .unmap = backend_unmap
-};
-
-/* For locally granted references we do not need to map/unmap the references */
-static const struct xen_drm_front_shbuf_ops local_ops = {
- .calc_num_grefs = guest_calc_num_grefs,
- .fill_page_dir = guest_fill_page_dir,
- .grant_refs_for_buffer = guest_grant_refs_for_buffer,
-};
-
-struct xen_drm_front_shbuf *
-xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
-{
- struct xen_drm_front_shbuf *buf;
- int ret;
-
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- if (cfg->be_alloc)
- buf->ops = &backend_ops;
- else
- buf->ops = &local_ops;
-
- buf->xb_dev = cfg->xb_dev;
- buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
- buf->pages = cfg->pages;
-
- buf->ops->calc_num_grefs(buf);
-
- ret = alloc_storage(buf);
- if (ret)
- goto fail;
-
- ret = grant_references(buf);
- if (ret)
- goto fail;
-
- buf->ops->fill_page_dir(buf);
-
- return buf;
-
-fail:
- xen_drm_front_shbuf_free(buf);
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h b/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
deleted file mode 100644
index 7545c692539e..000000000000
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-
-/*
- * Xen para-virtual DRM device
- *
- * Copyright (C) 2016-2018 EPAM Systems Inc.
- *
- * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
- */
-
-#ifndef __XEN_DRM_FRONT_SHBUF_H_
-#define __XEN_DRM_FRONT_SHBUF_H_
-
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-
-#include <xen/grant_table.h>
-
-struct xen_drm_front_shbuf {
- /*
- * number of references granted for the backend use:
- * - for allocated/imported dma-buf's this holds number of grant
- * references for the page directory and pages of the buffer
- * - for the buffer provided by the backend this holds number of
- * grant references for the page directory as grant references for
- * the buffer will be provided by the backend
- */
- int num_grefs;
- grant_ref_t *grefs;
- unsigned char *directory;
-
- int num_pages;
- struct page **pages;
-
- struct xenbus_device *xb_dev;
-
- /* these are the ops used internally depending on be_alloc mode */
- const struct xen_drm_front_shbuf_ops *ops;
-
- /* Xen map handles for the buffer allocated by the backend */
- grant_handle_t *backend_map_handles;
-};
-
-struct xen_drm_front_shbuf_cfg {
- struct xenbus_device *xb_dev;
- size_t size;
- struct page **pages;
- bool be_alloc;
-};
-
-struct xen_drm_front_shbuf *
-xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg);
-
-grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf);
-
-int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf);
-
-int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf);
-
-void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf);
-
-void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf);
-
-#endif /* __XEN_DRM_FRONT_SHBUF_H_ */