-rw-r--r--  Documentation/devicetree/bindings/display/panel/display-timing.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt | 33
-rw-r--r--  Documentation/gpu/drm-kms.rst | 3
-rw-r--r--  Documentation/gpu/drm-uapi.rst | 6
-rw-r--r--  Documentation/gpu/i915.rst | 9
-rw-r--r--  Documentation/sync_file.txt | 14
-rw-r--r--  MAINTAINERS | 10
-rw-r--r--  drivers/base/Kconfig | 6
-rw-r--r--  drivers/dma-buf/Kconfig | 2
-rw-r--r--  drivers/dma-buf/Makefile | 2
-rw-r--r--  drivers/dma-buf/dma-buf.c | 28
-rw-r--r--  drivers/dma-buf/dma-fence-array.c (renamed from drivers/dma-buf/fence-array.c) | 91
-rw-r--r--  drivers/dma-buf/dma-fence.c (renamed from drivers/dma-buf/fence.c) | 201
-rw-r--r--  drivers/dma-buf/reservation.c | 192
-rw-r--r--  drivers/dma-buf/seqno-fence.c | 18
-rw-r--r--  drivers/dma-buf/sw_sync.c | 48
-rw-r--r--  drivers/dma-buf/sync_debug.c | 13
-rw-r--r--  drivers/dma-buf/sync_debug.h | 9
-rw-r--r--  drivers/dma-buf/sync_file.c | 63
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ObjectID.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 884
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 95
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 93
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 470
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 443
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 62
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 85
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 185
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 197
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 134
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 359
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 205
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 222
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 122
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 834
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 135
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 135
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 317
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 293
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 432
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 83
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 152
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 221
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 64
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 162
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 74
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 988
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 23
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h | 2
-rwxr-xr-x  drivers/gpu/drm/amd/include/cgs_common.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 21
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | 22
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 59
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/power_state.h | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 6
-rwxr-xr-x  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 4
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 67
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 26
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c | 48
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 3
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 4
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.c | 7
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 13
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 3
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 41
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 1
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 1564
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.h | 1517
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 4
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 33
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 117
-rw-r--r--  drivers/gpu/drm/drm_blend.c | 39
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 37
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 34
-rw-r--r--  drivers/gpu/drm/drm_debugfs_crc.c | 352
-rw-r--r--  drivers/gpu/drm/drm_dp_dual_mode_helper.c | 121
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 111
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 26
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 27
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 8
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c | 281
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 102
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 16
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 4
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 51
-rw-r--r--  drivers/gpu/drm/drm_modeset_helper.c | 17
-rw-r--r--  drivers/gpu/drm/drm_of.c | 28
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 5
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 28
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 46
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 23
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 7
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 25
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 352
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 288
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 2831
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.h | 49
-rw-r--r--  drivers/gpu/drm/i915/gvt/debug.h | 29
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 330
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.h | 163
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c | 532
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.h | 150
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 860
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h | 188
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 312
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 2232
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 270
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 172
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 325
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 2797
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 34
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 741
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.h | 233
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 306
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 105
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 220
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 344
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h | 80
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c | 291
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.h | 43
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 294
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.h | 58
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 578
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 139
-rw-r--r--  drivers/gpu/drm/i915/gvt/trace.h | 286
-rw-r--r--  drivers/gpu/drm/i915/gvt/trace_points.c | 36
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 274
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 388
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 144
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 404
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 217
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 88
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 383
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 37
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 593
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 283
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 41
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 302
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 53
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 118
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 160
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 709
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 250
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 82
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 38
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 48
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 169
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 69
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 143
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 93
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 136
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 729
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 96
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 56
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 602
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 7
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 3
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 35
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.c | 28
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 14
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h | 24
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 80
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_led.c | 139
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_led.h | 57
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/firmware.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c | 82
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c | 139
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c | 129
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c | 137
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c | 70
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 47
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 50
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 49
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 20
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 31
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 20
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 30
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 25
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 59
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 52
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 58
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 53
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 57
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 54
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dispc.c | 228
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/display.c | 78
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dpi.c | 40
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 160
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.h | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi.h | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4.c | 31
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5.c | 31
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5_core.c | 85
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_wp.c | 73
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/omapdss.h | 98
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/output.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/rfbi.c | 49
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/sdi.c | 33
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/venc.c | 97
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c | 87
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 43
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 53
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_encoder.c | 10
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 50
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 35
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sync.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/savage/savage_state.c | 1
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c | 8
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 3
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 3
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 18
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 2
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_external.c | 4
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_plane.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 59
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 22
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 3
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 3
-rw-r--r--  drivers/gpu/drm/vgem/vgem_fence.c | 53
-rw-r--r--  drivers/gpu/drm/virtio/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c | 26
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 12
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 44
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
-rw-r--r--  drivers/gpu/ipu-v3/Kconfig | 1
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 15
-rw-r--r--  drivers/video/hdmi.c | 4
-rw-r--r--  drivers/video/of_display_timing.c | 9
-rw-r--r--  include/drm/bridge/mhl.h | 291
-rw-r--r--  include/drm/drmP.h | 4
-rw-r--r--  include/drm/drm_atomic.h | 42
-rw-r--r--  include/drm/drm_blend.h | 10
-rw-r--r--  include/drm/drm_crtc.h | 55
-rw-r--r--  include/drm/drm_debugfs_crc.h | 73
-rw-r--r--  include/drm/drm_dp_dual_mode_helper.h | 27
-rw-r--r--  include/drm/drm_dp_helper.h | 6
-rw-r--r--  include/drm/drm_encoder.h | 2
-rw-r--r--  include/drm/drm_fourcc.h | 23
-rw-r--r--  include/drm/drm_of.h | 13
-rw-r--r--  include/drm/drm_plane.h | 5
-rw-r--r--  include/drm/i915_component.h | 6
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 15
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 48
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h | 2
-rw-r--r--  include/linux/dma-buf.h | 4
-rw-r--r--  include/linux/dma-fence-array.h | 86
-rw-r--r--  include/linux/dma-fence.h | 437
-rw-r--r--  include/linux/fence-array.h | 83
-rw-r--r--  include/linux/fence.h | 378
-rw-r--r--  include/linux/hdmi.h | 2
-rw-r--r--  include/linux/reservation.h | 28
-rw-r--r--  include/linux/seqno-fence.h | 20
-rw-r--r--  include/linux/sync_file.h | 14
-rw-r--r--  include/sound/hda_i915.h | 11
-rw-r--r--  include/trace/events/dma_fence.h (renamed from include/trace/events/fence.h) | 44
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 53
-rw-r--r--  include/uapi/drm/drm_mode.h | 24
-rw-r--r--  include/video/display_timing.h | 4
-rw-r--r--  sound/hda/hdac_i915.c | 18
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 7
-rw-r--r--  sound/soc/codecs/hdac_hdmi.c | 2
503 files changed, 32458 insertions, 10956 deletions
diff --git a/Documentation/devicetree/bindings/display/panel/display-timing.txt b/Documentation/devicetree/bindings/display/panel/display-timing.txt
index e1d4a0b59612..81a75893d1b8 100644
--- a/Documentation/devicetree/bindings/display/panel/display-timing.txt
+++ b/Documentation/devicetree/bindings/display/panel/display-timing.txt
@@ -32,6 +32,14 @@ optional properties:
- active low = drive pixel data on falling edge/
sample data on rising edge
- ignored = ignored
+ - syncclk-active: with
+ - active high = drive sync on rising edge/
+ sample sync on falling edge of pixel
+ clock
+ - active low = drive sync on falling edge/
+ sample sync on rising edge of pixel
+ clock
+ - omitted = same configuration as pixelclk-active
- interlaced (bool): boolean to enable interlaced mode
- doublescan (bool): boolean to enable doublescan mode
- doubleclk (bool): boolean to enable doubleclock mode
diff --git a/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt b/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
new file mode 100644
index 000000000000..9409d9c6a260
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/bridge/sil-sii8620.txt
@@ -0,0 +1,33 @@
+Silicon Image SiI8620 HDMI/MHL bridge bindings
+
+Required properties:
+ - compatible: "sil,sii8620"
+ - reg: i2c address of the bridge
+ - cvcc10-supply: Digital Core Supply Voltage (1.0V)
+ - iovcc18-supply: I/O Supply Voltage (1.8V)
+ - interrupts, interrupt-parent: interrupt specifier of INT pin
+ - reset-gpios: gpio specifier of RESET pin
+ - clocks, clock-names: specification and name of "xtal" clock
+ - video interfaces: the device node can contain a video interface port
+ node for the HDMI encoder, as described in [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+ sii8620@39 {
+ reg = <0x39>;
+ compatible = "sil,sii8620";
+ cvcc10-supply = <&ldo36_reg>;
+ iovcc18-supply = <&ldo34_reg>;
+ interrupt-parent = <&gpf0>;
+ interrupts = <2 0>;
+ reset-gpios = <&gpv7 0 0>;
+ clocks = <&pmu_system_controller 0>;
+ clock-names = "xtal";
+
+ port {
+ mhl_to_hdmi: endpoint {
+ remote-endpoint = <&hdmi_to_mhl>;
+ };
+ };
+ };
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 53b872c105d2..cb0d3537b705 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -63,6 +63,9 @@ Frame Buffer Functions Reference
DRM Format Handling
===================
+.. kernel-doc:: include/drm/drm_fourcc.h
+ :internal:
+
.. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
:export:
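
[A minimal usage sketch of the format helpers documented above, assuming only
the drm_format_info() lookup exported by drm_fourcc.c; the wrapper function
and its names are illustrative, not part of the patch:

    #include <drm/drm_fourcc.h>

    /* Illustrative helper: bytes per line of plane 0 for a fourcc code. */
    static unsigned int example_pitch_for_format(u32 fourcc, unsigned int width)
    {
            const struct drm_format_info *info = drm_format_info(fourcc);

            if (!info)
                    return 0;       /* unrecognized fourcc */

            return info->cpp[0] * width;    /* cpp = bytes ("chars") per pixel */
    }
]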
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 1ba301cebe16..de3ac9f90f8f 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -216,3 +216,9 @@ interfaces. Especially since all hardware-acceleration interfaces to
userspace are driver specific for efficiency and other reasons, these
interfaces can be rather substantial. Hence every driver has its own
chapter.
+
+Testing and validation
+======================
+
+.. kernel-doc:: drivers/gpu/drm/drm_debugfs_crc.c
+ :doc: CRC ABI
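
[A hedged userspace sketch of the CRC ABI referenced above, based on the
drm_debugfs_crc.c kernel-doc: write a source name to crtc-N/crc/control, then
read frame/CRC lines from crtc-N/crc/data. The "auto" source and the debugfs
path are assumptions about a typical setup, not a stable contract:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            char line[128];
            ssize_t n;
            int fd;

            /* select the default CRC source for CRTC 0 of DRM minor 0 */
            fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
            if (fd < 0)
                    return 1;
            if (write(fd, "auto", strlen("auto")) < 0)
                    return 1;
            close(fd);

            /* each read yields a frame reference plus one CRC per source */
            fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/data", O_RDONLY);
            if (fd < 0)
                    return 1;
            n = read(fd, line, sizeof(line) - 1);
            if (n > 0) {
                    line[n] = '\0';
                    printf("%s", line);
            }
            close(fd);
            return 0;
    }
]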
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 87aaffc22920..95ce77ff4342 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -49,6 +49,15 @@ Intel GVT-g Guest Support(vGPU)
.. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
:internal:
+Intel GVT-g Host Support (vGPU device model)
+--------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
+ :doc: Intel GVT-g host support
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
+ :internal:
+
Display Hardware Handling
=========================
diff --git a/Documentation/sync_file.txt b/Documentation/sync_file.txt
index b63a68531afd..269681a6faec 100644
--- a/Documentation/sync_file.txt
+++ b/Documentation/sync_file.txt
@@ -6,7 +6,7 @@
This document serves as a guide for device driver writers on what the
sync_file API is, and how drivers can support it. Sync file is the carrier of
-the fences(struct fence) that are needed to synchronize between drivers or
+the fences(struct dma_fence) that are needed to synchronize between drivers or
across process boundaries.
The sync_file API is meant to be used to send and receive fence information
@@ -32,9 +32,9 @@ in-fences and out-fences
Sync files can go either to or from userspace. When a sync_file is sent from
the driver to userspace we call the fences it contains 'out-fences'. They are
related to a buffer that the driver is processing or is going to process, so
-the driver creates an out-fence to be able to notify, through fence_signal(),
-when it has finished using (or processing) that buffer. Out-fences are fences
-that the driver creates.
+the driver creates an out-fence to be able to notify, through
+dma_fence_signal(), when it has finished using (or processing) that buffer.
+Out-fences are fences that the driver creates.
On the other hand if the driver receives fence(s) through a sync_file from
userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
@@ -47,7 +47,7 @@ Creating Sync Files
When a driver needs to send an out-fence to userspace it creates a sync_file.
Interface:
- struct sync_file *sync_file_create(struct fence *fence);
+ struct sync_file *sync_file_create(struct dma_fence *fence);
The caller passes the out-fence and gets back the sync_file. That is just the
first step; next it needs to install an fd on sync_file->file. So it gets an
@@ -72,11 +72,11 @@ of the Sync File to the kernel. The kernel can then retrieve the fences
from it.
Interface:
- struct fence *sync_file_get_fence(int fd);
+ struct dma_fence *sync_file_get_fence(int fd);
The returned reference is owned by the caller and must be disposed of
-afterwards using fence_put(). In case of error, a NULL is returned instead.
+afterwards using dma_fence_put(). In case of error, a NULL is returned instead.
References:
[1] struct sync_file in include/linux/sync_file.h
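
[A minimal kernel-side sketch of the out-fence flow above, assuming a fence
the driver already initialized; "my_fence" and the function name are
stand-ins, and error unwinding is trimmed to the essentials:

    #include <linux/errno.h>
    #include <linux/fcntl.h>
    #include <linux/file.h>
    #include <linux/sync_file.h>

    static int example_install_out_fence(struct dma_fence *my_fence)
    {
            struct sync_file *sync_file;
            int fd;

            fd = get_unused_fd_flags(O_CLOEXEC);
            if (fd < 0)
                    return fd;

            sync_file = sync_file_create(my_fence);
            if (!sync_file) {
                    put_unused_fd(fd);
                    return -ENOMEM;
            }

            /* the fd now owns the sync_file; hand it back, e.g. via an ioctl arg */
            fd_install(fd, sync_file->file);
            return fd;
    }
]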
diff --git a/MAINTAINERS b/MAINTAINERS
index 411e3b87b8c2..e18faacc096c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4070,6 +4070,16 @@ F: include/drm/i915*
F: include/uapi/drm/i915_drm.h
F: Documentation/gpu/i915.rst
+INTEL GVT-g DRIVERS (Intel GPU Virtualization)
+M: Zhenyu Wang <zhenyuw@linux.intel.com>
+M: Zhi Wang <zhi.a.wang@intel.com>
+L: igvt-g-dev@lists.01.org
+L: intel-gfx@lists.freedesktop.org
+W: https://01.org/igvt-g
+T: git https://github.com/01org/gvt-linux.git
+S: Supported
+F: drivers/gpu/drm/i915/gvt/
+
DRM DRIVERS FOR ATMEL HLCDC
M: Boris Brezillon <boris.brezillon@free-electrons.com>
L: dri-devel@lists.freedesktop.org
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d02e7c0f5bfd..0e40967a5d6e 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -250,11 +250,11 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
drivers.
-config FENCE_TRACE
- bool "Enable verbose FENCE_TRACE messages"
+config DMA_FENCE_TRACE
+ bool "Enable verbose DMA_FENCE_TRACE messages"
depends on DMA_SHARED_BUFFER
help
- Enable the FENCE_TRACE printks. This will add extra
+ Enable the DMA_FENCE_TRACE printks. This will add extra
spam to the console log, but will make it easier to diagnose
lockup related problems for dma-buffers shared across multiple
devices.
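
[For reference, the printks this option controls come from the
DMA_FENCE_TRACE() macro in <linux/dma-fence.h>; a driver could add its own
marker in a signaling path roughly like this sketch (function name
illustrative):

    #include <linux/dma-fence.h>

    static void example_signal_fence(struct dma_fence *fence)
    {
            /* compiled out unless CONFIG_DMA_FENCE_TRACE is set */
            DMA_FENCE_TRACE(fence, "signaling from example driver\n");
            dma_fence_signal(fence);
    }
]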
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 2585821b24ab..ed3b785bae37 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -7,7 +7,7 @@ config SYNC_FILE
select DMA_SHARED_BUFFER
---help---
The Sync File Framework adds explicit synchronization via
- userspace. It enables send/receive 'struct fence' objects to/from
+ userspace. It enables sending/receiving 'struct dma_fence' objects to/from
userspace via Sync File fds for synchronization between drivers via
userspace components. It has been ported from Android.
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 210a10bfad2b..c33bf8863147 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,3 +1,3 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cf04d249a6a4..e72e64484131 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -25,7 +25,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
@@ -124,7 +124,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
return base + offset;
}
-static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
+static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
unsigned long flags;
@@ -140,7 +140,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
struct dma_buf *dmabuf;
struct reservation_object *resv;
struct reservation_object_list *fobj;
- struct fence *fence_excl;
+ struct dma_fence *fence_excl;
unsigned long events;
unsigned shared_count, seq;
@@ -187,20 +187,20 @@ retry:
spin_unlock_irq(&dmabuf->poll.lock);
if (events & pevents) {
- if (!fence_get_rcu(fence_excl)) {
+ if (!dma_fence_get_rcu(fence_excl)) {
/* force a recheck */
events &= ~pevents;
dma_buf_poll_cb(NULL, &dcb->cb);
- } else if (!fence_add_callback(fence_excl, &dcb->cb,
- dma_buf_poll_cb)) {
+ } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
+ dma_buf_poll_cb)) {
events &= ~pevents;
- fence_put(fence_excl);
+ dma_fence_put(fence_excl);
} else {
/*
* No callback queued, wake up any additional
* waiters.
*/
- fence_put(fence_excl);
+ dma_fence_put(fence_excl);
dma_buf_poll_cb(NULL, &dcb->cb);
}
}
@@ -222,9 +222,9 @@ retry:
goto out;
for (i = 0; i < shared_count; ++i) {
- struct fence *fence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
- if (!fence_get_rcu(fence)) {
+ if (!dma_fence_get_rcu(fence)) {
/*
* fence refcount dropped to zero, this means
* that fobj has been freed
@@ -235,13 +235,13 @@ retry:
dma_buf_poll_cb(NULL, &dcb->cb);
break;
}
- if (!fence_add_callback(fence, &dcb->cb,
- dma_buf_poll_cb)) {
- fence_put(fence);
+ if (!dma_fence_add_callback(fence, &dcb->cb,
+ dma_buf_poll_cb)) {
+ dma_fence_put(fence);
events &= ~POLLOUT;
break;
}
- fence_put(fence);
+ dma_fence_put(fence);
}
/* No callback queued, wake up any additional waiters. */
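
[The hunk above follows a recurring pattern with the renamed API: take an RCU
reference, try to attach a callback, and drop the reference either way. A
condensed sketch, with "example_cb" as a hypothetical completion hook:

    #include <linux/dma-fence.h>
    #include <linux/reservation.h>

    static void example_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
    {
            /* runs once when the fence signals, possibly from irq context */
    }

    static bool example_watch_excl(struct reservation_object *resv,
                                   struct dma_fence_cb *cb)
    {
            struct dma_fence *fence;

            rcu_read_lock();
            fence = rcu_dereference(resv->fence_excl);
            if (fence && !dma_fence_get_rcu(fence))
                    fence = NULL;   /* refcount hit zero; caller rechecks */
            rcu_read_unlock();

            if (!fence)
                    return false;

            if (dma_fence_add_callback(fence, cb, example_cb)) {
                    dma_fence_put(fence);
                    return false;   /* already signaled, cb not queued */
            }

            dma_fence_put(fence);   /* creator keeps it alive until signal */
            return true;
    }
]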
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/dma-fence-array.c
index f1989fcaf354..67eb7c8fb88c 100644
--- a/drivers/dma-buf/fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -1,5 +1,5 @@
/*
- * fence-array: aggregate fences to be waited together
+ * dma-fence-array: aggregate fences to be waited together
*
* Copyright (C) 2016 Collabora Ltd
* Copyright (C) 2016 Advanced Micro Devices, Inc.
@@ -19,35 +19,34 @@
#include <linux/export.h>
#include <linux/slab.h>
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
-
-static const char *fence_array_get_driver_name(struct fence *fence)
+static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
- return "fence_array";
+ return "dma_fence_array";
}
-static const char *fence_array_get_timeline_name(struct fence *fence)
+static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
return "unbound";
}
-static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
+static void dma_fence_array_cb_func(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
- struct fence_array_cb *array_cb =
- container_of(cb, struct fence_array_cb, cb);
- struct fence_array *array = array_cb->array;
+ struct dma_fence_array_cb *array_cb =
+ container_of(cb, struct dma_fence_array_cb, cb);
+ struct dma_fence_array *array = array_cb->array;
if (atomic_dec_and_test(&array->num_pending))
- fence_signal(&array->base);
- fence_put(&array->base);
+ dma_fence_signal(&array->base);
+ dma_fence_put(&array->base);
}
-static bool fence_array_enable_signaling(struct fence *fence)
+static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
- struct fence_array *array = to_fence_array(fence);
- struct fence_array_cb *cb = (void *)(&array[1]);
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ struct dma_fence_array_cb *cb = (void *)(&array[1]);
unsigned i;
for (i = 0; i < array->num_fences; ++i) {
@@ -60,10 +59,10 @@ static bool fence_array_enable_signaling(struct fence *fence)
* until we signal the array as complete (but that is now
* insufficient).
*/
- fence_get(&array->base);
- if (fence_add_callback(array->fences[i], &cb[i].cb,
- fence_array_cb_func)) {
- fence_put(&array->base);
+ dma_fence_get(&array->base);
+ if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
+ dma_fence_array_cb_func)) {
+ dma_fence_put(&array->base);
if (atomic_dec_and_test(&array->num_pending))
return false;
}
@@ -72,69 +71,71 @@ static bool fence_array_enable_signaling(struct fence *fence)
return true;
}
-static bool fence_array_signaled(struct fence *fence)
+static bool dma_fence_array_signaled(struct dma_fence *fence)
{
- struct fence_array *array = to_fence_array(fence);
+ struct dma_fence_array *array = to_dma_fence_array(fence);
return atomic_read(&array->num_pending) <= 0;
}
-static void fence_array_release(struct fence *fence)
+static void dma_fence_array_release(struct dma_fence *fence)
{
- struct fence_array *array = to_fence_array(fence);
+ struct dma_fence_array *array = to_dma_fence_array(fence);
unsigned i;
for (i = 0; i < array->num_fences; ++i)
- fence_put(array->fences[i]);
+ dma_fence_put(array->fences[i]);
kfree(array->fences);
- fence_free(fence);
+ dma_fence_free(fence);
}
-const struct fence_ops fence_array_ops = {
- .get_driver_name = fence_array_get_driver_name,
- .get_timeline_name = fence_array_get_timeline_name,
- .enable_signaling = fence_array_enable_signaling,
- .signaled = fence_array_signaled,
- .wait = fence_default_wait,
- .release = fence_array_release,
+const struct dma_fence_ops dma_fence_array_ops = {
+ .get_driver_name = dma_fence_array_get_driver_name,
+ .get_timeline_name = dma_fence_array_get_timeline_name,
+ .enable_signaling = dma_fence_array_enable_signaling,
+ .signaled = dma_fence_array_signaled,
+ .wait = dma_fence_default_wait,
+ .release = dma_fence_array_release,
};
-EXPORT_SYMBOL(fence_array_ops);
+EXPORT_SYMBOL(dma_fence_array_ops);
/**
- * fence_array_create - Create a custom fence array
+ * dma_fence_array_create - Create a custom fence array
* @num_fences: [in] number of fences to add in the array
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
* @signal_on_any: [in] signal on any fence in the array
*
- * Allocate a fence_array object and initialize the base fence with fence_init().
+ * Allocate a dma_fence_array object and initialize the base fence with
+ * dma_fence_init().
* In case of error it returns NULL.
*
* The caller should allocate the fences array with num_fences size
* and fill it with the fences it wants to add to the object. Ownership of this
- * array is taken and fence_put() is used on each fence on release.
+ * array is taken and dma_fence_put() is used on each fence on release.
*
* If @signal_on_any is true the fence array signals if any fence in the array
* signals, otherwise it signals when all fences in the array signal.
*/
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
- u64 context, unsigned seqno,
- bool signal_on_any)
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+ struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any)
{
- struct fence_array *array;
+ struct dma_fence_array *array;
size_t size = sizeof(*array);
/* Allocate the callback structures behind the array. */
- size += num_fences * sizeof(struct fence_array_cb);
+ size += num_fences * sizeof(struct dma_fence_array_cb);
array = kzalloc(size, GFP_KERNEL);
if (!array)
return NULL;
spin_lock_init(&array->lock);
- fence_init(&array->base, &fence_array_ops, &array->lock,
- context, seqno);
+ dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
+ context, seqno);
array->num_fences = num_fences;
atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
@@ -142,4 +143,4 @@ struct fence_array *fence_array_create(int num_fences, struct fence **fences,
return array;
}
-EXPORT_SYMBOL(fence_array_create);
+EXPORT_SYMBOL(dma_fence_array_create);
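
[A short sketch of the dma_fence_array_create() contract documented above:
the caller allocates fences[], ownership of that array passes to the new
fence, and the result signals once both inputs do. "a" and "b" stand in for
fences the caller holds references on; the merge helper is illustrative:

    #include <linux/dma-fence-array.h>
    #include <linux/slab.h>

    static struct dma_fence *example_merge(struct dma_fence *a,
                                           struct dma_fence *b)
    {
            struct dma_fence_array *array;
            struct dma_fence **fences;

            fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
            if (!fences)
                    return NULL;

            fences[0] = dma_fence_get(a);
            fences[1] = dma_fence_get(b);

            /* signal_on_any=false: wait for all fences in the array */
            array = dma_fence_array_create(2, fences,
                                           dma_fence_context_alloc(1), 1,
                                           false);
            if (!array) {
                    dma_fence_put(a);
                    dma_fence_put(b);
                    kfree(fences);
                    return NULL;
            }

            return &array->base;
    }
]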
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/dma-fence.c
index 4d51f9e83fa8..3a7bf009c21c 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -21,13 +21,13 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
-EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
-EXPORT_TRACEPOINT_SYMBOL(fence_emit);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
/*
* fence context counter: each execution context should have its own
@@ -35,39 +35,41 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
-static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
/**
- * fence_context_alloc - allocate an array of fence contexts
+ * dma_fence_context_alloc - allocate an array of fence contexts
* @num: [in] amount of contexts to allocate
*
 * This function will return the first index of the @num contexts allocated.
* The fence context is used for setting fence->context to a unique number.
*/
-u64 fence_context_alloc(unsigned num)
+u64 dma_fence_context_alloc(unsigned num)
{
BUG_ON(!num);
- return atomic64_add_return(num, &fence_context_counter) - num;
+ return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
-EXPORT_SYMBOL(fence_context_alloc);
+EXPORT_SYMBOL(dma_fence_context_alloc);
/**
- * fence_signal_locked - signal completion of a fence
+ * dma_fence_signal_locked - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*
- * Unlike fence_signal, this function must be called with fence->lock held.
+ * Unlike dma_fence_signal, this function must be called with fence->lock held.
*/
-int fence_signal_locked(struct fence *fence)
+int dma_fence_signal_locked(struct dma_fence *fence)
{
- struct fence_cb *cur, *tmp;
+ struct dma_fence_cb *cur, *tmp;
int ret = 0;
+ lockdep_assert_held(fence->lock);
+
if (WARN_ON(!fence))
return -EINVAL;
@@ -76,15 +78,15 @@ int fence_signal_locked(struct fence *fence)
smp_mb__before_atomic();
}
- if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
ret = -EINVAL;
/*
- * we might have raced with the unlocked fence_signal,
+ * we might have raced with the unlocked dma_fence_signal,
* still run through all callbacks
*/
} else
- trace_fence_signaled(fence);
+ trace_dma_fence_signaled(fence);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
list_del_init(&cur->node);
@@ -92,19 +94,19 @@ int fence_signal_locked(struct fence *fence)
}
return ret;
}
-EXPORT_SYMBOL(fence_signal_locked);
+EXPORT_SYMBOL(dma_fence_signal_locked);
/**
- * fence_signal - signal completion of a fence
+ * dma_fence_signal - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
- * fence_wait() calls and run all the callbacks added with
- * fence_add_callback(). Can be called multiple times, but since a fence
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*/
-int fence_signal(struct fence *fence)
+int dma_fence_signal(struct dma_fence *fence)
{
unsigned long flags;
@@ -116,13 +118,13 @@ int fence_signal(struct fence *fence)
smp_mb__before_atomic();
}
- if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return -EINVAL;
- trace_fence_signaled(fence);
+ trace_dma_fence_signaled(fence);
- if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
- struct fence_cb *cur, *tmp;
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
+ struct dma_fence_cb *cur, *tmp;
spin_lock_irqsave(fence->lock, flags);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
@@ -133,10 +135,10 @@ int fence_signal(struct fence *fence)
}
return 0;
}
-EXPORT_SYMBOL(fence_signal);
+EXPORT_SYMBOL(dma_fence_signal);
/**
- * fence_wait_timeout - sleep until the fence gets signaled
+ * dma_fence_wait_timeout - sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
@@ -152,7 +154,7 @@ EXPORT_SYMBOL(fence_signal);
* freed before return, resulting in undefined behavior.
*/
signed long
-fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
signed long ret;
@@ -160,70 +162,71 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
return -EINVAL;
if (timeout == 0)
- return fence_is_signaled(fence);
+ return dma_fence_is_signaled(fence);
- trace_fence_wait_start(fence);
+ trace_dma_fence_wait_start(fence);
ret = fence->ops->wait(fence, intr, timeout);
- trace_fence_wait_end(fence);
+ trace_dma_fence_wait_end(fence);
return ret;
}
-EXPORT_SYMBOL(fence_wait_timeout);
+EXPORT_SYMBOL(dma_fence_wait_timeout);
-void fence_release(struct kref *kref)
+void dma_fence_release(struct kref *kref)
{
- struct fence *fence =
- container_of(kref, struct fence, refcount);
+ struct dma_fence *fence =
+ container_of(kref, struct dma_fence, refcount);
- trace_fence_destroy(fence);
+ trace_dma_fence_destroy(fence);
BUG_ON(!list_empty(&fence->cb_list));
if (fence->ops->release)
fence->ops->release(fence);
else
- fence_free(fence);
+ dma_fence_free(fence);
}
-EXPORT_SYMBOL(fence_release);
+EXPORT_SYMBOL(dma_fence_release);
-void fence_free(struct fence *fence)
+void dma_fence_free(struct dma_fence *fence)
{
kfree_rcu(fence, rcu);
}
-EXPORT_SYMBOL(fence_free);
+EXPORT_SYMBOL(dma_fence_free);
/**
- * fence_enable_sw_signaling - enable signaling on fence
+ * dma_fence_enable_sw_signaling - enable signaling on fence
* @fence: [in] the fence to enable
*
 * this will request sw signaling to be enabled, to make the fence
* complete as soon as possible
*/
-void fence_enable_sw_signaling(struct fence *fence)
+void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
- if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
- !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- trace_fence_enable_signal(fence);
+ if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ trace_dma_fence_enable_signal(fence);
spin_lock_irqsave(fence->lock, flags);
if (!fence->ops->enable_signaling(fence))
- fence_signal_locked(fence);
+ dma_fence_signal_locked(fence);
spin_unlock_irqrestore(fence->lock, flags);
}
}
-EXPORT_SYMBOL(fence_enable_sw_signaling);
+EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
/**
- * fence_add_callback - add a callback to be called when the fence
+ * dma_fence_add_callback - add a callback to be called when the fence
* is signaled
* @fence: [in] the fence to wait on
* @cb: [in] the callback to register
* @func: [in] the function to call
*
- * cb will be initialized by fence_add_callback, no initialization
+ * cb will be initialized by dma_fence_add_callback; no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
@@ -232,15 +235,15 @@ EXPORT_SYMBOL(fence_enable_sw_signaling);
* *not* call the callback)
*
* Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to fence_wait, however the caller doesn't need to
+ * refcount as it does to dma_fence_wait, however the caller doesn't need to
* keep a refcount to fence afterwards: when software access is enabled,
* the creator of the fence is required to keep the fence alive until
- * after it signals with fence_signal. The callback itself can be called
+ * after it signals with dma_fence_signal. The callback itself can be called
* from irq context.
*
*/
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
- fence_func_t func)
+int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+ dma_fence_func_t func)
{
unsigned long flags;
int ret = 0;
@@ -249,22 +252,23 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
if (WARN_ON(!fence || !func))
return -EINVAL;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
INIT_LIST_HEAD(&cb->node);
return -ENOENT;
}
spin_lock_irqsave(fence->lock, flags);
- was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
ret = -ENOENT;
else if (!was_set) {
- trace_fence_enable_signal(fence);
+ trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
- fence_signal_locked(fence);
+ dma_fence_signal_locked(fence);
ret = -ENOENT;
}
}
@@ -278,10 +282,10 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
return ret;
}
-EXPORT_SYMBOL(fence_add_callback);
+EXPORT_SYMBOL(dma_fence_add_callback);
/**
- * fence_remove_callback - remove a callback from the signaling list
+ * dma_fence_remove_callback - remove a callback from the signaling list
* @fence: [in] the fence to wait on
* @cb: [in] the callback to remove
*
@@ -296,7 +300,7 @@ EXPORT_SYMBOL(fence_add_callback);
* with a reference held to the fence.
*/
bool
-fence_remove_callback(struct fence *fence, struct fence_cb *cb)
+dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
unsigned long flags;
bool ret;
@@ -311,15 +315,15 @@ fence_remove_callback(struct fence *fence, struct fence_cb *cb)
return ret;
}
-EXPORT_SYMBOL(fence_remove_callback);
+EXPORT_SYMBOL(dma_fence_remove_callback);
struct default_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
+dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct default_wait_cb *wait =
container_of(cb, struct default_wait_cb, base);
@@ -328,7 +332,7 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
}
/**
- * fence_default_wait - default sleep until the fence gets signaled
+ * dma_fence_default_wait - default sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
@@ -338,14 +342,14 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
* remaining timeout in jiffies on success.
*/
signed long
-fence_default_wait(struct fence *fence, bool intr, signed long timeout)
+dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
struct default_wait_cb cb;
unsigned long flags;
signed long ret = timeout;
bool was_set;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return timeout;
spin_lock_irqsave(fence->lock, flags);
@@ -355,25 +359,26 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
goto out;
}
- was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
goto out;
if (!was_set) {
- trace_fence_enable_signal(fence);
+ trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
- fence_signal_locked(fence);
+ dma_fence_signal_locked(fence);
goto out;
}
}
- cb.base.func = fence_default_wait_cb;
+ cb.base.func = dma_fence_default_wait_cb;
cb.task = current;
list_add(&cb.base.node, &fence->cb_list);
- while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
+ while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
if (intr)
__set_current_state(TASK_INTERRUPTIBLE);
else
@@ -395,23 +400,23 @@ out:
spin_unlock_irqrestore(fence->lock, flags);
return ret;
}
-EXPORT_SYMBOL(fence_default_wait);
+EXPORT_SYMBOL(dma_fence_default_wait);
static bool
-fence_test_signaled_any(struct fence **fences, uint32_t count)
+dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count)
{
int i;
for (i = 0; i < count; ++i) {
- struct fence *fence = fences[i];
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ struct dma_fence *fence = fences[i];
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return true;
}
return false;
}
/**
- * fence_wait_any_timeout - sleep until any fence gets signaled
+ * dma_fence_wait_any_timeout - sleep until any fence gets signaled
* or until timeout elapses
* @fences: [in] array of fences to wait on
* @count: [in] number of fences to wait on
@@ -427,8 +432,8 @@ fence_test_signaled_any(struct fence **fences, uint32_t count)
* fence might be freed before return, resulting in undefined behavior.
*/
signed long
-fence_wait_any_timeout(struct fence **fences, uint32_t count,
- bool intr, signed long timeout)
+dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
+ bool intr, signed long timeout)
{
struct default_wait_cb *cb;
signed long ret = timeout;
@@ -439,7 +444,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
if (timeout == 0) {
for (i = 0; i < count; ++i)
- if (fence_is_signaled(fences[i]))
+ if (dma_fence_is_signaled(fences[i]))
return 1;
return 0;
@@ -452,16 +457,16 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
}
for (i = 0; i < count; ++i) {
- struct fence *fence = fences[i];
+ struct dma_fence *fence = fences[i];
- if (fence->ops->wait != fence_default_wait) {
+ if (fence->ops->wait != dma_fence_default_wait) {
ret = -EINVAL;
goto fence_rm_cb;
}
cb[i].task = current;
- if (fence_add_callback(fence, &cb[i].base,
- fence_default_wait_cb)) {
+ if (dma_fence_add_callback(fence, &cb[i].base,
+ dma_fence_default_wait_cb)) {
/* This fence is already signaled */
goto fence_rm_cb;
}
@@ -473,7 +478,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
else
set_current_state(TASK_UNINTERRUPTIBLE);
- if (fence_test_signaled_any(fences, count))
+ if (dma_fence_test_signaled_any(fences, count))
break;
ret = schedule_timeout(ret);
@@ -486,34 +491,34 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
fence_rm_cb:
while (i-- > 0)
- fence_remove_callback(fences[i], &cb[i].base);
+ dma_fence_remove_callback(fences[i], &cb[i].base);
err_free_cb:
kfree(cb);
return ret;
}
-EXPORT_SYMBOL(fence_wait_any_timeout);
+EXPORT_SYMBOL(dma_fence_wait_any_timeout);
/**
- * fence_init - Initialize a custom fence.
+ * dma_fence_init - Initialize a custom fence.
* @fence: [in] the fence to initialize
- * @ops: [in] the fence_ops for operations on this fence
+ * @ops: [in] the dma_fence_ops for operations on this fence
* @lock: [in] the irqsafe spinlock to use for locking this fence
* @context: [in] the execution context this fence is run on
* @seqno: [in] a linear increasing sequence number for this context
*
 * Initializes an allocated fence; the caller doesn't have to keep its
* refcount after committing with this fence, but it will need to hold a
- * refcount again if fence_ops.enable_signaling gets called. This can
+ * refcount again if dma_fence_ops.enable_signaling gets called. This can
 * be used for implementing other types of fence.
*
 * context and seqno are used for easy comparison between fences, allowing one
- * to check which fence is later by simply using fence_later.
+ * to check which fence is later by simply using dma_fence_later.
*/
void
-fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, u64 context, unsigned seqno)
+dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
@@ -527,6 +532,6 @@ fence_init(struct fence *fence, const struct fence_ops *ops,
fence->seqno = seqno;
fence->flags = 0UL;
- trace_fence_init(fence);
+ trace_dma_fence_init(fence);
}
-EXPORT_SYMBOL(fence_init);
+EXPORT_SYMBOL(dma_fence_init);
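
Taken together, the hunks above complete the core rename of the fence API. As a quick orientation aid (not part of the patch), here is a minimal sketch of a fence provider written against the renamed API; the "my_*" names are hypothetical and error handling is trimmed:

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_fence_lock);

static const char *my_get_driver_name(struct dma_fence *f)
{
	return "my_driver";	/* hypothetical driver name */
}

static const char *my_get_timeline_name(struct dma_fence *f)
{
	return "my_timeline";	/* hypothetical timeline name */
}

static bool my_enable_signaling(struct dma_fence *f)
{
	return true;	/* an irq handler will call dma_fence_signal() */
}

/* dma_fence_init() BUG_ONs unless these four hooks are provided. */
static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name	= my_get_driver_name,
	.get_timeline_name	= my_get_timeline_name,
	.enable_signaling	= my_enable_signaling,
	.wait			= dma_fence_default_wait,
};

static struct dma_fence *my_fence_create(u64 context, unsigned int seqno)
{
	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		dma_fence_init(f, &my_fence_ops, &my_fence_lock,
			       context, seqno);
	return f;	/* completed later via dma_fence_signal(f) */
}

A context for such fences would come from dma_fence_context_alloc(1), exactly as sw_sync does further down in this patch.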
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 723d8af988e5..7ed56f3edfb7 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared);
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
struct reservation_object_list *fobj,
- struct fence *fence)
+ struct dma_fence *fence)
{
u32 i;
- fence_get(fence);
+ dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
for (i = 0; i < fobj->shared_count; ++i) {
- struct fence *old_fence;
+ struct dma_fence *old_fence;
old_fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(obj));
@@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
write_seqcount_end(&obj->seq);
preempt_enable();
- fence_put(old_fence);
+ dma_fence_put(old_fence);
return;
}
}
@@ -143,12 +143,12 @@ static void
reservation_object_add_shared_replace(struct reservation_object *obj,
struct reservation_object_list *old,
struct reservation_object_list *fobj,
- struct fence *fence)
+ struct dma_fence *fence)
{
unsigned i;
- struct fence *old_fence = NULL;
+ struct dma_fence *old_fence = NULL;
- fence_get(fence);
+ dma_fence_get(fence);
if (!old) {
RCU_INIT_POINTER(fobj->shared[0], fence);
@@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
fobj->shared_count = old->shared_count;
for (i = 0; i < old->shared_count; ++i) {
- struct fence *check;
+ struct dma_fence *check;
check = rcu_dereference_protected(old->shared[i],
reservation_object_held(obj));
@@ -196,7 +196,7 @@ done:
kfree_rcu(old, rcu);
if (old_fence)
- fence_put(old_fence);
+ dma_fence_put(old_fence);
}
/**
@@ -208,7 +208,7 @@ done:
* reservation_object_reserve_shared() has been called.
*/
void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct reservation_object_list *old, *fobj = obj->staged;
@@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence);
* Add a fence to the exclusive slot. The obj->lock must be held.
*/
void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct fence *fence)
+ struct dma_fence *fence)
{
- struct fence *old_fence = reservation_object_get_excl(obj);
+ struct dma_fence *old_fence = reservation_object_get_excl(obj);
struct reservation_object_list *old;
u32 i = 0;
@@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
i = old->shared_count;
if (fence)
- fence_get(fence);
+ dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
@@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
/* inplace update, no shared fences */
while (i--)
- fence_put(rcu_dereference_protected(old->shared[i],
+ dma_fence_put(rcu_dereference_protected(old->shared[i],
reservation_object_held(obj)));
if (old_fence)
- fence_put(old_fence);
+ dma_fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
@@ -276,26 +276,32 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
* Zero or -errno
*/
int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct fence **pfence_excl,
+ struct dma_fence **pfence_excl,
unsigned *pshared_count,
- struct fence ***pshared)
+ struct dma_fence ***pshared)
{
- unsigned shared_count = 0;
- unsigned retry = 1;
- struct fence **shared = NULL, *fence_excl = NULL;
- int ret = 0;
+ struct dma_fence **shared = NULL;
+ struct dma_fence *fence_excl;
+ unsigned int shared_count;
+ int ret = 1;
- while (retry) {
+ do {
struct reservation_object_list *fobj;
unsigned seq;
+ unsigned int i;
- seq = read_seqcount_begin(&obj->seq);
+ shared_count = i = 0;
rcu_read_lock();
+ seq = read_seqcount_begin(&obj->seq);
+
+ fence_excl = rcu_dereference(obj->fence_excl);
+ if (fence_excl && !dma_fence_get_rcu(fence_excl))
+ goto unlock;
fobj = rcu_dereference(obj->fence);
if (fobj) {
- struct fence **nshared;
+ struct dma_fence **nshared;
size_t sz = sizeof(*shared) * fobj->shared_max;
nshared = krealloc(shared, sz,
@@ -309,52 +315,37 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
}
ret = -ENOMEM;
- shared_count = 0;
break;
}
shared = nshared;
- memcpy(shared, fobj->shared, sz);
shared_count = fobj->shared_count;
- } else
- shared_count = 0;
- fence_excl = rcu_dereference(obj->fence_excl);
-
- retry = read_seqcount_retry(&obj->seq, seq);
- if (retry)
- goto unlock;
-
- if (!fence_excl || fence_get_rcu(fence_excl)) {
- unsigned i;
for (i = 0; i < shared_count; ++i) {
- if (fence_get_rcu(shared[i]))
- continue;
-
- /* uh oh, refcount failed, abort and retry */
- while (i--)
- fence_put(shared[i]);
-
- if (fence_excl) {
- fence_put(fence_excl);
- fence_excl = NULL;
- }
-
- retry = 1;
- break;
+ shared[i] = rcu_dereference(fobj->shared[i]);
+ if (!dma_fence_get_rcu(shared[i]))
+ break;
}
- } else
- retry = 1;
+ }
+ if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+ while (i--)
+ dma_fence_put(shared[i]);
+ dma_fence_put(fence_excl);
+ goto unlock;
+ }
+
+ ret = 0;
unlock:
rcu_read_unlock();
- }
- *pshared_count = shared_count;
- if (shared_count)
- *pshared = shared;
- else {
- *pshared = NULL;
+ } while (ret);
+
+ if (!shared_count) {
kfree(shared);
+ shared = NULL;
}
+
+ *pshared_count = shared_count;
+ *pshared = shared;
*pfence_excl = fence_excl;
return ret;
@@ -377,7 +368,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout)
{
- struct fence *fence;
+ struct dma_fence *fence;
unsigned seq, shared_count, i = 0;
long ret = timeout;
@@ -397,20 +388,18 @@ retry:
if (fobj)
shared_count = fobj->shared_count;
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
-
for (i = 0; i < shared_count; ++i) {
- struct fence *lfence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &lfence->flags))
continue;
- if (!fence_get_rcu(lfence))
+ if (!dma_fence_get_rcu(lfence))
goto unlock_retry;
- if (fence_is_signaled(lfence)) {
- fence_put(lfence);
+ if (dma_fence_is_signaled(lfence)) {
+ dma_fence_put(lfence);
continue;
}
@@ -420,18 +409,16 @@ retry:
}
if (!shared_count) {
- struct fence *fence_excl = rcu_dereference(obj->fence_excl);
-
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
+ struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl &&
- !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
- if (!fence_get_rcu(fence_excl))
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence_excl->flags)) {
+ if (!dma_fence_get_rcu(fence_excl))
goto unlock_retry;
- if (fence_is_signaled(fence_excl))
- fence_put(fence_excl);
+ if (dma_fence_is_signaled(fence_excl))
+ dma_fence_put(fence_excl);
else
fence = fence_excl;
}
@@ -439,8 +426,13 @@ retry:
rcu_read_unlock();
if (fence) {
- ret = fence_wait_timeout(fence, intr, ret);
- fence_put(fence);
+ if (read_seqcount_retry(&obj->seq, seq)) {
+ dma_fence_put(fence);
+ goto retry;
+ }
+
+ ret = dma_fence_wait_timeout(fence, intr, ret);
+ dma_fence_put(fence);
if (ret > 0 && wait_all && (i + 1 < shared_count))
goto retry;
}
@@ -454,18 +446,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
static inline int
-reservation_object_test_signaled_single(struct fence *passed_fence)
+reservation_object_test_signaled_single(struct dma_fence *passed_fence)
{
- struct fence *fence, *lfence = passed_fence;
+ struct dma_fence *fence, *lfence = passed_fence;
int ret = 1;
- if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
- fence = fence_get_rcu(lfence);
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+ fence = dma_fence_get_rcu(lfence);
if (!fence)
return -1;
- ret = !!fence_is_signaled(fence);
- fence_put(fence);
+ ret = !!dma_fence_is_signaled(fence);
+ dma_fence_put(fence);
}
return ret;
}
@@ -484,12 +476,13 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
{
unsigned seq, shared_count;
- int ret = true;
+ int ret;
+ rcu_read_lock();
retry:
+ ret = true;
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
- rcu_read_lock();
if (test_all) {
unsigned i;
@@ -500,46 +493,35 @@ retry:
if (fobj)
shared_count = fobj->shared_count;
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
-
for (i = 0; i < shared_count; ++i) {
- struct fence *fence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
ret = reservation_object_test_signaled_single(fence);
if (ret < 0)
- goto unlock_retry;
+ goto retry;
else if (!ret)
break;
}
- /*
- * There could be a read_seqcount_retry here, but nothing cares
- * about whether it's the old or newer fence pointers that are
- * signaled. That race could still have happened after checking
- * read_seqcount_retry. If you care, use ww_mutex_lock.
- */
+ if (read_seqcount_retry(&obj->seq, seq))
+ goto retry;
}
if (!shared_count) {
- struct fence *fence_excl = rcu_dereference(obj->fence_excl);
-
- if (read_seqcount_retry(&obj->seq, seq))
- goto unlock_retry;
+ struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl) {
ret = reservation_object_test_signaled_single(
fence_excl);
if (ret < 0)
- goto unlock_retry;
+ goto retry;
+
+ if (read_seqcount_retry(&obj->seq, seq))
+ goto retry;
}
}
rcu_read_unlock();
return ret;
-
-unlock_retry:
- rcu_read_unlock();
- goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
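
The rewritten retry loops above keep the existing calling convention. As a hedged usage sketch (the wrapper function is hypothetical and error handling on the waits is trimmed), a caller that wants its own long-lived snapshot of an object's fences still does:

static long my_wait_all_fences(struct reservation_object *resv, bool intr)
{
	struct dma_fence *excl, **shared;
	unsigned int shared_count, i;
	int ret;

	ret = reservation_object_get_fences_rcu(resv, &excl,
						&shared_count, &shared);
	if (ret)
		return ret;

	/* The snapshot holds a reference on every returned fence. */
	for (i = 0; i < shared_count; ++i) {
		dma_fence_wait(shared[i], intr);
		dma_fence_put(shared[i]);
	}
	kfree(shared);

	if (excl) {
		dma_fence_wait(excl, intr);
		dma_fence_put(excl);
	}
	return 0;
}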
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c
index 71127f8f1626..f47112a64763 100644
--- a/drivers/dma-buf/seqno-fence.c
+++ b/drivers/dma-buf/seqno-fence.c
@@ -21,35 +21,35 @@
#include <linux/export.h>
#include <linux/seqno-fence.h>
-static const char *seqno_fence_get_driver_name(struct fence *fence)
+static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_driver_name(fence);
}
-static const char *seqno_fence_get_timeline_name(struct fence *fence)
+static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_timeline_name(fence);
}
-static bool seqno_enable_signaling(struct fence *fence)
+static bool seqno_enable_signaling(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->enable_signaling(fence);
}
-static bool seqno_signaled(struct fence *fence)
+static bool seqno_signaled(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
}
-static void seqno_release(struct fence *fence)
+static void seqno_release(struct dma_fence *fence)
{
struct seqno_fence *f = to_seqno_fence(fence);
@@ -57,18 +57,18 @@ static void seqno_release(struct fence *fence)
if (f->ops->release)
f->ops->release(fence);
else
- fence_free(&f->base);
+ dma_fence_free(&f->base);
}
-static signed long seqno_wait(struct fence *fence, bool intr,
- signed long timeout)
+static signed long seqno_wait(struct dma_fence *fence, bool intr,
+ signed long timeout)
{
struct seqno_fence *f = to_seqno_fence(fence);
return f->ops->wait(fence, intr, timeout);
}
-const struct fence_ops seqno_fence_ops = {
+const struct dma_fence_ops seqno_fence_ops = {
.get_driver_name = seqno_fence_get_driver_name,
.get_timeline_name = seqno_fence_get_timeline_name,
.enable_signaling = seqno_enable_signaling,
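
Each hook above recovers the wrapper with the usual container_of() pattern and forwards to the driver's own ops; to_seqno_fence() from <linux/seqno-fence.h> is, in essence:

static inline struct seqno_fence *to_seqno_fence(struct dma_fence *fence)
{
	/* Only fences built on seqno_fence_ops may be downcast. */
	if (fence->ops != &seqno_fence_ops)
		return NULL;
	return container_of(fence, struct seqno_fence, base);
}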
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 62e8e6dc7953..82e0ca4dd0c1 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -68,9 +68,9 @@ struct sw_sync_create_fence_data {
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-static const struct fence_ops timeline_fence_ops;
+static const struct dma_fence_ops timeline_fence_ops;
-static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
+static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
{
if (fence->ops != &timeline_fence_ops)
return NULL;
@@ -93,7 +93,7 @@ struct sync_timeline *sync_timeline_create(const char *name)
return NULL;
kref_init(&obj->kref);
- obj->context = fence_context_alloc(1);
+ obj->context = dma_fence_context_alloc(1);
strlcpy(obj->name, name, sizeof(obj->name));
INIT_LIST_HEAD(&obj->child_list_head);
@@ -146,7 +146,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
list_for_each_entry_safe(pt, next, &obj->active_list_head,
active_list) {
- if (fence_is_signaled_locked(&pt->base))
+ if (dma_fence_is_signaled_locked(&pt->base))
list_del_init(&pt->active_list);
}
@@ -179,30 +179,30 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
spin_lock_irqsave(&obj->child_list_lock, flags);
sync_timeline_get(obj);
- fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
- obj->context, value);
+ dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+ obj->context, value);
list_add_tail(&pt->child_list, &obj->child_list_head);
INIT_LIST_HEAD(&pt->active_list);
spin_unlock_irqrestore(&obj->child_list_lock, flags);
return pt;
}
-static const char *timeline_fence_get_driver_name(struct fence *fence)
+static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
{
return "sw_sync";
}
-static const char *timeline_fence_get_timeline_name(struct fence *fence)
+static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
{
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
return parent->name;
}
-static void timeline_fence_release(struct fence *fence)
+static void timeline_fence_release(struct dma_fence *fence)
{
- struct sync_pt *pt = fence_to_sync_pt(fence);
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
@@ -212,20 +212,20 @@ static void timeline_fence_release(struct fence *fence)
spin_unlock_irqrestore(fence->lock, flags);
sync_timeline_put(parent);
- fence_free(fence);
+ dma_fence_free(fence);
}
-static bool timeline_fence_signaled(struct fence *fence)
+static bool timeline_fence_signaled(struct dma_fence *fence)
{
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
return (fence->seqno > parent->value) ? false : true;
}
-static bool timeline_fence_enable_signaling(struct fence *fence)
+static bool timeline_fence_enable_signaling(struct dma_fence *fence)
{
- struct sync_pt *pt = fence_to_sync_pt(fence);
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
if (timeline_fence_signaled(fence))
return false;
@@ -234,26 +234,26 @@ static bool timeline_fence_enable_signaling(struct fence *fence)
return true;
}
-static void timeline_fence_value_str(struct fence *fence,
+static void timeline_fence_value_str(struct dma_fence *fence,
char *str, int size)
{
snprintf(str, size, "%d", fence->seqno);
}
-static void timeline_fence_timeline_value_str(struct fence *fence,
+static void timeline_fence_timeline_value_str(struct dma_fence *fence,
char *str, int size)
{
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
snprintf(str, size, "%d", parent->value);
}
-static const struct fence_ops timeline_fence_ops = {
+static const struct dma_fence_ops timeline_fence_ops = {
.get_driver_name = timeline_fence_get_driver_name,
.get_timeline_name = timeline_fence_get_timeline_name,
.enable_signaling = timeline_fence_enable_signaling,
.signaled = timeline_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
.timeline_value_str = timeline_fence_timeline_value_str,
@@ -317,7 +317,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
sync_file = sync_file_create(&pt->base);
if (!sync_file) {
- fence_put(&pt->base);
+ dma_fence_put(&pt->base);
err = -ENOMEM;
goto err;
}
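
For completeness, a hedged userspace sketch of the two ioctls this file implements; the device path is an assumption (sw_sync is typically exposed through debugfs, so it depends on where debugfs is mounted), and error handling is omitted:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>

struct sw_sync_create_fence_data {
	__u32	value;
	char	name[32];
	__s32	fence;		/* out: sync_file fd */
};

#define SW_SYNC_IOC_MAGIC	'W'
#define SW_SYNC_IOC_CREATE_FENCE \
	_IOWR(SW_SYNC_IOC_MAGIC, 0, struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC		_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)

int main(void)
{
	/* Assumed path; adjust for your debugfs mount point. */
	int tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
	struct sw_sync_create_fence_data data = { .value = 1 };
	__u32 inc = 1;

	strcpy(data.name, "example_pt");
	ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data); /* fd in data.fence */
	ioctl(tl, SW_SYNC_IOC_INC, &inc);	    /* signals the fence */
	return 0;
}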
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 2dd4c3db6caa..48b20e34fb6d 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -71,12 +71,13 @@ static const char *sync_status_str(int status)
return "error";
}
-static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
+static void sync_print_fence(struct seq_file *s,
+ struct dma_fence *fence, bool show)
{
int status = 1;
- struct sync_timeline *parent = fence_parent(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
- if (fence_is_signaled_locked(fence))
+ if (dma_fence_is_signaled_locked(fence))
status = fence->status;
seq_printf(s, " %s%sfence %s",
@@ -135,10 +136,10 @@ static void sync_print_sync_file(struct seq_file *s,
int i;
seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
- sync_status_str(!fence_is_signaled(sync_file->fence)));
+ sync_status_str(!dma_fence_is_signaled(sync_file->fence)));
- if (fence_is_array(sync_file->fence)) {
- struct fence_array *array = to_fence_array(sync_file->fence);
+ if (dma_fence_is_array(sync_file->fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
for (i = 0; i < array->num_fences; ++i)
sync_print_fence(s, array->fences[i], true);
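
The printer above branches on whether the sync_file wraps a merged fence. Any consumer holding a possibly-merged fence can apply the same test; a small hedged sketch (the function name is made up):

static unsigned int my_count_pending(struct dma_fence *fence)
{
	unsigned int i, pending = 0;

	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		for (i = 0; i < array->num_fences; ++i)
			if (!dma_fence_is_signaled(array->fences[i]))
				pending++;
	} else if (!dma_fence_is_signaled(fence)) {
		pending = 1;
	}
	return pending;
}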
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index d269aa6783aa..26fe8b9907b3 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -15,7 +15,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/sync_file.h>
#include <uapi/linux/sync_file.h>
@@ -45,10 +45,9 @@ struct sync_timeline {
struct list_head sync_timeline_list;
};
-static inline struct sync_timeline *fence_parent(struct fence *fence)
+static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
{
- return container_of(fence->lock, struct sync_timeline,
- child_list_lock);
+ return container_of(fence->lock, struct sync_timeline, child_list_lock);
}
/**
@@ -58,7 +57,7 @@ static inline struct sync_timeline *fence_parent(struct fence *fence)
* @active_list: sync timeline active child's list
*/
struct sync_pt {
- struct fence base;
+ struct dma_fence base;
struct list_head child_list;
struct list_head active_list;
};
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index b29a9e817320..69d8ef98d34c 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -54,7 +54,7 @@ err:
return NULL;
}
-static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
+static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct sync_file *sync_file;
@@ -71,7 +71,7 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
 * takes its own reference to @fence. The sync_file can be released with
* fput(sync_file->file). Returns the sync_file or NULL in case of error.
*/
-struct sync_file *sync_file_create(struct fence *fence)
+struct sync_file *sync_file_create(struct dma_fence *fence)
{
struct sync_file *sync_file;
@@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence)
if (!sync_file)
return NULL;
- sync_file->fence = fence;
+ sync_file->fence = dma_fence_get(fence);
snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
@@ -121,16 +121,16 @@ err:
* Ensures @fd references a valid sync_file and returns a fence that
 * represents all fences in the sync_file. On error NULL is returned.
*/
-struct fence *sync_file_get_fence(int fd)
+struct dma_fence *sync_file_get_fence(int fd)
{
struct sync_file *sync_file;
- struct fence *fence;
+ struct dma_fence *fence;
sync_file = sync_file_fdget(fd);
if (!sync_file)
return NULL;
- fence = fence_get(sync_file->fence);
+ fence = dma_fence_get(sync_file->fence);
fput(sync_file->file);
return fence;
@@ -138,22 +138,23 @@ struct fence *sync_file_get_fence(int fd)
EXPORT_SYMBOL(sync_file_get_fence);
static int sync_file_set_fence(struct sync_file *sync_file,
- struct fence **fences, int num_fences)
+ struct dma_fence **fences, int num_fences)
{
- struct fence_array *array;
+ struct dma_fence_array *array;
/*
 * The reference for the fences in the new sync_file is held
* in add_fence() during the merge procedure, so for num_fences == 1
 * we already own a new reference to the fence. For num_fences > 1
- * we own the reference of the fence_array creation.
+ * we own the reference of the dma_fence_array creation.
*/
if (num_fences == 1) {
sync_file->fence = fences[0];
kfree(fences);
} else {
- array = fence_array_create(num_fences, fences,
- fence_context_alloc(1), 1, false);
+ array = dma_fence_array_create(num_fences, fences,
+ dma_fence_context_alloc(1),
+ 1, false);
if (!array)
return -ENOMEM;
@@ -163,10 +164,11 @@ static int sync_file_set_fence(struct sync_file *sync_file,
return 0;
}
-static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
+static struct dma_fence **get_fences(struct sync_file *sync_file,
+ int *num_fences)
{
- if (fence_is_array(sync_file->fence)) {
- struct fence_array *array = to_fence_array(sync_file->fence);
+ if (dma_fence_is_array(sync_file->fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
*num_fences = array->num_fences;
return array->fences;
@@ -176,12 +178,13 @@ static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
return &sync_file->fence;
}
-static void add_fence(struct fence **fences, int *i, struct fence *fence)
+static void add_fence(struct dma_fence **fences,
+ int *i, struct dma_fence *fence)
{
fences[*i] = fence;
- if (!fence_is_signaled(fence)) {
- fence_get(fence);
+ if (!dma_fence_is_signaled(fence)) {
+ dma_fence_get(fence);
(*i)++;
}
}
@@ -200,7 +203,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
struct sync_file *sync_file;
- struct fence **fences, **nfences, **a_fences, **b_fences;
+ struct dma_fence **fences, **nfences, **a_fences, **b_fences;
int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
sync_file = sync_file_alloc();
@@ -226,8 +229,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
* and sync_file_create, this is a reasonable assumption.
*/
for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
- struct fence *pt_a = a_fences[i_a];
- struct fence *pt_b = b_fences[i_b];
+ struct dma_fence *pt_a = a_fences[i_a];
+ struct dma_fence *pt_b = b_fences[i_b];
if (pt_a->context < pt_b->context) {
add_fence(fences, &i, pt_a);
@@ -255,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
add_fence(fences, &i, b_fences[i_b]);
if (i == 0)
- fences[i++] = fence_get(a_fences[0]);
+ fences[i++] = dma_fence_get(a_fences[0]);
if (num_fences > i) {
nfences = krealloc(fences, i * sizeof(*fences),
@@ -286,8 +289,8 @@ static void sync_file_free(struct kref *kref)
kref);
if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
- fence_remove_callback(sync_file->fence, &sync_file->cb);
- fence_put(sync_file->fence);
+ dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
+ dma_fence_put(sync_file->fence);
kfree(sync_file);
}
@@ -307,12 +310,12 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
if (!poll_does_not_wait(wait) &&
!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
- if (fence_add_callback(sync_file->fence, &sync_file->cb,
- fence_check_cb_func) < 0)
+ if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
+ fence_check_cb_func) < 0)
wake_up_all(&sync_file->wq);
}
- return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
+ return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0;
}
static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -370,14 +373,14 @@ err_put_fd:
return err;
}
-static void sync_fill_fence_info(struct fence *fence,
+static void sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
sizeof(info->obj_name));
strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
sizeof(info->driver_name));
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
info->status = fence->status >= 0 ? 1 : fence->status;
else
info->status = 0;
@@ -389,7 +392,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
{
struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
- struct fence **fences;
+ struct dma_fence **fences;
__u32 size;
int num_fences, ret, i;
@@ -429,7 +432,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
strlcpy(info.name, sync_file->name, sizeof(info.name));
- info.status = fence_is_signaled(sync_file->fence);
+ info.status = dma_fence_is_signaled(sync_file->fence);
info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
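
One behavioral change hides among the renames in this file: sync_file_create() now takes its own reference (the dma_fence_get() added near the top of the file), so a caller keeps, and must eventually drop, the reference it passed in. A hedged sketch of the resulting driver pattern (names hypothetical):

#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/sync_file.h>

static int my_fence_to_fd(struct dma_fence *fence)
{
	struct sync_file *sync_file = sync_file_create(fence);
	int fd;

	/* sync_file (if created) holds its own reference now. */
	dma_fence_put(fence);
	if (!sync_file)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		fput(sync_file->file);
		return fd;
	}
	fd_install(fd, sync_file->file);
	return fd;
}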
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 25c720454017..74579d2e796e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,7 +9,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
- drm_info.o drm_debugfs.o drm_encoder_slave.o \
+ drm_info.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
@@ -23,6 +23,7 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
+drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 248a05d02917..41bd2bf28f4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
- amdgpu_gtt_mgr.o
+ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index b8d66670bb17..06192698bd96 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,7 +90,6 @@
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
-#define ENCODER_OBJECT_ID_VIRTUAL 0x28
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
@@ -120,7 +119,6 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
-#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17
/* deleted */
@@ -149,7 +147,6 @@
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
#define GRAPH_OBJECT_ENUM_ID7 0x07
-#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
/****************************************************/
/* Graphics Object ID Bit definition */
@@ -411,10 +408,6 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
-#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
-
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 039b57e4644c..2ec7b3baeec2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -34,7 +34,7 @@
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -53,7 +53,11 @@
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_gds.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_vm.h"
#include "amd_powerplay.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "gpu_scheduler.h"
@@ -97,6 +101,7 @@ extern char *amdgpu_disable_cu;
extern int amdgpu_sclk_deep_sleep_en;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
+extern int amdgpu_vram_page_split;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -107,12 +112,6 @@ extern unsigned amdgpu_pp_feature_mask;
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 8
-/* max number of rings */
-#define AMDGPU_MAX_RINGS 16
-#define AMDGPU_MAX_GFX_RINGS 1
-#define AMDGPU_MAX_COMPUTE_RINGS 8
-#define AMDGPU_MAX_VCE_RINGS 3
-
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
@@ -152,8 +151,6 @@ extern unsigned amdgpu_pp_feature_mask;
struct amdgpu_device;
struct amdgpu_ib;
-struct amdgpu_vm;
-struct amdgpu_ring;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
@@ -198,21 +195,38 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
bool amdgpu_is_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+#define AMDGPU_MAX_IP_NUM 16
+
+struct amdgpu_ip_block_status {
+ bool valid;
+ bool sw;
+ bool hw;
+ bool late_initialized;
+ bool hang;
+};
+
struct amdgpu_ip_block_version {
- enum amd_ip_block_type type;
- u32 major;
- u32 minor;
- u32 rev;
+ const enum amd_ip_block_type type;
+ const u32 major;
+ const u32 minor;
+ const u32 rev;
const struct amd_ip_funcs *funcs;
};
+struct amdgpu_ip_block {
+ struct amdgpu_ip_block_status status;
+ const struct amdgpu_ip_block_version *version;
+};
+
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
enum amd_ip_block_type type,
u32 major, u32 minor);
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
- struct amdgpu_device *adev,
- enum amd_ip_block_type type);
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type);
+
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version);
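
The hunk above replaces the old array-of-versions lookup with per-device amdgpu_ip_block instances registered one at a time. A hedged sketch of the new registration flow, with an entirely hypothetical IP block:

static const struct amd_ip_funcs my_ip_funcs = {
	.name = "my_block",	/* hypothetical; real blocks fill all hooks */
};

static const struct amdgpu_ip_block_version my_ip_block = {
	.type	= AMD_IP_BLOCK_TYPE_COMMON,
	.major	= 1,
	.minor	= 0,
	.rev	= 0,
	.funcs	= &my_ip_funcs,
};

static int my_asic_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Called from ASIC setup; each call appends to adev->ip_blocks. */
	return amdgpu_ip_block_add(adev, &my_ip_block);
}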
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
@@ -286,47 +300,6 @@ struct amdgpu_ih_funcs {
void (*set_rptr)(struct amdgpu_device *adev);
};
-/* provided by hw blocks that expose a ring buffer for commands */
-struct amdgpu_ring_funcs {
- /* ring read/write ptr handling */
- u32 (*get_rptr)(struct amdgpu_ring *ring);
- u32 (*get_wptr)(struct amdgpu_ring *ring);
- void (*set_wptr)(struct amdgpu_ring *ring);
- /* validating and patching of IBs */
- int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
- /* command emit functions */
- void (*emit_ib)(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch);
- void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
- uint64_t seq, unsigned flags);
- void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
- void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
- uint64_t pd_addr);
- void (*emit_hdp_flush)(struct amdgpu_ring *ring);
- void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
- void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
- /* testing functions */
- int (*test_ring)(struct amdgpu_ring *ring);
- int (*test_ib)(struct amdgpu_ring *ring, long timeout);
- /* insert NOP packets */
- void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
- /* pad the indirect buffer to the necessary number of dw */
- void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
- unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
- void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
- /* note usage for clock and power gating */
- void (*begin_use)(struct amdgpu_ring *ring);
- void (*end_use)(struct amdgpu_ring *ring);
- void (*emit_switch_buffer) (struct amdgpu_ring *ring);
- void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
- unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
- unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
-};
-
/*
* BIOS.
*/
@@ -364,47 +337,6 @@ struct amdgpu_clock {
};
/*
- * Fences.
- */
-struct amdgpu_fence_driver {
- uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
- /* sync_seq is protected by ring emission lock */
- uint32_t sync_seq;
- atomic_t last_seq;
- bool initialized;
- struct amdgpu_irq_src *irq_src;
- unsigned irq_type;
- struct timer_list fallback_timer;
- unsigned num_fences_mask;
- spinlock_t lock;
- struct fence **fences;
-};
-
-/* some special values for the owner field */
-#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
-#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
-
-#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
-#define AMDGPU_FENCE_FLAG_INT (1 << 1)
-
-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission);
-int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq_src,
- unsigned irq_type);
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
-void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
-unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-
-/*
* BO.
*/
struct amdgpu_bo_list_entry {
@@ -427,7 +359,7 @@ struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
- struct fence *last_pt_update;
+ struct dma_fence *last_pt_update;
unsigned ref_count;
/* protected by vm mutex and spinlock */
@@ -464,7 +396,6 @@ struct amdgpu_bo {
*/
struct list_head va;
/* Constant after initialization */
- struct amdgpu_device *adev;
struct drm_gem_object gem_base;
struct amdgpu_bo *parent;
struct amdgpu_bo *shadow;
@@ -543,7 +474,7 @@ struct amdgpu_sa_bo {
struct amdgpu_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
- struct fence *fence;
+ struct dma_fence *fence;
};
/*
@@ -561,27 +492,6 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-/*
- * Synchronization
- */
-struct amdgpu_sync {
- DECLARE_HASHTABLE(fences, 4);
- struct fence *last_vm_update;
-};
-
-void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct fence *f);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct reservation_object *resv,
- void *owner);
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
- struct amdgpu_ring *ring);
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-void amdgpu_sync_free(struct amdgpu_sync *sync);
-int amdgpu_sync_init(void);
-void amdgpu_sync_fini(void);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
@@ -703,10 +613,10 @@ struct amdgpu_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct amdgpu_bo *old_abo;
- struct fence *excl;
+ struct dma_fence *excl;
unsigned shared_count;
- struct fence **shared;
- struct fence_cb cb;
+ struct dma_fence **shared;
+ struct dma_fence_cb cb;
bool async;
};
@@ -723,14 +633,6 @@ struct amdgpu_ib {
uint32_t flags;
};
-enum amdgpu_ring_type {
- AMDGPU_RING_TYPE_GFX,
- AMDGPU_RING_TYPE_COMPUTE,
- AMDGPU_RING_TYPE_SDMA,
- AMDGPU_RING_TYPE_UVD,
- AMDGPU_RING_TYPE_VCE
-};
-
extern const struct amd_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -742,214 +644,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
- struct fence **f);
-
-struct amdgpu_ring {
- struct amdgpu_device *adev;
- const struct amdgpu_ring_funcs *funcs;
- struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler sched;
-
- struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
- unsigned rptr_offs;
- unsigned wptr;
- unsigned wptr_old;
- unsigned ring_size;
- unsigned max_dw;
- int count_dw;
- uint64_t gpu_addr;
- uint32_t align_mask;
- uint32_t ptr_mask;
- bool ready;
- u32 nop;
- u32 idx;
- u32 me;
- u32 pipe;
- u32 queue;
- struct amdgpu_bo *mqd_obj;
- u32 doorbell_index;
- bool use_doorbell;
- unsigned wptr_offs;
- unsigned fence_offs;
- uint64_t current_ctx;
- enum amdgpu_ring_type type;
- char name[16];
- unsigned cond_exe_offs;
- u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *ent;
-#endif
-};
-
-/*
- * VM
- */
-
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM 16
-
-/* Maximum number of PTEs the hardware can write with one command */
-#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
-
-/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
-
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
-#define AMDGPU_PTE_VALID (1 << 0)
-#define AMDGPU_PTE_SYSTEM (1 << 1)
-#define AMDGPU_PTE_SNOOPED (1 << 2)
-
-/* VI only */
-#define AMDGPU_PTE_EXECUTABLE (1 << 4)
-
-#define AMDGPU_PTE_READABLE (1 << 5)
-#define AMDGPU_PTE_WRITEABLE (1 << 6)
-
-#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
-
-/* How to programm VM fault handling */
-#define AMDGPU_VM_FAULT_STOP_NEVER 0
-#define AMDGPU_VM_FAULT_STOP_FIRST 1
-#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
-
-struct amdgpu_vm_pt {
- struct amdgpu_bo_list_entry entry;
- uint64_t addr;
- uint64_t shadow_addr;
-};
-
-struct amdgpu_vm {
- /* tree of virtual addresses mapped */
- struct rb_root va;
-
- /* protecting invalidated */
- spinlock_t status_lock;
-
- /* BOs moved, but not yet updated in the PT */
- struct list_head invalidated;
-
- /* BOs cleared in the PT because of a move */
- struct list_head cleared;
-
- /* BO mappings freed, but not yet updated in the PT */
- struct list_head freed;
-
- /* contains the page directory */
- struct amdgpu_bo *page_directory;
- unsigned max_pde_used;
- struct fence *page_directory_fence;
- uint64_t last_eviction_counter;
-
- /* array of page tables, one for each page directory entry */
- struct amdgpu_vm_pt *page_tables;
-
- /* for id and flush management per ring */
- struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
-
- /* protecting freed */
- spinlock_t freed_lock;
-
- /* Scheduler entity for page table updates */
- struct amd_sched_entity entity;
-
- /* client id */
- u64 client_id;
-};
-
-struct amdgpu_vm_id {
- struct list_head list;
- struct fence *first;
- struct amdgpu_sync active;
- struct fence *last_flush;
- atomic64_t owner;
-
- uint64_t pd_gpu_addr;
- /* last flushed PD/PT update */
- struct fence *flushed_updates;
-
- uint32_t current_gpu_reset_count;
-
- uint32_t gds_base;
- uint32_t gds_size;
- uint32_t gws_base;
- uint32_t gws_size;
- uint32_t oa_base;
- uint32_t oa_size;
-};
-
-struct amdgpu_vm_manager {
- /* Handling of VMIDs */
- struct mutex lock;
- unsigned num_ids;
- struct list_head ids_lru;
- struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
-
- /* Handling of VM fences */
- u64 fence_context;
- unsigned seqno[AMDGPU_MAX_RINGS];
-
- uint32_t max_pfn;
- /* vram base address for page table entry */
- u64 vram_base_offset;
- /* is vm enabled? */
- bool enabled;
- /* vm pte handling */
- const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
- struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
- unsigned vm_pte_num_rings;
- atomic_t vm_pte_next_ring;
- /* client id counter */
- atomic64_t client_counter;
-};
-
-void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
- struct list_head *validated,
- struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct list_head *duplicates);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct fence *fence,
- struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- bool clear);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
- struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- uint64_t addr, uint64_t offset,
- uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
- uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va);
+ struct dma_fence **f);
/*
* context related structures
@@ -957,7 +652,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_ctx_ring {
uint64_t sequence;
- struct fence **fences;
+ struct dma_fence **fences;
struct amd_sched_entity entity;
};
@@ -966,7 +661,7 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
unsigned reset_counter;
spinlock_t ring_lock;
- struct fence **fences;
+ struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented;
};
@@ -982,8 +677,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence);
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct dma_fence *fence);
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
@@ -1093,6 +788,16 @@ struct amdgpu_scratch {
/*
* GFX configurations
*/
+#define AMDGPU_GFX_MAX_SE 4
+#define AMDGPU_GFX_MAX_SH_PER_SE 2
+
+struct amdgpu_rb_config {
+ uint32_t rb_backend_disable;
+ uint32_t user_rb_backend_disable;
+ uint32_t raster_config;
+ uint32_t raster_config_1;
+};
+
struct amdgpu_gca_config {
unsigned max_shader_engines;
unsigned max_tile_pipes;
@@ -1121,6 +826,8 @@ struct amdgpu_gca_config {
uint32_t tile_mode_array[32];
uint32_t macrotile_mode_array[16];
+
+ struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
};
struct amdgpu_cu_info {
@@ -1133,6 +840,7 @@ struct amdgpu_gfx_funcs {
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+ void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
};
struct amdgpu_gfx {
@@ -1181,23 +889,13 @@ struct amdgpu_gfx {
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct fence *f);
+ struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ib, struct fence *last_vm_update,
- struct amdgpu_job *job, struct fence **f);
+ struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
+ struct amdgpu_job *job, struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
-void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_undo(struct amdgpu_ring *ring);
-int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned ring_size, u32 nop, u32 align_mask,
- struct amdgpu_irq_src *irq_src, unsigned irq_type,
- enum amdgpu_ring_type ring_type);
-void amdgpu_ring_fini(struct amdgpu_ring *ring);
/*
* CS.
@@ -1225,7 +923,7 @@ struct amdgpu_cs_parser {
struct amdgpu_bo_list *bo_list;
struct amdgpu_bo_list_entry vm_pd;
struct list_head validated;
- struct fence *fence;
+ struct dma_fence *fence;
uint64_t bytes_moved_threshold;
uint64_t bytes_moved;
struct amdgpu_bo_list_entry *evictable;
@@ -1245,7 +943,7 @@ struct amdgpu_job {
struct amdgpu_ring *ring;
struct amdgpu_sync sync;
struct amdgpu_ib *ibs;
- struct fence *fence; /* the hw fence */
+ struct dma_fence *fence; /* the hw fence */
uint32_t preamble_status;
uint32_t num_ibs;
void *owner;
@@ -1294,354 +992,6 @@ struct amdgpu_wb {
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
-
-
-enum amdgpu_int_thermal_type {
- THERMAL_TYPE_NONE,
- THERMAL_TYPE_EXTERNAL,
- THERMAL_TYPE_EXTERNAL_GPIO,
- THERMAL_TYPE_RV6XX,
- THERMAL_TYPE_RV770,
- THERMAL_TYPE_ADT7473_WITH_INTERNAL,
- THERMAL_TYPE_EVERGREEN,
- THERMAL_TYPE_SUMO,
- THERMAL_TYPE_NI,
- THERMAL_TYPE_SI,
- THERMAL_TYPE_EMC2103_WITH_INTERNAL,
- THERMAL_TYPE_CI,
- THERMAL_TYPE_KV,
-};
-
-enum amdgpu_dpm_auto_throttle_src {
- AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
- AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
- AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
- AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
- AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
- AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-#define AMDGPU_MAX_VCE_LEVELS 6
-
-enum amdgpu_vce_level {
- AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
- AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
- AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
- AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
- AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
- AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-struct amdgpu_ps {
- u32 caps; /* vbios flags */
- u32 class; /* vbios flags */
- u32 class2; /* vbios flags */
- /* UVD clocks */
- u32 vclk;
- u32 dclk;
- /* VCE clocks */
- u32 evclk;
- u32 ecclk;
- bool vce_active;
- enum amdgpu_vce_level vce_level;
- /* asic priv */
- void *ps_priv;
-};
-
-struct amdgpu_dpm_thermal {
- /* thermal interrupt work */
- struct work_struct work;
- /* low temperature threshold */
- int min_temp;
- /* high temperature threshold */
- int max_temp;
- /* was last interrupt low to high or high to low */
- bool high_to_low;
- /* interrupt source */
- struct amdgpu_irq_src irq;
-};
-
-enum amdgpu_clk_action
-{
- AMDGPU_SCLK_UP = 1,
- AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
- u32 sclk;
- u32 mclk;
- enum amdgpu_clk_action action;
-};
-
-struct amdgpu_clock_and_voltage_limits {
- u32 sclk;
- u32 mclk;
- u16 vddc;
- u16 vddci;
-};
-
-struct amdgpu_clock_array {
- u32 count;
- u32 *values;
-};
-
-struct amdgpu_clock_voltage_dependency_entry {
- u32 clk;
- u16 v;
-};
-
-struct amdgpu_clock_voltage_dependency_table {
- u32 count;
- struct amdgpu_clock_voltage_dependency_entry *entries;
-};
-
-union amdgpu_cac_leakage_entry {
- struct {
- u16 vddc;
- u32 leakage;
- };
- struct {
- u16 vddc1;
- u16 vddc2;
- u16 vddc3;
- };
-};
-
-struct amdgpu_cac_leakage_table {
- u32 count;
- union amdgpu_cac_leakage_entry *entries;
-};
-
-struct amdgpu_phase_shedding_limits_entry {
- u16 voltage;
- u32 sclk;
- u32 mclk;
-};
-
-struct amdgpu_phase_shedding_limits_table {
- u32 count;
- struct amdgpu_phase_shedding_limits_entry *entries;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_entry {
- u32 vclk;
- u32 dclk;
- u16 v;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_entry {
- u32 ecclk;
- u32 evclk;
- u16 v;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_ppm_table {
- u8 ppm_design;
- u16 cpu_core_number;
- u32 platform_tdp;
- u32 small_ac_platform_tdp;
- u32 platform_tdc;
- u32 small_ac_platform_tdc;
- u32 apu_tdp;
- u32 dgpu_tdp;
- u32 dgpu_ulv_power;
- u32 tj_max;
-};
-
-struct amdgpu_cac_tdp_table {
- u16 tdp;
- u16 configurable_tdp;
- u16 tdc;
- u16 battery_power_limit;
- u16 small_power_limit;
- u16 low_cac_leakage;
- u16 high_cac_leakage;
- u16 maximum_power_delivery_limit;
-};
-
-struct amdgpu_dpm_dynamic_state {
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
- struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
- struct amdgpu_clock_array valid_sclk_values;
- struct amdgpu_clock_array valid_mclk_values;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
- u32 mclk_sclk_ratio;
- u32 sclk_mclk_delta;
- u16 vddc_vddci_delta;
- u16 min_vddc_for_pcie_gen2;
- struct amdgpu_cac_leakage_table cac_leakage_table;
- struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
- struct amdgpu_ppm_table *ppm_table;
- struct amdgpu_cac_tdp_table *cac_tdp_table;
-};
-
-struct amdgpu_dpm_fan {
- u16 t_min;
- u16 t_med;
- u16 t_high;
- u16 pwm_min;
- u16 pwm_med;
- u16 pwm_high;
- u8 t_hyst;
- u32 cycle_delay;
- u16 t_max;
- u8 control_mode;
- u16 default_max_fan_pwm;
- u16 default_fan_output_sensitivity;
- u16 fan_output_sensitivity;
- bool ucode_fan_control;
-};
-
-enum amdgpu_pcie_gen {
- AMDGPU_PCIE_GEN1 = 0,
- AMDGPU_PCIE_GEN2 = 1,
- AMDGPU_PCIE_GEN3 = 2,
- AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-enum amdgpu_dpm_forced_level {
- AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
- AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
- AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
- AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
-struct amdgpu_vce_state {
- /* vce clocks */
- u32 evclk;
- u32 ecclk;
- /* gpu clocks */
- u32 sclk;
- u32 mclk;
- u8 clk_idx;
- u8 pstate;
-};
-
-struct amdgpu_dpm_funcs {
- int (*get_temperature)(struct amdgpu_device *adev);
- int (*pre_set_power_state)(struct amdgpu_device *adev);
- int (*set_power_state)(struct amdgpu_device *adev);
- void (*post_set_power_state)(struct amdgpu_device *adev);
- void (*display_configuration_changed)(struct amdgpu_device *adev);
- u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
- u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
- void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
- void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
- int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
- bool (*vblank_too_short)(struct amdgpu_device *adev);
- void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
- void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
- void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
- void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
- u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
- int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
- int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
- int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
- int (*get_sclk_od)(struct amdgpu_device *adev);
- int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*get_mclk_od)(struct amdgpu_device *adev);
- int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
-};
-
-struct amdgpu_dpm {
- struct amdgpu_ps *ps;
- /* number of valid power states */
- int num_ps;
- /* current power state that is active */
- struct amdgpu_ps *current_ps;
- /* requested power state */
- struct amdgpu_ps *requested_ps;
- /* boot up power state */
- struct amdgpu_ps *boot_ps;
- /* default uvd power state */
- struct amdgpu_ps *uvd_ps;
- /* vce requirements */
- struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
- enum amdgpu_vce_level vce_level;
- enum amd_pm_state_type state;
- enum amd_pm_state_type user_state;
- u32 platform_caps;
- u32 voltage_response_time;
- u32 backbias_response_time;
- void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
- struct amdgpu_dpm_dynamic_state dyn_state;
- struct amdgpu_dpm_fan fan;
- u32 tdp_limit;
- u32 near_tdp_limit;
- u32 near_tdp_limit_adjusted;
- u32 sq_ramping_threshold;
- u32 cac_leakage;
- u16 tdp_od_limit;
- u32 tdp_adjustment;
- u16 load_line_slope;
- bool power_control;
- bool ac_power;
- /* special states active */
- bool thermal_active;
- bool uvd_active;
- bool vce_active;
- /* thermal handling */
- struct amdgpu_dpm_thermal thermal;
- /* forced levels */
- enum amdgpu_dpm_forced_level forced_level;
-};
-
-struct amdgpu_pm {
- struct mutex mutex;
- u32 current_sclk;
- u32 current_mclk;
- u32 default_sclk;
- u32 default_mclk;
- struct amdgpu_i2c_chan *i2c_bus;
- /* internal thermal controller on rv6xx+ */
- enum amdgpu_int_thermal_type int_thermal_type;
- struct device *int_hwmon_dev;
- /* fan control parameters */
- bool no_fan;
- u8 fan_pulses_per_revolution;
- u8 fan_min_rpm;
- u8 fan_max_rpm;
- /* dpm */
- bool dpm_enabled;
- bool sysfs_initialized;
- struct amdgpu_dpm dpm;
- const struct firmware *fw; /* SMC firmware */
- uint32_t fw_version;
- const struct amdgpu_dpm_funcs *funcs;
- uint32_t pcie_gen_mask;
- uint32_t pcie_mlw_mask;
- struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
-};
-
void amdgpu_get_pcie_info(struct amdgpu_device *adev);
/*
@@ -1939,14 +1289,6 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
-struct amdgpu_ip_block_status {
- bool valid;
- bool sw;
- bool hw;
- bool late_initialized;
- bool hang;
-};
-
struct amdgpu_device {
struct device *dev;
struct drm_device *ddev;
@@ -2102,9 +1444,8 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
- const struct amdgpu_ip_block_version *ip_blocks;
+ struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
- struct amdgpu_ip_block_status *ip_block_status;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
@@ -2127,6 +1468,11 @@ struct amdgpu_device {
};
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+{
+ return container_of(bdev, struct amdgpu_device, mman.bdev);
+}
+
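
The new inline amdgpu_ttm_adev() helper recovers the amdgpu_device from the TTM device embedded inside it, which is what lets later hunks in this patch drop the old bo->adev back-pointer. A minimal sketch of the container_of idiom it relies on, using hypothetical stand-in types (the real helper walks back through the nested member mman.bdev):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_dev { int dummy; };
struct device_example {
	int id;
	struct ttm_dev bdev;	/* embedded, like mman.bdev */
};

static struct device_example *dev_from_ttm(struct ttm_dev *bdev)
{
	/* Walk back from the member to the enclosing struct. */
	return container_of(bdev, struct device_example, bdev);
}
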
bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
struct drm_device *ddev,
@@ -2278,8 +1624,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
-#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
-#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2301,108 +1645,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
-
-#define amdgpu_dpm_read_sensor(adev, idx, value) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
- -EINVAL)
-
-#define amdgpu_dpm_get_temperature(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_temperature((adev)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_fan_control_mode((adev)))
-
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_sclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_sclk((adev), (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_mclk((adev), (l)))
-
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->force_performance_level((adev), (l)))
-
-#define amdgpu_dpm_powergate_uvd(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_uvd((adev), (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_vce((adev), (g)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
- (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_performance_level(adev) \
- (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
- (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
- (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
- (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
-
-#define amdgpu_dpm_get_sclk_od(adev) \
- (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
- (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
-
-#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
- (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
-
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
/* Common functions */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 892d60fb225b..2f9f96cc9f65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -265,14 +265,14 @@ static int acp_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- const struct amdgpu_ip_block_version *ip_version =
+ const struct amdgpu_ip_block *ip_block =
amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
- if (!ip_version)
+ if (!ip_block)
return -EINVAL;
r = amd_acp_hw_init(adev->acp.cgs_device,
- ip_version->major, ip_version->minor);
+ ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
if (r == -ENODEV)
return 0;
@@ -456,7 +456,7 @@ static int acp_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs acp_ip_funcs = {
+static const struct amd_ip_funcs acp_ip_funcs = {
.name = "acp_ip",
.early_init = acp_early_init,
.late_init = NULL,
@@ -472,3 +472,12 @@ const struct amd_ip_funcs acp_ip_funcs = {
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
};
+
+const struct amdgpu_ip_block_version acp_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_ACP,
+ .major = 2,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &acp_ip_funcs,
+};
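
Each engine now exports a struct amdgpu_ip_block_version (type, version triple, and funcs) instead of a bare amd_ip_funcs table, and acp_ip_funcs becomes static. Under the registration model introduced later in this patch (amdgpu_ip_block_add()), ASIC setup code would plausibly add such blocks in initialization order; a hedged sketch with a hypothetical function name:

static int example_asic_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	/* Order matters: blocks are initialized in array order. */
	r = amdgpu_ip_block_add(adev, &acp_ip_block);
	if (r)
		return r;
	/* ... GMC, IH, SMC, DCE, GFX, SDMA, UVD, VCE blocks follow ... */
	return 0;
}
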
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
index 8a396313c86f..a288ce25c176 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -37,6 +37,6 @@ struct amdgpu_acp {
struct acp_pm_domain *acp_genpd;
};
-extern const struct amd_ip_funcs acp_ip_funcs;
+extern const struct amdgpu_ip_block_version acp_ip_block;
#endif /* __AMDGPU_ACP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 8e6bf548d689..56a86dd5789e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1115,49 +1115,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
return 0;
}
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
-{
- GET_ENGINE_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
- return le32_to_cpu(args.ulReturnEngineClock);
-}
-
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
-{
- GET_MEMORY_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
- return le32_to_cpu(args.ulReturnMemoryClock);
-}
-
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
- uint32_t eng_clock)
-{
- SET_ENGINE_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
-
- args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
- uint32_t mem_clock)
-{
- SET_MEMORY_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
-
- if (adev->flags & AMD_IS_APU)
- return;
-
- args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock)
{
@@ -1256,45 +1213,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
- u16 voltage_level,
- u8 voltage_type)
-{
- union set_voltage args;
- int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev, volt_index = voltage_level;
-
- if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- /* 0xff01 is a flag rather then an actual voltage */
- if (voltage_level == 0xff01)
- return;
-
- switch (crev) {
- case 1:
- args.v1.ucVoltageType = voltage_type;
- args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
- args.v1.ucVoltageIndex = volt_index;
- break;
- case 2:
- args.v2.ucVoltageType = voltage_type;
- args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
- args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
- break;
- case 3:
- args.v3.ucVoltageType = voltage_type;
- args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
- args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return;
- }
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
u16 *leakage_id)
{
@@ -1784,6 +1702,19 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
}
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung)
+{
+ u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+ WREG32(mmBIOS_SCRATCH_3, tmp);
+}
+
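
The new amdgpu_atombios_scratch_regs_engine_hung() helper does a read-modify-write of BIOS scratch register 3 to publish (or clear) the GUI-engine-hung flag where the VBIOS can see it. A plausible caller — hypothetical placement, not taken from this patch — would bracket an ASIC reset:

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
	/* ... perform the actual reset ... */
	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
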
/* Atom needs data in little endian format
* so swap as appropriate when copying data to
* or from atom. Note that atom operates on
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 17356151db38..70e9acef5d9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -163,16 +163,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
bool strobe_mode,
struct atom_mpll_param *mpll_param);
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
- uint32_t eng_clock);
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
- uint32_t mem_clock);
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
- u16 voltage_level,
- u8 voltage_type);
-
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock);
@@ -206,6 +196,8 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 345305235349..cc97eee93226 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
{
unsigned long start_jiffies;
unsigned long end_jiffies;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
int i, r;
start_jiffies = jiffies;
@@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
false);
if (r)
goto exit_do_move;
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r)
goto exit_do_move;
- fence_put(fence);
+ dma_fence_put(fence);
}
end_jiffies = jiffies;
r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
if (fence)
- fence_put(fence);
+ dma_fence_put(fence);
return r;
}
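
This file shows the mechanical half of the tree-wide struct fence -> struct dma_fence rename: the type and the fence_wait()/fence_put() calls gain a dma_ prefix with unchanged semantics. A minimal sketch of the resulting wait-then-release discipline, assuming a fence returned by some hypothetical submission helper:

	struct dma_fence *fence = NULL;
	int r;

	r = submit_copy(&fence);	/* hypothetical producer */
	if (!r) {
		/* Second argument false: wait non-interruptibly. */
		r = dma_fence_wait(fence, false);
	}
	dma_fence_put(fence);		/* NULL-safe release */
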
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a8bfa34682f..017556ca22e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -146,7 +146,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
switch(type) {
case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__VISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (max_offset > adev->mc.real_vram_size)
return -EINVAL;
@@ -157,7 +158,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
place.fpfn =
@@ -240,7 +242,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
r = amdgpu_bo_reserve(obj, false);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+ r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
min_offset, max_offset, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
@@ -624,11 +626,11 @@ static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state(
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
(void *)adev,
state);
break;
@@ -645,11 +647,11 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
int i, r = -1;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_powergating_state(
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state(
(void *)adev,
state);
break;
@@ -685,15 +687,21 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_CP_MEC_JT2:
- if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
- || adev->asic_type == CHIP_POLARIS10)
- result = AMDGPU_UCODE_ID_CP_MEC2;
- else
+ /* for VI, JT2 should be the same as JT1, because:
+ * 1. MEC2 and MEC1 use exactly the same FW.
+ * 2. JT2 is not patched, but JT1 is.
+ */
+ if (adev->asic_type >= CHIP_TOPAZ)
result = AMDGPU_UCODE_ID_CP_MEC1;
+ else
+ result = AMDGPU_UCODE_ID_CP_MEC2;
break;
case CGS_UCODE_ID_RLC_G:
result = AMDGPU_UCODE_ID_RLC_G;
break;
+ case CGS_UCODE_ID_STORAGE:
+ result = AMDGPU_UCODE_ID_STORAGE;
+ break;
default:
DRM_ERROR("Firmware type not supported\n");
}
@@ -776,12 +784,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
(type == CGS_UCODE_ID_CP_MEC_JT2)) {
- gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+ gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
data_size = le32_to_cpu(header->jt_size) << 2;
}
- info->mc_addr = gpu_addr;
+
+ info->kptr = ucode->kaddr;
info->image_size = data_size;
+ info->mc_addr = gpu_addr;
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+
+ if (CGS_UCODE_ID_CP_MEC == type)
+ info->image_size = (header->jt_offset) << 2;
+
info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
} else {
@@ -851,6 +865,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return 0;
}
+static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
+{
+ CGS_FUNC_ADEV;
+ return amdgpu_sriov_vf(adev);
+}
+
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info)
{
@@ -1204,6 +1224,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_notify_dpm_enabled,
amdgpu_cgs_call_acpi_method,
amdgpu_cgs_query_system_info,
+ amdgpu_cgs_is_virtualization_enabled
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index e3281d4e3e41..3af8ffb45b64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1517,88 +1517,6 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.force = amdgpu_connector_dvi_force,
};
-static struct drm_encoder *
-amdgpu_connector_virtual_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- struct drm_encoder *encoder;
- int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
- if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
- return encoder;
- }
-
- /* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
- return NULL;
-}
-
-static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
-{
- struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
-
- if (encoder) {
- amdgpu_connector_add_common_modes(encoder, connector);
- }
-
- return 0;
-}
-
-static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static int
-amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
-{
- return 0;
-}
-
-static enum drm_connector_status
-
-amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-static int
-amdgpu_connector_virtual_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- return 0;
-}
-
-static void amdgpu_connector_virtual_force(struct drm_connector *connector)
-{
- return;
-}
-
-static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
- .get_modes = amdgpu_connector_virtual_get_modes,
- .mode_valid = amdgpu_connector_virtual_mode_valid,
- .best_encoder = amdgpu_connector_virtual_encoder,
-};
-
-static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
- .dpms = amdgpu_connector_virtual_dpms,
- .detect = amdgpu_connector_virtual_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = amdgpu_connector_virtual_set_property,
- .destroy = amdgpu_connector_destroy,
- .force = amdgpu_connector_virtual_force,
-};
-
void
amdgpu_connector_add(struct amdgpu_device *adev,
uint32_t connector_id,
@@ -1983,17 +1901,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
- case DRM_MODE_CONNECTOR_VIRTUAL:
- amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
- if (!amdgpu_dig_connector)
- goto failed;
- amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
- subpixel_order = SubPixelHorizontalRGB;
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
- break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82dc8d20e28a..a370101d923f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u64 initial_bytes_moved;
uint32_t domain;
int r;
@@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -387,9 +388,9 @@ retry:
/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
- struct amdgpu_bo_list_entry *lobj)
+ struct amdgpu_bo *validated)
{
- uint32_t domain = lobj->robj->allowed_domains;
+ uint32_t domain = validated->allowed_domains;
int r;
if (!p->evictable)
@@ -400,11 +401,12 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
struct amdgpu_bo_list_entry *candidate = p->evictable;
struct amdgpu_bo *bo = candidate->robj;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u64 initial_bytes_moved;
uint32_t other;
/* If we reached our current BO we can forget it */
- if (candidate == lobj)
+ if (candidate->robj == validated)
break;
other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
@@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
/* Good we can try to move this BO somewhere else */
amdgpu_ttm_placement_from_domain(bo, other);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (unlikely(r))
@@ -437,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
return false;
}
+static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
+{
+ struct amdgpu_cs_parser *p = param;
+ int r;
+
+ do {
+ r = amdgpu_cs_bo_validate(p, bo);
+ } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+ if (r)
+ return r;
+
+ if (bo->shadow)
+ r = amdgpu_cs_bo_validate(p, bo);
+
+ return r;
+}
+
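
amdgpu_cs_validate() folds the retry-until-evicted loop and the shadow-BO pass into a single callback, so the same policy can be applied both to the BO list below and, via amdgpu_vm_validate_pt_bos(), to the VM page tables. The VM-side walker presumably looks something like this sketch (field names illustrative):

int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	unsigned i;
	int r;

	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo *bo = vm->page_tables[i].bo;

		if (!bo)
			continue;
		/* Apply the caller's policy to each page-table BO. */
		r = validate(param, bo);
		if (r)
			return r;
	}
	return 0;
}
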
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
@@ -464,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
if (p->evictable == lobj)
p->evictable = NULL;
- do {
- r = amdgpu_cs_bo_validate(p, bo);
- } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+ r = amdgpu_cs_validate(p, bo);
if (r)
return r;
- if (bo->shadow) {
- r = amdgpu_cs_bo_validate(p, bo);
- if (r)
- return r;
- }
-
if (binding_userptr) {
drm_free_large(lobj->user_pages);
lobj->user_pages = NULL;
@@ -594,14 +605,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
list_splice(&need_pages, &p->validated);
}
- amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
-
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
p->bytes_moved = 0;
p->evictable = list_last_entry(&p->validated,
struct amdgpu_bo_list_entry,
tv.head);
+ r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
+ amdgpu_cs_validate, p);
+ if (r) {
+ DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
+ goto error_validate;
+ }
+
r = amdgpu_cs_list_validate(p, &duplicates);
if (r) {
DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
@@ -720,7 +736,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
- fence_put(parser->fence);
+ dma_fence_put(parser->fence);
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
@@ -757,7 +773,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (p->bo_list) {
for (i = 0; i < p->bo_list->num_entries; i++) {
- struct fence *f;
+ struct dma_fence *f;
/* ignore duplicates */
bo = p->bo_list->array[i].robj;
@@ -807,13 +823,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
/* Only for UVD/VCE VM emulation */
if (ring->funcs->parse_cs) {
- p->job->vm = NULL;
for (i = 0; i < p->job->num_ibs; i++) {
r = amdgpu_ring_parse_cs(ring, p, i);
if (r)
return r;
}
- } else {
+ }
+
+ if (p->job->vm) {
p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
r = amdgpu_bo_vm_update_pte(p, vm);
@@ -902,7 +919,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
- r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
+ r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -917,9 +934,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return r;
}
- ib->gpu_addr = chunk_ib->va_start;
}
+ ib->gpu_addr = chunk_ib->va_start;
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
j++;
@@ -927,8 +944,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
/* UVD & VCE fw doesn't support user fences */
if (parser->job->uf_addr && (
- parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
- parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+ parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+ parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
return 0;
@@ -957,7 +974,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
for (j = 0; j < num_deps; ++j) {
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx;
- struct fence *fence;
+ struct dma_fence *fence;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
@@ -979,7 +996,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
} else if (fence) {
r = amdgpu_sync_fence(adev, &p->job->sync,
fence);
- fence_put(fence);
+ dma_fence_put(fence);
amdgpu_ctx_put(ctx);
if (r)
return r;
@@ -1009,7 +1026,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->owner = p->filp;
job->fence_ctx = entity->fence_context;
- p->fence = fence_get(&job->base.s_fence->finished);
+ p->fence = dma_fence_get(&job->base.s_fence->finished);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
@@ -1092,7 +1109,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
- struct fence *fence;
+ struct dma_fence *fence;
long r;
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
@@ -1108,8 +1125,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(fence))
r = PTR_ERR(fence);
else if (fence) {
- r = fence_wait_timeout(fence, true, timeout);
- fence_put(fence);
+ r = dma_fence_wait_timeout(fence, true, timeout);
+ dma_fence_put(fence);
} else
r = 1;
@@ -1196,6 +1213,15 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r))
return r;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ continue;
+
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r))
+ return r;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a5e2fcbef0f0..400c66ba4c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
- sizeof(struct fence*), GFP_KERNEL);
+ sizeof(struct dma_fence*), GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
@@ -55,18 +55,18 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
rq, amdgpu_sched_jobs);
if (r)
- break;
+ goto failed;
}
- if (i < adev->num_rings) {
- for (j = 0; j < i; j++)
- amd_sched_entity_fini(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
- kfree(ctx->fences);
- ctx->fences = NULL;
- return r;
- }
return 0;
+
+failed:
+ for (j = 0; j < i; j++)
+ amd_sched_entity_fini(&adev->rings[j]->sched,
+ &ctx->rings[j].entity);
+ kfree(ctx->fences);
+ ctx->fences = NULL;
+ return r;
}
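
The rewritten error path in amdgpu_ctx_init() trades the in-line "if (i < adev->num_rings)" unwind for the conventional kernel goto-label cleanup, keeping the success return on the straight path. The idiom in isolation, with hypothetical acquire/release helpers:

int acquire_a(void);
int acquire_b(void);
void release_a(void);

static int example_init(void)
{
	int r;

	r = acquire_a();
	if (r)
		return r;
	r = acquire_b();
	if (r)
		goto fail_a;	/* unwind only what succeeded */
	return 0;

fail_a:
	release_a();
	return r;
}
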
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
for (j = 0; j < amdgpu_sched_jobs; ++j)
- fence_put(ctx->rings[i].fences[j]);
+ dma_fence_put(ctx->rings[i].fences[j]);
kfree(ctx->fences);
ctx->fences = NULL;
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
}
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence;
unsigned idx = 0;
- struct fence *other = NULL;
+ struct dma_fence *other = NULL;
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
if (other) {
signed long r;
- r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
}
- fence_get(fence);
+ dma_fence_get(fence);
spin_lock(&ctx->ring_lock);
cring->fences[idx] = fence;
cring->sequence++;
spin_unlock(&ctx->ring_lock);
- fence_put(other);
+ dma_fence_put(other);
return seq;
}
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
- struct amdgpu_ring *ring, uint64_t seq)
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct amdgpu_ring *ring, uint64_t seq)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
- struct fence *fence;
+ struct dma_fence *fence;
spin_lock(&ctx->ring_lock);
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return NULL;
}
- fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
+ fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
spin_unlock(&ctx->ring_lock);
return fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7ca07e7b25c1..9e16e975f31a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -264,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
if (adev->vram_scratch.robj == NULL) {
r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vram_scratch.robj);
if (r) {
return r;
@@ -442,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
if (adev->wb.wb_obj) {
- if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
- amdgpu_bo_kunmap(adev->wb.wb_obj);
- amdgpu_bo_unpin(adev->wb.wb_obj);
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- }
- amdgpu_bo_unref(&adev->wb.wb_obj);
- adev->wb.wb = NULL;
+ amdgpu_bo_free_kernel(&adev->wb.wb_obj,
+ &adev->wb.gpu_addr,
+ (void **)&adev->wb.wb);
adev->wb.wb_obj = NULL;
}
}
@@ -467,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int r;
if (adev->wb.wb_obj == NULL) {
- r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->wb.wb_obj);
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->wb.wb_obj, &adev->wb.gpu_addr,
+ (void **)&adev->wb.wb);
if (r) {
dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
return r;
}
- r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
- if (unlikely(r != 0)) {
- amdgpu_wb_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->wb.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
- amdgpu_wb_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
- amdgpu_bo_unreserve(adev->wb.wb_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
- amdgpu_wb_fini(adev);
- return r;
- }
adev->wb.num_wb = AMDGPU_MAX_WB;
memset(&adev->wb.used, 0, sizeof(adev->wb.used));
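
amdgpu_wb_init()/amdgpu_wb_fini() now lean on amdgpu_bo_create_kernel() and amdgpu_bo_free_kernel(), which bundle the create/reserve/pin/kmap sequence (and its inverse) that the removed lines open-coded. Based on the calls above, the paired usage is roughly this sketch:

	/* Create, pin and map a kernel BO in one call. */
	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &bo, &gpu_addr, &cpu_ptr);
	/* ... use cpu_ptr / gpu_addr ... */
	/* Inverse: unmap, unpin and unreference. */
	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
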
@@ -1051,6 +1029,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vm_block_size);
amdgpu_vm_block_size = 9;
}
+
+ if ((amdgpu_vram_page_split != -1 && amdgpu_vram_page_split < 16) ||
+ !amdgpu_check_pot_argument(amdgpu_vram_page_split)) {
+ dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
+ amdgpu_vram_page_split);
+ amdgpu_vram_page_split = 1024;
+ }
}
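
The new check is meant to accept -1 (automatic) and otherwise require vram_page_split to be a power of two of at least 16, warning and falling back to 1024 on anything else; note that as written the power-of-two clause also runs for -1, so whether -1 survives depends on amdgpu_check_pot_argument(). That helper presumably uses the standard bit trick; a hedged sketch of the assumed implementation:

static bool is_power_of_two(int x)
{
	/* A power of two has exactly one bit set. */
	return x > 0 && (x & (x - 1)) == 0;
}
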
/**
@@ -1125,11 +1110,11 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- state);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ state);
if (r)
return r;
break;
@@ -1145,11 +1130,11 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
- state);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+ state);
if (r)
return r;
break;
@@ -1164,10 +1149,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type) {
- r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+ if (adev->ip_blocks[i].version->type == block_type) {
+ r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
if (r)
return r;
break;
@@ -1183,23 +1168,22 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].type == block_type)
- return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+ if (adev->ip_blocks[i].version->type == block_type)
+ return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
}
return true;
}
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
- struct amdgpu_device *adev,
- enum amd_ip_block_type type)
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++)
- if (adev->ip_blocks[i].type == type)
+ if (adev->ip_blocks[i].version->type == type)
return &adev->ip_blocks[i];
return NULL;
@@ -1220,38 +1204,75 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
enum amd_ip_block_type type,
u32 major, u32 minor)
{
- const struct amdgpu_ip_block_version *ip_block;
- ip_block = amdgpu_get_ip_block(adev, type);
+ struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
- if (ip_block && ((ip_block->major > major) ||
- ((ip_block->major == major) &&
- (ip_block->minor >= minor))))
+ if (ip_block && ((ip_block->version->major > major) ||
+ ((ip_block->version->major == major) &&
+ (ip_block->version->minor >= minor))))
return 0;
return 1;
}
-static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+/**
+ * amdgpu_ip_block_add
+ *
+ * @adev: amdgpu_device pointer
+ * @ip_block_version: pointer to the IP to add
+ *
+ * Adds the IP block driver information to the collection of IPs
+ * on the asic.
+ */
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version)
+{
+ if (!ip_block_version)
+ return -EINVAL;
+
+ adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
+
+ return 0;
+}
+
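
amdgpu_ip_block_add() appends to the fixed ip_blocks[AMDGPU_MAX_IP_NUM] array declared earlier, replacing the per-ASIC static version tables plus the separately allocated ip_block_status array. Every consumer now pairs per-block status with versioned funcs; a hedged sketch of the resulting iteration shape (hypothetical helper):

static void example_dump_ip_blocks(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		const struct amdgpu_ip_block *block = &adev->ip_blocks[i];

		if (!block->status.valid)
			continue;
		DRM_INFO("%s v%u.%u.%u\n",
			 block->version->funcs->name,
			 block->version->major,
			 block->version->minor,
			 block->version->rev);
	}
}
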
+static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
struct drm_device *ddev = adev->ddev;
const char *pci_address_name = pci_name(ddev->pdev);
- char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+ char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
pciaddstr_tmp = pciaddstr;
- while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+ while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
+ pciaddname = strsep(&pciaddname_tmp, ",");
if (!strcmp(pci_address_name, pciaddname)) {
+ long num_crtc;
+ int res = -1;
+
adev->enable_virtual_display = true;
+
+ if (pciaddname_tmp)
+ res = kstrtol(pciaddname_tmp, 10,
+ &num_crtc);
+
+ if (!res) {
+ if (num_crtc < 1)
+ num_crtc = 1;
+ if (num_crtc > 6)
+ num_crtc = 6;
+ adev->mode_info.num_crtc = num_crtc;
+ } else {
+ adev->mode_info.num_crtc = 1;
+ }
break;
}
}
- DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
- amdgpu_virtual_display, pci_address_name,
- adev->enable_virtual_display);
+ DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
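
The virtual display option string now takes an optional per-device CRTC count: semicolon-separated "<pci-bus-id>,<crtc_count>" entries, with the count clamped to the 1-6 range and defaulting to 1 when omitted or unparsable. A plausible kernel command line (hypothetical bus IDs):

	amdgpu.virtual_display=0000:01:00.0,2;0000:02:00.0

Here the first device gets two virtual CRTCs and the second, having no count, gets the default single CRTC.
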
@@ -1261,7 +1282,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
{
int i, r;
- amdgpu_whether_enable_virtual_display(adev);
+ amdgpu_device_enable_virtual_display(adev);
switch (adev->asic_type) {
case CHIP_TOPAZ:
@@ -1313,33 +1334,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
- adev->ip_block_status = kcalloc(adev->num_ip_blocks,
- sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
- if (adev->ip_block_status == NULL)
- return -ENOMEM;
-
- if (adev->ip_blocks == NULL) {
- DRM_ERROR("No IP blocks found!\n");
- return r;
- }
-
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i);
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.valid = false;
} else {
- if (adev->ip_blocks[i].funcs->early_init) {
- r = adev->ip_blocks[i].funcs->early_init((void *)adev);
+ if (adev->ip_blocks[i].version->funcs->early_init) {
+ r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
if (r == -ENOENT) {
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.valid = false;
} else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
} else {
- adev->ip_block_status[i].valid = true;
+ adev->ip_blocks[i].status.valid = true;
}
} else {
- adev->ip_block_status[i].valid = true;
+ adev->ip_blocks[i].status.valid = true;
}
}
}
@@ -1355,22 +1367,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].sw = true;
+ adev->ip_blocks[i].status.sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
return r;
@@ -1380,22 +1393,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu_wb_init failed %d\n", r);
return r;
}
- adev->ip_block_status[i].hw = true;
+ adev->ip_blocks[i].status.hw = true;
}
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].sw)
+ if (!adev->ip_blocks[i].status.sw)
continue;
/* gmc hw init is done early */
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
continue;
- r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].hw = true;
+ adev->ip_blocks[i].status.hw = true;
}
return 0;
@@ -1406,25 +1420,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].funcs->late_init) {
- r = adev->ip_blocks[i].funcs->late_init((void *)adev);
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
if (r) {
- DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("late_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- adev->ip_block_status[i].late_initialized = true;
+ adev->ip_blocks[i].status.late_initialized = true;
}
/* skip CG for VCE/UVD, it's handled specially */
- if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
- adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
/* enable clockgating to save power */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_GATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_GATE);
if (r) {
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
@@ -1439,68 +1454,71 @@ static int amdgpu_fini(struct amdgpu_device *adev)
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].hw)
+ if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].funcs->name, r);
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].hw = false;
+ adev->ip_blocks[i].status.hw = false;
break;
}
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].hw)
+ if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_wb_fini(adev);
amdgpu_vram_scratch_fini(adev);
}
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
- r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].hw = false;
+ adev->ip_blocks[i].status.hw = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].sw)
+ if (!adev->ip_blocks[i].status.sw)
continue;
- r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
- adev->ip_block_status[i].sw = false;
- adev->ip_block_status[i].valid = false;
+ adev->ip_blocks[i].status.sw = false;
+ adev->ip_blocks[i].status.valid = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].late_initialized)
+ if (!adev->ip_blocks[i].status.late_initialized)
continue;
- if (adev->ip_blocks[i].funcs->late_fini)
- adev->ip_blocks[i].funcs->late_fini((void *)adev);
- adev->ip_block_status[i].late_initialized = false;
+ if (adev->ip_blocks[i].version->funcs->late_fini)
+ adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+ adev->ip_blocks[i].status.late_initialized = false;
}
return 0;
@@ -1518,21 +1536,23 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
if (i != AMD_IP_BLOCK_TYPE_SMC) {
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
/* XXX handle errors */
- r = adev->ip_blocks[i].funcs->suspend(adev);
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
if (r) {
- DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
@@ -1544,11 +1564,12 @@ static int amdgpu_resume(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].funcs->resume(adev);
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ DRM_ERROR("resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
}
@@ -1599,7 +1620,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_funcs = NULL;
adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL;
- adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -1859,8 +1880,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
- kfree(adev->ip_block_status);
- adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
amdgpu_i2c_fini(adev);
@@ -1956,7 +1975,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
r = amdgpu_suspend(adev);
- /* evict remaining vram memory */
+ /* evict remaining vram memory
+ * This second call to evict vram is needed to evict the GART
+ * page table using the CPU.
+ */
amdgpu_bo_evict_vram(adev);
amdgpu_atombios_scratch_regs_save(adev);
@@ -2098,13 +2120,13 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
bool asic_hang = false;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].funcs->check_soft_reset)
- adev->ip_block_status[i].hang =
- adev->ip_blocks[i].funcs->check_soft_reset(adev);
- if (adev->ip_block_status[i].hang) {
- DRM_INFO("IP block:%d is hang!\n", i);
+ if (adev->ip_blocks[i].version->funcs->check_soft_reset)
+ adev->ip_blocks[i].status.hang =
+ adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang) {
+ DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
asic_hang = true;
}
}
@@ -2116,11 +2138,11 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->pre_soft_reset) {
- r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->pre_soft_reset) {
+ r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
if (r)
return r;
}
@@ -2134,13 +2156,13 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
- (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
- if (adev->ip_block_status[i].hang) {
+ if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+ if (adev->ip_blocks[i].status.hang) {
DRM_INFO("Some block need full reset!\n");
return true;
}
@@ -2154,11 +2176,11 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->soft_reset) {
- r = adev->ip_blocks[i].funcs->soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->soft_reset) {
+ r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
if (r)
return r;
}
@@ -2172,11 +2194,11 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_block_status[i].valid)
+ if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_block_status[i].hang &&
- adev->ip_blocks[i].funcs->post_soft_reset)
- r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+ if (adev->ip_blocks[i].status.hang &&
+ adev->ip_blocks[i].version->funcs->post_soft_reset)
+ r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
if (r)
return r;
}
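
The hunks above replace the parallel ip_block_status[] array with a status struct embedded in each IP block, and move type/major/minor/funcs behind a version pointer. A minimal sketch of the resulting access pattern, assuming simplified kernel-internal types from amdgpu.h; any_block_hung is an illustrative helper, not part of the patch:

    static bool any_block_hung(struct amdgpu_device *adev)
    {
            int i;

            for (i = 0; i < adev->num_ip_blocks; i++) {
                    if (!adev->ip_blocks[i].status.valid)
                            continue;
                    if (adev->ip_blocks[i].status.hang)
                            return true;
            }
            return false;
    }
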
@@ -2195,7 +2217,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
- struct fence **fence)
+ struct dma_fence **fence)
{
uint32_t domain;
int r;
@@ -2311,30 +2333,30 @@ retry:
if (need_full_reset && amdgpu_need_backup(adev)) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *bo, *tmp;
- struct fence *fence = NULL, *next = NULL;
+ struct dma_fence *fence = NULL, *next = NULL;
DRM_INFO("recover vram bo from shadow\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
WARN(r, "recovery from shadow isn't comleted\n");
break;
}
}
- fence_put(fence);
+ dma_fence_put(fence);
fence = next;
}
mutex_unlock(&adev->shadow_list_lock);
if (fence) {
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r)
WARN(r, "recovery from shadow isn't comleted\n");
}
- fence_put(fence);
+ dma_fence_put(fence);
}
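
The recovery loop above pipelines the shadow copies: it submits the copy for BO N while waiting on the fence of BO N-1, then waits once more on the final fence. A minimal fragment showing that chain-and-wait idiom with the renamed API; item, work_list and submit_copy are hypothetical names, not part of the patch:

    struct dma_fence *fence = NULL, *next = NULL;

    list_for_each_entry(item, &work_list, head) {
            submit_copy(item, &next);        /* hypothetical: emits a fence */
            if (fence)
                    dma_fence_wait(fence, false);
            dma_fence_put(fence);            /* NULL-safe */
            fence = next;
    }
    if (fence)
            dma_fence_wait(fence, false);    /* wait for the last copy */
    dma_fence_put(fence);
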
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -2530,6 +2552,13 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
se_bank = (*pos >> 24) & 0x3FF;
sh_bank = (*pos >> 34) & 0x3FF;
instance_bank = (*pos >> 44) & 0x3FF;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
use_bank = 1;
} else {
use_bank = 0;
@@ -2538,8 +2567,8 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
*pos &= 0x3FFFF;
if (use_bank) {
- if (sh_bank >= adev->gfx.config.max_sh_per_se ||
- se_bank >= adev->gfx.config.max_shader_engines)
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
return -EINVAL;
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
@@ -2586,10 +2615,45 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ /* are we writing registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos >> 24) & 0x3FF;
+ sh_bank = (*pos >> 34) & 0x3FF;
+ instance_bank = (*pos >> 44) & 0x3FF;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= 0x3FFFF;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
while (size) {
uint32_t value;
@@ -2608,6 +2672,14 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size -= 4;
}
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
return result;
}
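
From the decode logic above, the debugfs file offset packs the register offset in bits 0-17, a PG-lock flag in bit 23, and, when bit 62 is set, 10-bit se/sh/instance selectors at bits 24/34/44 where 0x3FF means broadcast. A hedged userspace sketch composing such an offset; amdgpu_regs_pos is illustrative, only the field layout comes from the hunks:

    #include <stdint.h>

    static uint64_t amdgpu_regs_pos(uint32_t reg_off, unsigned se, unsigned sh,
                                    unsigned instance, int pg_lock)
    {
            uint64_t pos = reg_off & 0x3FFFF;

            pos |= (uint64_t)(pg_lock ? 1 : 0) << 23;
            pos |= (uint64_t)(se & 0x3FF) << 24;
            pos |= (uint64_t)(sh & 0x3FF) << 34;
            pos |= (uint64_t)(instance & 0x3FF) << 44;
            pos |= 1ULL << 62;              /* enable banked addressing */
            return pos;
    }
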
@@ -2870,6 +2942,56 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
return !r ? 4 : r;
}
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r, x;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & 0x7F);
+ se = ((*pos >> 7) & 0xFF);
+ sh = ((*pos >> 15) & 0xFF);
+ cu = ((*pos >> 23) & 0xFF);
+ wave = ((*pos >> 31) & 0xFF);
+ simd = ((*pos >> 37) & 0xFF);
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ x = 0;
+ if (adev->gfx.funcs->read_wave_data)
+ adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ if (!x)
+ return -EINVAL;
+
+ while (size && (offset < x * 4)) {
+ uint32_t value;
+
+ value = data[offset >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ offset += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
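
The wave reader decodes its offset as shown above: a dword offset in bits 0-6 and 8-bit se/sh/cu/wave/simd fields starting at bits 7/15/23/31/37 (note the wave and simd fields overlap at bits 37-38 in the hunk, so only small wave indices are unambiguous). A hedged userspace sketch of the matching encoder; amdgpu_wave_pos is illustrative:

    #include <stdint.h>

    static uint64_t amdgpu_wave_pos(unsigned off, unsigned se, unsigned sh,
                                    unsigned cu, unsigned wave, unsigned simd)
    {
            uint64_t pos = off & 0x7F;

            pos |= (uint64_t)(se & 0xFF) << 7;
            pos |= (uint64_t)(sh & 0xFF) << 15;
            pos |= (uint64_t)(cu & 0xFF) << 23;
            pos |= (uint64_t)(wave & 0xFF) << 31;
            pos |= (uint64_t)(simd & 0xFF) << 37;
            return pos;
    }
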
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -2907,6 +3029,12 @@ static const struct file_operations amdgpu_debugfs_sensors_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_wave_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
@@ -2914,6 +3042,7 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_smc_fops,
&amdgpu_debugfs_gca_config_fops,
&amdgpu_debugfs_sensors_fops,
+ &amdgpu_debugfs_wave_fops,
};
static const char *debugfs_regs_names[] = {
@@ -2923,6 +3052,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_regs_smc",
"amdgpu_gca_config",
"amdgpu_sensors",
+ "amdgpu_wave",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 083e2b429872..741144fcc7bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,29 +35,29 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
-static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
+static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amdgpu_flip_work *work =
container_of(cb, struct amdgpu_flip_work, cb);
- fence_put(f);
+ dma_fence_put(f);
schedule_work(&work->flip_work.work);
}
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
- struct fence **f)
+ struct dma_fence **f)
{
- struct fence *fence= *f;
+ struct dma_fence *fence = *f;
if (fence == NULL)
return false;
*f = NULL;
- if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+ if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
return true;
- fence_put(fence);
+ dma_fence_put(fence);
return false;
}
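
amdgpu_flip_handle_fence() above follows the standard ownership rule of the renamed callback API: if dma_fence_add_callback() succeeds (returns 0), the callback path drops the reference; otherwise the caller must. A minimal sketch of that pattern, where everything except the dma_fence_* calls is illustrative:

    static void my_cb(struct dma_fence *f, struct dma_fence_cb *cb)
    {
            dma_fence_put(f);
            /* kick follow-up work here */
    }

    static bool arm_or_consume(struct dma_fence **f, struct dma_fence_cb *cb)
    {
            struct dma_fence *fence = *f;

            if (!fence)
                    return false;
            *f = NULL;
            if (!dma_fence_add_callback(fence, cb, my_cb))
                    return true;    /* callback armed; my_cb will put */
            dma_fence_put(fence);   /* fence already signaled */
            return false;
    }
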
@@ -68,9 +68,9 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct amdgpu_flip_work *work =
container_of(delayed_work, struct amdgpu_flip_work, flip_work);
struct amdgpu_device *adev = work->adev;
- struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
+ struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
- struct drm_crtc *crtc = &amdgpuCrtc->base;
+ struct drm_crtc *crtc = &amdgpu_crtc->base;
unsigned long flags;
unsigned i;
int vpos, hpos;
@@ -85,14 +85,14 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/* Wait until we're out of the vertical blank period before the one
* targeted by the flip
*/
- if (amdgpuCrtc->enabled &&
+ if (amdgpu_crtc->enabled &&
(amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
&vpos, &hpos, NULL, NULL,
&crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
- amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+ amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
@@ -104,12 +104,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
/* Set the flip status */
- amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
- amdgpuCrtc->crtc_id, amdgpuCrtc, work);
+ amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}
@@ -244,9 +244,9 @@ unreserve:
cleanup:
amdgpu_bo_unref(&work->old_abo);
- fence_put(work->excl);
+ dma_fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
- fence_put(work->shared[i]);
+ dma_fence_put(work->shared[i]);
kfree(work->shared);
kfree(work);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 14f57d9915e3..6ca0333ca4c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -553,9 +553,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
}
- for (i = 0; i < states->numEntries; i++) {
- if (i >= AMDGPU_MAX_VCE_LEVELS)
- break;
+ adev->pm.dpm.num_of_vce_states =
+ states->numEntries > AMD_MAX_VCE_LEVELS ?
+ AMD_MAX_VCE_LEVELS : states->numEntries;
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
vce_clk = (VCEClockInfo *)
((u8 *)&array->entries[0] +
(state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
@@ -955,3 +956,12 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
return encoded_lanes[lanes];
}
+
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+{
+ if (idx < adev->pm.dpm.num_of_vce_states)
+ return &adev->pm.dpm.vce_states[idx];
+
+ return NULL;
+}
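
amdgpu_get_vce_clock_state() bounds lookups by num_of_vce_states and returns NULL past the end, so callers can walk the table without knowing its length up front. A short sketch, assuming adev is in scope:

    struct amd_vce_state *state;
    unsigned idx;

    for (idx = 0; idx < AMD_MAX_VCE_LEVELS; idx++) {
            state = amdgpu_get_vce_clock_state(adev, idx);
            if (!state)
                    break;
            DRM_DEBUG("vce state %u: sclk=%u mclk=%u evclk=%u\n",
                      idx, state->sclk, state->mclk, state->evclk);
    }
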
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 3738a96c2619..bd85e35998e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -23,6 +23,446 @@
#ifndef __AMDGPU_DPM_H__
#define __AMDGPU_DPM_H__
+enum amdgpu_int_thermal_type {
+ THERMAL_TYPE_NONE,
+ THERMAL_TYPE_EXTERNAL,
+ THERMAL_TYPE_EXTERNAL_GPIO,
+ THERMAL_TYPE_RV6XX,
+ THERMAL_TYPE_RV770,
+ THERMAL_TYPE_ADT7473_WITH_INTERNAL,
+ THERMAL_TYPE_EVERGREEN,
+ THERMAL_TYPE_SUMO,
+ THERMAL_TYPE_NI,
+ THERMAL_TYPE_SI,
+ THERMAL_TYPE_EMC2103_WITH_INTERNAL,
+ THERMAL_TYPE_CI,
+ THERMAL_TYPE_KV,
+};
+
+enum amdgpu_dpm_auto_throttle_src {
+ AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
+ AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
+};
+
+enum amdgpu_dpm_event_src {
+ AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
+ AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
+ AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
+ AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+ AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
+};
+
+struct amdgpu_ps {
+ u32 caps; /* vbios flags */
+ u32 class; /* vbios flags */
+ u32 class2; /* vbios flags */
+ /* UVD clocks */
+ u32 vclk;
+ u32 dclk;
+ /* VCE clocks */
+ u32 evclk;
+ u32 ecclk;
+ bool vce_active;
+ enum amd_vce_level vce_level;
+ /* asic priv */
+ void *ps_priv;
+};
+
+struct amdgpu_dpm_thermal {
+ /* thermal interrupt work */
+ struct work_struct work;
+ /* low temperature threshold */
+ int min_temp;
+ /* high temperature threshold */
+ int max_temp;
+ /* was last interrupt low to high or high to low */
+ bool high_to_low;
+ /* interrupt source */
+ struct amdgpu_irq_src irq;
+};
+
+enum amdgpu_clk_action
+{
+ AMDGPU_SCLK_UP = 1,
+ AMDGPU_SCLK_DOWN
+};
+
+struct amdgpu_blacklist_clocks
+{
+ u32 sclk;
+ u32 mclk;
+ enum amdgpu_clk_action action;
+};
+
+struct amdgpu_clock_and_voltage_limits {
+ u32 sclk;
+ u32 mclk;
+ u16 vddc;
+ u16 vddci;
+};
+
+struct amdgpu_clock_array {
+ u32 count;
+ u32 *values;
+};
+
+struct amdgpu_clock_voltage_dependency_entry {
+ u32 clk;
+ u16 v;
+};
+
+struct amdgpu_clock_voltage_dependency_table {
+ u32 count;
+ struct amdgpu_clock_voltage_dependency_entry *entries;
+};
+
+union amdgpu_cac_leakage_entry {
+ struct {
+ u16 vddc;
+ u32 leakage;
+ };
+ struct {
+ u16 vddc1;
+ u16 vddc2;
+ u16 vddc3;
+ };
+};
+
+struct amdgpu_cac_leakage_table {
+ u32 count;
+ union amdgpu_cac_leakage_entry *entries;
+};
+
+struct amdgpu_phase_shedding_limits_entry {
+ u16 voltage;
+ u32 sclk;
+ u32 mclk;
+};
+
+struct amdgpu_phase_shedding_limits_table {
+ u32 count;
+ struct amdgpu_phase_shedding_limits_entry *entries;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_entry {
+ u32 vclk;
+ u32 dclk;
+ u16 v;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_table {
+ u8 count;
+ struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_entry {
+ u32 ecclk;
+ u32 evclk;
+ u16 v;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_table {
+ u8 count;
+ struct amdgpu_vce_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_ppm_table {
+ u8 ppm_design;
+ u16 cpu_core_number;
+ u32 platform_tdp;
+ u32 small_ac_platform_tdp;
+ u32 platform_tdc;
+ u32 small_ac_platform_tdc;
+ u32 apu_tdp;
+ u32 dgpu_tdp;
+ u32 dgpu_ulv_power;
+ u32 tj_max;
+};
+
+struct amdgpu_cac_tdp_table {
+ u16 tdp;
+ u16 configurable_tdp;
+ u16 tdc;
+ u16 battery_power_limit;
+ u16 small_power_limit;
+ u16 low_cac_leakage;
+ u16 high_cac_leakage;
+ u16 maximum_power_delivery_limit;
+};
+
+struct amdgpu_dpm_dynamic_state {
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
+ struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
+ struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+ struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+ struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
+ struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
+ struct amdgpu_clock_array valid_sclk_values;
+ struct amdgpu_clock_array valid_mclk_values;
+ struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
+ struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
+ u32 mclk_sclk_ratio;
+ u32 sclk_mclk_delta;
+ u16 vddc_vddci_delta;
+ u16 min_vddc_for_pcie_gen2;
+ struct amdgpu_cac_leakage_table cac_leakage_table;
+ struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
+ struct amdgpu_ppm_table *ppm_table;
+ struct amdgpu_cac_tdp_table *cac_tdp_table;
+};
+
+struct amdgpu_dpm_fan {
+ u16 t_min;
+ u16 t_med;
+ u16 t_high;
+ u16 pwm_min;
+ u16 pwm_med;
+ u16 pwm_high;
+ u8 t_hyst;
+ u32 cycle_delay;
+ u16 t_max;
+ u8 control_mode;
+ u16 default_max_fan_pwm;
+ u16 default_fan_output_sensitivity;
+ u16 fan_output_sensitivity;
+ bool ucode_fan_control;
+};
+
+enum amdgpu_pcie_gen {
+ AMDGPU_PCIE_GEN1 = 0,
+ AMDGPU_PCIE_GEN2 = 1,
+ AMDGPU_PCIE_GEN3 = 2,
+ AMDGPU_PCIE_GEN_INVALID = 0xffff
+};
+
+enum amdgpu_dpm_forced_level {
+ AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
+ AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
+ AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
+ AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
+};
+
+struct amdgpu_dpm_funcs {
+ int (*get_temperature)(struct amdgpu_device *adev);
+ int (*pre_set_power_state)(struct amdgpu_device *adev);
+ int (*set_power_state)(struct amdgpu_device *adev);
+ void (*post_set_power_state)(struct amdgpu_device *adev);
+ void (*display_configuration_changed)(struct amdgpu_device *adev);
+ u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
+ u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
+ void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
+ void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
+ int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
+ bool (*vblank_too_short)(struct amdgpu_device *adev);
+ void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
+ void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
+ void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
+ void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
+ u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
+ int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
+ int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+ int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(struct amdgpu_device *adev);
+ int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*get_mclk_od)(struct amdgpu_device *adev);
+ int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*check_state_equal)(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal);
+
+ struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
+};
+
+#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
+#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
+#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
+#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
+#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
+#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
+#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+ -EINVAL)
+
+#define amdgpu_dpm_get_temperature(adev) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
+ (adev)->pm.funcs->get_temperature((adev)))
+
+#define amdgpu_dpm_set_fan_control_mode(adev, m) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
+ (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
+
+#define amdgpu_dpm_get_fan_control_mode(adev) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
+ (adev)->pm.funcs->get_fan_control_mode((adev)))
+
+#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+ (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+ (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_sclk(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->get_sclk((adev), (l)))
+
+#define amdgpu_dpm_get_mclk(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->get_mclk((adev), (l)))
+
+
+#define amdgpu_dpm_force_performance_level(adev, l) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
+ (adev)->pm.funcs->force_performance_level((adev), (l)))
+
+#define amdgpu_dpm_powergate_uvd(adev, g) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
+ (adev)->pm.funcs->powergate_uvd((adev), (g)))
+
+#define amdgpu_dpm_powergate_vce(adev, g) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
+ (adev)->pm.funcs->powergate_vce((adev), (g)))
+
+#define amdgpu_dpm_get_current_power_state(adev) \
+ (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_performance_level(adev) \
+ (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_pp_num_states(adev, data) \
+ (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+
+#define amdgpu_dpm_get_pp_table(adev, table) \
+ (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+
+#define amdgpu_dpm_set_pp_table(adev, buf, size) \
+ (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+
+#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
+ (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+
+#define amdgpu_dpm_force_clock_level(adev, type, level) \
+ (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+
+#define amdgpu_dpm_get_sclk_od(adev) \
+ (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+ (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+ ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
+#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
+ (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
+
+#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal))
+
+#define amdgpu_dpm_get_vce_clock_state(adev, i) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
+ (adev)->pm.funcs->get_vce_clock_state((adev), (i)))
+
+struct amdgpu_dpm {
+ struct amdgpu_ps *ps;
+ /* number of valid power states */
+ int num_ps;
+ /* current power state that is active */
+ struct amdgpu_ps *current_ps;
+ /* requested power state */
+ struct amdgpu_ps *requested_ps;
+ /* boot up power state */
+ struct amdgpu_ps *boot_ps;
+ /* default uvd power state */
+ struct amdgpu_ps *uvd_ps;
+ /* vce requirements */
+ u32 num_of_vce_states;
+ struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
+ enum amd_vce_level vce_level;
+ enum amd_pm_state_type state;
+ enum amd_pm_state_type user_state;
+ enum amd_pm_state_type last_state;
+ enum amd_pm_state_type last_user_state;
+ u32 platform_caps;
+ u32 voltage_response_time;
+ u32 backbias_response_time;
+ void *priv;
+ u32 new_active_crtcs;
+ int new_active_crtc_count;
+ u32 current_active_crtcs;
+ int current_active_crtc_count;
+ struct amdgpu_dpm_dynamic_state dyn_state;
+ struct amdgpu_dpm_fan fan;
+ u32 tdp_limit;
+ u32 near_tdp_limit;
+ u32 near_tdp_limit_adjusted;
+ u32 sq_ramping_threshold;
+ u32 cac_leakage;
+ u16 tdp_od_limit;
+ u32 tdp_adjustment;
+ u16 load_line_slope;
+ bool power_control;
+ bool ac_power;
+ /* special states active */
+ bool thermal_active;
+ bool uvd_active;
+ bool vce_active;
+ /* thermal handling */
+ struct amdgpu_dpm_thermal thermal;
+ /* forced levels */
+ enum amdgpu_dpm_forced_level forced_level;
+};
+
+struct amdgpu_pm {
+ struct mutex mutex;
+ u32 current_sclk;
+ u32 current_mclk;
+ u32 default_sclk;
+ u32 default_mclk;
+ struct amdgpu_i2c_chan *i2c_bus;
+ /* internal thermal controller on rv6xx+ */
+ enum amdgpu_int_thermal_type int_thermal_type;
+ struct device *int_hwmon_dev;
+ /* fan control parameters */
+ bool no_fan;
+ u8 fan_pulses_per_revolution;
+ u8 fan_min_rpm;
+ u8 fan_max_rpm;
+ /* dpm */
+ bool dpm_enabled;
+ bool sysfs_initialized;
+ struct amdgpu_dpm dpm;
+ const struct firmware *fw; /* SMC firmware */
+ uint32_t fw_version;
+ const struct amdgpu_dpm_funcs *funcs;
+ uint32_t pcie_gen_mask;
+ uint32_t pcie_mlw_mask;
+ struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
+};
+
#define R600_SSTU_DFLT 0
#define R600_SST_DFLT 0x00C8
@@ -82,4 +522,7 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
u16 default_lanes);
u8 amdgpu_encode_pci_lane_width(u32 lanes);
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
+
#endif
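
Most of the accessor macros above dispatch on pp_enabled: the powerplay path takes the opaque pp_handle, the legacy path takes adev itself. A hedged sketch of what one such macro expands to, written as an inline function for readability; example_get_temperature is illustrative:

    static inline int example_get_temperature(struct amdgpu_device *adev)
    {
            if (adev->pp_enabled)
                    return adev->powerplay.pp_funcs->get_temperature(
                            adev->powerplay.pp_handle);
            return adev->pm.funcs->get_temperature(adev);
    }
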
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 71ed27eb3dde..6bb4d9e9afe4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -58,9 +58,10 @@
* - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
* - 3.7.0 - Add support for VCE clock list packet
* - 3.8.0 - Add support raster config init in the kernel
+ * - 3.9.0 - Add support for memory query info about VRAM and GTT.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 8
+#define KMS_DRIVER_MINOR 9
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -85,6 +86,7 @@ int amdgpu_vm_size = 64;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
+int amdgpu_vram_page_split = 1024;
int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
@@ -165,6 +167,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+MODULE_PARM_DESC(vram_page_split, "Number of pages after which VRAM allocations are split (default 1024, -1 = disable)");
+module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
@@ -201,7 +206,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
-MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+MODULE_PARM_DESC(virtual_display,
+ "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
static const struct pci_device_id pciidlist[] = {
@@ -381,6 +387,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
/* fiji */
{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
+ {0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
/* carrizo */
{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 9fb8aa4d6bae..38bdc2d300a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -90,12 +90,12 @@ static struct fb_ops amdgpufb_ops = {
};
-int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled)
+int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
{
int aligned = width;
int pitch_mask = 0;
- switch (bpp / 8) {
+ switch (cpp) {
case 1:
pitch_mask = 255;
break;
@@ -110,7 +110,7 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
aligned += pitch_mask;
aligned &= ~pitch_mask;
- return aligned;
+ return aligned * cpp;
}
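
The contract change is easy to miss: amdgpu_align_pitch() now takes bytes per pixel (cpp) and returns the pitch in bytes rather than pixels. A worked example, under the assumption (consistent with the cpp == 1 case shown) that the cpp == 4 branch uses pitch_mask = 63, i.e. 256-byte alignment:

    /* width = 1021, cpp = 4 (XRGB8888), untiled:
     *   aligned = (1021 + 63) & ~63 = 1024   (pixels)
     *   return  = 1024 * 4          = 4096   (bytes)
     * Callers therefore pass DIV_ROUND_UP(bpp, 8) and drop their own
     * "* ((bpp + 1) / 8)" scaling, as the amdgpu_fb.c and amdgpu_gem.c
     * hunks in this patch do. */
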
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -139,20 +139,21 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
- u32 bpp, depth;
+ u32 cpp;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
/* need to align pitch with crtc limits */
- mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp,
- fb_tiled) * ((bpp + 1) / 8);
+ mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
+ fb_tiled);
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = amdgpu_gem_object_create(adev, aligned_size, 0,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
true, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 77b34ec92632..97928d7281f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -48,7 +48,7 @@
*/
struct amdgpu_fence {
- struct fence base;
+ struct dma_fence base;
/* RB, DMA, etc. */
struct amdgpu_ring *ring;
@@ -74,8 +74,8 @@ void amdgpu_fence_slab_fini(void)
/*
* Cast helper
*/
-static const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+static const struct dma_fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
@@ -131,11 +131,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
- struct fence *old, **ptr;
+ struct dma_fence *old, **ptr;
uint32_t seq;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -144,10 +144,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
seq = ++ring->fence_drv.sync_seq;
fence->ring = ring;
- fence_init(&fence->base, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx,
- seq);
+ dma_fence_init(&fence->base, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx,
+ seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, AMDGPU_FENCE_FLAG_INT);
@@ -156,12 +156,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
* emitting the fence would mess up the hardware ring buffer.
*/
old = rcu_dereference_protected(*ptr, 1);
- if (old && !fence_is_signaled(old)) {
+ if (old && !dma_fence_is_signaled(old)) {
DRM_INFO("rcu slot is busy\n");
- fence_wait(old, false);
+ dma_fence_wait(old, false);
}
- rcu_assign_pointer(*ptr, fence_get(&fence->base));
+ rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
*f = &fence->base;
@@ -212,7 +212,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
seq &= drv->num_fences_mask;
do {
- struct fence *fence, **ptr;
+ struct dma_fence *fence, **ptr;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -225,13 +225,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
- r = fence_signal(fence);
+ r = dma_fence_signal(fence);
if (!r)
- FENCE_TRACE(fence, "signaled from irq context\n");
+ DMA_FENCE_TRACE(fence, "signaled from irq context\n");
else
BUG();
- fence_put(fence);
+ dma_fence_put(fence);
} while (last_seq != seq);
}
@@ -261,7 +261,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
- struct fence *fence, **ptr;
+ struct dma_fence *fence, **ptr;
int r;
if (!seq)
@@ -270,14 +270,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
rcu_read_lock();
fence = rcu_dereference(*ptr);
- if (!fence || !fence_get_rcu(fence)) {
+ if (!fence || !dma_fence_get_rcu(fence)) {
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
- r = fence_wait(fence, false);
- fence_put(fence);
+ r = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
return r;
}
@@ -453,7 +453,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
- fence_put(ring->fence_drv.fences[j]);
+ dma_fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
ring->fence_drv.fences = NULL;
ring->fence_drv.initialized = false;
@@ -542,12 +542,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
* Common fence implementation
*/
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
return "amdgpu";
}
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
return (const char *)fence->ring->name;
@@ -561,7 +561,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
-static bool amdgpu_fence_enable_signaling(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
@@ -569,7 +569,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+ DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
return true;
}
@@ -583,7 +583,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
*/
static void amdgpu_fence_free(struct rcu_head *rcu)
{
- struct fence *f = container_of(rcu, struct fence, rcu);
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amdgpu_fence *fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);
}
@@ -596,16 +596,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amdgpu_fence_release(struct fence *f)
+static void amdgpu_fence_release(struct dma_fence *f)
{
call_rcu(&f->rcu, amdgpu_fence_free);
}
-static const struct fence_ops amdgpu_fence_ops = {
+static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amdgpu_fence_release,
};
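
Everything in this file is a mechanical fence_* to dma_fence_* rename; the fence lifecycle itself is unchanged. A minimal sketch of that lifecycle with the new names, assuming a hypothetical example_fence_ops that, unlike amdgpu_fence_ops above, frees a bare struct dma_fence in its release hook:

    static DEFINE_SPINLOCK(example_lock);

    static struct dma_fence *example_fence_create(void)
    {
            static u64 context;
            static unsigned seqno;
            struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return NULL;
            if (!context)
                    context = dma_fence_context_alloc(1);
            /* example_fence_ops: hypothetical; must at least supply
             * get_driver_name and get_timeline_name */
            dma_fence_init(f, &example_fence_ops, &example_lock,
                           context, ++seqno);
            return f;       /* signal via dma_fence_signal(f), then
                             * release with dma_fence_put(f) */
    }
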
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 21a1242fc13b..964d2a946ed5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -126,7 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
if (adev->gart.robj == NULL) {
r = amdgpu_bo_create(adev, adev->gart.table_size,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->gart.robj);
if (r) {
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a7ea9a3b454e..cd62f6ffde2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
* Call from drm_gem_handle_create which appear in both new and open ioctl
* case.
*/
-int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int amdgpu_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = abo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
@@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
@@ -407,10 +408,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- if (timeout == 0)
- ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
- else
- ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);
+ ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
+ timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
@@ -470,6 +469,16 @@ out:
return r;
}
+static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
+{
+ unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+ /* if anything is swapped out don't swap it in here,
+ * just abort and wait for the next CS */
+
+ return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+}
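
amdgpu_gem_va_check() turns the open-coded duplicates scan removed below into a per-BO callback that amdgpu_vm_validate_pt_bos() (used later in this hunk) can apply to every page-table BO. A hedged sketch of that iterator shape; validate_all and the list member name are hypothetical:

    static int validate_all(struct list_head *pt_bos,
                            int (*check)(void *param, struct amdgpu_bo *bo),
                            void *param)
    {
            struct amdgpu_bo *bo;
            int r;

            list_for_each_entry(bo, pt_bos, example_node) { /* hypothetical member */
                    r = check(param, bo);   /* -ERESTARTSYS aborts the walk */
                    if (r)
                            return r;
            }
            return 0;
    }
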
+
/**
* amdgpu_gem_va_update_vm -update the bo_va in its VM
*
@@ -480,7 +489,8 @@ out:
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va, uint32_t operation)
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry vm_pd;
@@ -503,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_print;
- amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
@@ -511,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (domain == AMDGPU_GEM_DOMAIN_CPU)
goto error_unreserve;
}
- list_for_each_entry(entry, &duplicates, head) {
- domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
- /* if anything is swapped out don't swap it in here,
- just abort and wait for the next CS */
- if (domain == AMDGPU_GEM_DOMAIN_CPU)
- goto error_unreserve;
- }
+ r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
+ NULL);
+ if (r)
+ goto error_unreserve;
r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
if (r)
@@ -538,8 +544,6 @@ error_print:
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
-
-
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -549,7 +553,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
- struct ttm_validate_buffer tv, tv_pd;
+ struct amdgpu_bo_list_entry vm_pd;
+ struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint32_t invalid_flags, va_flags = 0;
@@ -594,9 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
tv.shared = true;
list_add(&tv.head, &list);
- tv_pd.bo = &fpriv->vm.page_directory->tbo;
- tv_pd.shared = true;
- list_add(&tv_pd.head, &list);
+ amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r) {
@@ -704,7 +707,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
uint32_t handle;
int r;
- args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->pitch = amdgpu_align_pitch(adev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index a074edd95c70..01a42b6a69a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -24,6 +24,7 @@
*/
#include <drm/drmP.h>
#include "amdgpu.h"
+#include "amdgpu_gfx.h"
/*
* GPU scratch registers helpers function.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 51321e154c09..e02044086445 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -27,6 +27,7 @@
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
-unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
+ unsigned max_sh);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f86c84427778..3c634f02a3d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -168,6 +168,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
return -ENOMEM;
node->start = AMDGPU_BO_INVALID_OFFSET;
+ node->size = mem->num_pages;
mem->mm_node = node;
if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 6a6c86c9c169..216a9572d946 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -89,7 +89,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Free an IB (all asics).
*/
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct fence *f)
+ struct dma_fence *f)
{
amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
* to SI there was just a DE IB.
*/
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ibs, struct fence *last_vm_update,
- struct amdgpu_job *job, struct fence **f)
+ struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
+ struct amdgpu_job *job, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
- alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
- num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+ alloc_size = ring->funcs->emit_frame_size + num_ibs *
+ ring->funcs->emit_ib_size;
r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
@@ -161,7 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return r;
}
- if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
+ if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 8c5807994073..a0de6286c453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -81,7 +81,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
- struct fence *f;
+ struct dma_fence *f;
unsigned i;
/* use sched fence if available */
@@ -95,7 +95,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
- fence_put(job->fence);
+ dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
kfree(job);
}
@@ -104,14 +104,14 @@ void amdgpu_job_free(struct amdgpu_job *job)
{
amdgpu_job_free_resources(job);
- fence_put(job->fence);
+ dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
kfree(job);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
- struct fence **f)
+ struct dma_fence **f)
{
int r;
job->ring = ring;
@@ -125,19 +125,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->owner = owner;
job->fence_ctx = entity->fence_context;
- *f = fence_get(&job->base.s_fence->finished);
+ *f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);
return 0;
}
-static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
- struct fence *fence = amdgpu_sync_get_fence(&job->sync);
+ struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
if (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring;
@@ -155,9 +155,9 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
return fence;
}
-static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
struct amdgpu_job *job;
int r;
@@ -176,8 +176,8 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
/* if gpu reset, hw fence will be replaced here */
- fence_put(job->fence);
- job->fence = fence_get(fence);
+ dma_fence_put(job->fence);
+ job->fence = dma_fence_get(fence);
amdgpu_job_free_resources(job);
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 203d98b00555..55c413a55a40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -306,10 +306,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (adev->ip_blocks[i].type == type &&
- adev->ip_block_status[i].valid) {
- ip.hw_ip_version_major = adev->ip_blocks[i].major;
- ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
+ if (adev->ip_blocks[i].version->type == type &&
+ adev->ip_blocks[i].status.valid) {
+ ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
+ ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
ip.capabilities_flags = 0;
ip.available_rings = ring_mask;
ip.ib_start_alignment = ib_start_alignment;
@@ -345,8 +345,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
for (i = 0; i < adev->num_ip_blocks; i++)
- if (adev->ip_blocks[i].type == type &&
- adev->ip_block_status[i].valid &&
+ if (adev->ip_blocks[i].version->type == type &&
+ adev->ip_blocks[i].status.valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++;
@@ -411,6 +411,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_MEMORY: {
+ struct drm_amdgpu_memory_info mem;
+
+ memset(&mem, 0, sizeof(mem));
+ mem.vram.total_heap_size = adev->mc.real_vram_size;
+ mem.vram.usable_heap_size =
+ adev->mc.real_vram_size - adev->vram_pin_size;
+ mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+ mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+
+ mem.cpu_accessible_vram.total_heap_size =
+ adev->mc.visible_vram_size;
+ mem.cpu_accessible_vram.usable_heap_size =
+ adev->mc.visible_vram_size -
+ (adev->vram_pin_size - adev->invisible_pin_size);
+ mem.cpu_accessible_vram.heap_usage =
+ atomic64_read(&adev->vram_vis_usage);
+ mem.cpu_accessible_vram.max_allocation =
+ mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
+
+ mem.gtt.total_heap_size = adev->mc.gtt_size;
+ mem.gtt.usable_heap_size =
+ adev->mc.gtt_size - adev->gart_pin_size;
+ mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+ mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+
+ return copy_to_user(out, &mem,
+ min((size_t)size, sizeof(mem)))
+ ? -EFAULT : 0;
+ }
case AMDGPU_INFO_READ_MMR_REG: {
unsigned n, alloc_size;
uint32_t *regs;
@@ -473,6 +503,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+ if (amdgpu_sriov_vf(adev))
+ dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
@@ -492,6 +524,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+ unsigned i;
+ struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+ struct amd_vce_state *vce_state;
+
+ for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
+ vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
+ if (vce_state) {
+ vce_clk_table.entries[i].sclk = vce_state->sclk;
+ vce_clk_table.entries[i].mclk = vce_state->mclk;
+ vce_clk_table.entries[i].eclk = vce_state->evclk;
+ vce_clk_table.num_valid_entries++;
+ }
+ }
+
+ return copy_to_user(out, &vce_clk_table,
+ min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
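
The new AMDGPU_INFO_MEMORY request is reachable from userspace through libdrm's generic info helper. A hedged sketch, assuming a libdrm and amdgpu_drm.h new enough to carry this query:

    #include <stdio.h>
    #include <string.h>
    #include <amdgpu.h>
    #include <amdgpu_drm.h>

    static void print_mem_info(amdgpu_device_handle dev)
    {
            struct drm_amdgpu_memory_info mem;

            memset(&mem, 0, sizeof(mem));
            if (amdgpu_query_info(dev, AMDGPU_INFO_MEMORY,
                                  sizeof(mem), &mem))
                    return;
            printf("VRAM: %llu of %llu bytes used\n",
                   (unsigned long long)mem.vram.heap_usage,
                   (unsigned long long)mem.vram.total_heap_size);
    }
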
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 32fa7b7913f7..7ea3cacf9f9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -285,7 +285,7 @@ free_rmn:
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
unsigned long end = addr + amdgpu_bo_size(bo) - 1;
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_mn *rmn;
struct amdgpu_mn_node *node = NULL;
struct list_head bos;
@@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
*/
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_mn *rmn;
struct list_head *head;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 7b0eff7d060b..1e23334b07fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -341,8 +341,6 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
- struct hrtimer vblank_timer;
- enum amdgpu_interrupt_state vsync_timer_enabled;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -413,6 +411,9 @@ struct amdgpu_crtc {
u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
+ /* for virtual dce */
+ struct hrtimer vblank_timer;
+ enum amdgpu_interrupt_state vsync_timer_enabled;
};
struct amdgpu_encoder_atom_dig {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index f3efb1c5dae9..1479d09bd4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo;
bo = container_of(tbo, struct amdgpu_bo, tbo);
- amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
+ amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
if (!list_empty(&bo->shadow_list)) {
- mutex_lock(&bo->adev->shadow_list_lock);
+ mutex_lock(&adev->shadow_list_lock);
list_del_init(&bo->shadow_list);
- mutex_unlock(&bo->adev->shadow_list_lock);
+ mutex_unlock(&adev->shadow_list_lock);
}
kfree(bo->metadata);
kfree(bo);
@@ -121,12 +122,17 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+ unsigned lpfn = 0;
+
+ /* This forces a reallocation if the flag wasn't set before */
+ if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
adev->mc.visible_vram_size < adev->mc.real_vram_size) {
places[c].fpfn = visible_pfn;
- places[c].lpfn = 0;
+ places[c].lpfn = lpfn;
places[c].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_TOPDOWN;
@@ -134,7 +140,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
}
places[c].fpfn = 0;
- places[c].lpfn = 0;
+ places[c].lpfn = lpfn;
places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
@@ -205,8 +211,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
- amdgpu_ttm_placement_init(abo->adev, &abo->placement,
- abo->placements, domain, abo->flags);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+
+ amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
+ domain, abo->flags);
}
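
The placement change above is the core of the AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS handling: a contiguous request caps lpfn at the end of VRAM, which also forces TTM to revalidate a BO that was first placed without the flag (as amdgpu_bo_pin_restricted() relies on below when it ORs the flag in). In sketch form, assuming adev and flags are in scope:

    /* lpfn == 0 means "no upper bound": TTM may then back the BO with
     * scattered VRAM pages from the new VRAM manager, which the CPU
     * fault path cannot map yet. */
    unsigned lpfn = 0;

    if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
            lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
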
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -245,7 +253,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
int r;
r = amdgpu_bo_create(adev, size, align, true, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
@@ -351,7 +360,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
kfree(bo);
return r;
}
- bo->adev = adev;
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -383,7 +391,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
- struct fence *fence;
+ struct dma_fence *fence;
if (adev->mman.buffer_funcs_ring == NULL ||
!adev->mman.buffer_funcs_ring->ready) {
@@ -403,9 +411,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
amdgpu_bo_fence(bo, fence, false);
amdgpu_bo_unreserve(bo);
- fence_put(bo->tbo.moving);
- bo->tbo.moving = fence_get(fence);
- fence_put(fence);
+ dma_fence_put(bo->tbo.moving);
+ bo->tbo.moving = dma_fence_get(fence);
+ dma_fence_put(fence);
}
*bo_ptr = bo;
@@ -491,7 +499,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct)
{
@@ -523,7 +531,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct)
{
@@ -616,6 +624,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset,
u64 *gpu_addr)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
unsigned fpfn, lpfn;
@@ -643,18 +652,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
+
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
(!max_offset || max_offset >
- bo->adev->mc.visible_vram_size)) {
+ adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset >
- bo->adev->mc.visible_vram_size))
+ adev->mc.visible_vram_size))
return -EINVAL;
fpfn = min_offset >> PAGE_SHIFT;
- lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
+ lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
} else {
fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT;
@@ -669,12 +680,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p pin failed\n", bo);
+ dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p bind failed\n", bo);
+ dev_err(adev->dev, "%p bind failed\n", bo);
goto error;
}
@@ -682,11 +693,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+ adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ adev->invisible_pin_size += amdgpu_bo_size(bo);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+ adev->gart_pin_size += amdgpu_bo_size(bo);
}
error:
@@ -700,10 +711,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
if (!bo->pin_count) {
- dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
+ dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0;
}
bo->pin_count--;
@@ -715,16 +727,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r)) {
- dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+ dev_err(adev->dev, "%p validate failed for unpin\n", bo);
goto error;
}
if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+ adev->vram_pin_size -= amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ adev->invisible_pin_size -= amdgpu_bo_size(bo);
} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+ adev->gart_pin_size -= amdgpu_bo_size(bo);
}
error:
@@ -854,6 +866,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -861,21 +874,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
abo = container_of(bo, struct amdgpu_bo, tbo);
- amdgpu_vm_bo_invalidate(abo->adev, abo);
+ amdgpu_vm_bo_invalidate(adev, abo);
/* update statistics */
if (!new_mem)
return;
/* move_notify is called before move happens */
- amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
+ amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct amdgpu_device *adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
unsigned long offset, size, lpfn;
int i, r;
@@ -884,13 +897,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
abo = container_of(bo, struct amdgpu_bo, tbo);
- adev = abo->adev;
if (bo->mem.mem_type != TTM_PL_VRAM)
return 0;
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) <= adev->mc.visible_vram_size)
+ /* TODO: figure out how to map scattered VRAM to the CPU */
+ if ((offset + size) <= adev->mc.visible_vram_size &&
+ (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -898,6 +912,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return -EINVAL;
/* hurrah the memory is not visible ! */
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < abo->placement.num_placement; i++) {
@@ -931,7 +946,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
* @shared: true if fence should be added shared
*
*/
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
struct reservation_object *resv = bo->tbo.resv;
@@ -959,6 +974,8 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
!bo->pin_count);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
return bo->tbo.offset;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 8255034d73eb..5cbf59ec0f68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
*/
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
- dev_err(bo->adev->dev, "%p reserve failed\n", bo);
+ dev_err(adev->dev, "%p reserve failed\n", bo);
return r;
}
return 0;
@@ -156,19 +157,19 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence, bool direct);
+ struct dma_fence **fence, bool direct);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct reservation_object *resv,
- struct fence **fence,
+ struct dma_fence **fence,
bool direct);
@@ -200,7 +201,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
struct amdgpu_sa_bo **sa_bo,
- struct fence *fence);
+ struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index accc908bdc88..274f3309aec9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -986,10 +986,10 @@ restart_search:
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
- int i;
struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state;
int ret;
+ bool equal;
/* if dpm init failed */
if (!adev->pm.dpm_enabled)
@@ -1009,46 +1009,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
else
return;
- /* no need to reprogram if nothing changed unless we are on BTC+ */
- if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
- /* vce just modifies an existing state so force a change */
- if (ps->vce_active != adev->pm.dpm.vce_active)
- goto force;
- if (adev->flags & AMD_IS_APU) {
- /* for APUs if the num crtcs changed but state is the same,
- * all we need to do is update the display configuration.
- */
- if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- }
- return;
- } else {
- /* for BTC+ if the num crtcs hasn't changed and state is the same,
- * nothing to do, if the num crtcs is > 1 and state is the same,
- * update display configuration.
- */
- if (adev->pm.dpm.new_active_crtcs ==
- adev->pm.dpm.current_active_crtcs) {
- return;
- } else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
- (adev->pm.dpm.new_active_crtc_count > 1)) {
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- return;
- }
- }
- }
-
-force:
if (amdgpu_dpm == 1) {
printk("switching from power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
@@ -1059,31 +1019,21 @@ force:
/* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active;
+ amdgpu_dpm_display_configuration_changed(adev);
+
ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret)
return;
- /* update display watermarks based on new power state */
- amdgpu_display_bandwidth_update(adev);
+ if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
+ equal = false;
- /* wait for the rings to drain */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
- amdgpu_fence_wait_empty(ring);
- }
+ if (equal)
+ return;
- /* program the new power state */
amdgpu_dpm_set_power_state(adev);
-
- /* update current power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;
-
amdgpu_dpm_post_set_power_state(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
-
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
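
Distilled, the reworked function now runs as below; the BTC+/APU special cases and the ring-drain loop are gone, replaced by a backend equality check (amgdpu_dpm_check_state_equal is spelled that way in the tree). A sketch of the resulting control flow only, not the verbatim function:

    amdgpu_dpm_display_configuration_changed(adev);
    if (amdgpu_dpm_pre_set_power_state(adev))
            return;
    if (amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps,
                                     adev->pm.dpm.requested_ps, &equal))
            equal = false;          /* backend failed: assume a change */
    if (equal)
            return;                 /* nothing to reprogram */
    amdgpu_dpm_set_power_state(adev);
    amdgpu_dpm_post_set_power_state(adev);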
@@ -1135,7 +1085,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = true;
/* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
} else {
mutex_lock(&adev->pm.mutex);
@@ -1276,20 +1226,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
+ int i = 0;
if (!adev->pm.dpm_enabled)
return;
- if (adev->pp_enabled) {
- int i = 0;
+ amdgpu_display_bandwidth_update(adev);
- amdgpu_display_bandwidth_update(adev);
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
- amdgpu_fence_wait_empty(ring);
- }
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+ if (adev->pp_enabled) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else {
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 7532ff822aa7..fa6baf31a35d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle)
return ret;
}
-const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
+static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.name = "amdgpu_powerplay",
.early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init,
@@ -316,3 +316,12 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.set_clockgating_state = amdgpu_pp_set_clockgating_state,
.set_powergating_state = amdgpu_pp_set_powergating_state,
};
+
+const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+};
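
With amdgpu_pp_ip_funcs made static, other code is expected to hook powerplay up through the exported block descriptor rather than the bare funcs table. A hedged usage sketch; amdgpu_ip_block_add() is assumed from the same series, and its exact signature may differ:

    r = amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
    if (r)
            return r;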
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
index da5cf47cfd99..c0c4bfdcdb14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
@@ -23,11 +23,11 @@
*
*/
-#ifndef __AMDGPU_POPWERPLAY_H__
-#define __AMDGPU_POPWERPLAY_H__
+#ifndef __AMDGPU_POWERPLAY_H__
+#define __AMDGPU_POWERPLAY_H__
#include "amd_shared.h"
-extern const struct amd_ip_funcs amdgpu_pp_ip_funcs;
+extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
-#endif /* __AMDSOC_DM_H__ */
+#endif /* __AMDGPU_POWERPLAY_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3cb5e903cd62..4c992826d2d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
/* Align requested size with padding so unlock_commit can
* pad safely */
- ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+ ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
/* Make sure we aren't trying to allocate more space
* than the maximum for one submission
@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
int i;
for (i = 0; i < count; i++)
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/** amdgpu_ring_generic_pad_ib - pad IB with NOP packets
@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- while (ib->length_dw & ring->align_mask)
- ib->ptr[ib->length_dw++] = ring->nop;
+ while (ib->length_dw & ring->funcs->align_mask)
+ ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
/**
@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
uint32_t count;
/* We pad to match fetch size */
- count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
- count %= ring->align_mask + 1;
+ count = ring->funcs->align_mask + 1 -
+ (ring->wptr & ring->funcs->align_mask);
+ count %= ring->funcs->align_mask + 1;
ring->funcs->insert_nop(ring, count);
mb();
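
The padding arithmetic above is easy to misread; a worked instance, assuming an 8-dword fetch size (align_mask = 7):

    count = ring->funcs->align_mask + 1 - (ring->wptr & ring->funcs->align_mask);
    count %= ring->funcs->align_mask + 1;
    /* wptr = 13: count = 8 - 5 = 3 NOPs, next fetch starts aligned at 16 */
    /* wptr = 16: count = 8 - 0 = 8, then 8 % 8 = 0, no padding emitted  */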
@@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned max_dw, u32 nop, u32 align_mask,
- struct amdgpu_irq_src *irq_src, unsigned irq_type,
- enum amdgpu_ring_type ring_type)
+ unsigned max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned irq_type)
{
int r;
@@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->ring_size = roundup_pow_of_two(max_dw * 4 *
amdgpu_sched_hw_submission);
- ring->align_mask = align_mask;
- ring->nop = nop;
- ring->type = ring_type;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
new file mode 100644
index 000000000000..f2ad49c8e85b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_RING_H__
+#define __AMDGPU_RING_H__
+
+#include "gpu_scheduler.h"
+
+/* max number of rings */
+#define AMDGPU_MAX_RINGS 16
+#define AMDGPU_MAX_GFX_RINGS 1
+#define AMDGPU_MAX_COMPUTE_RINGS 8
+#define AMDGPU_MAX_VCE_RINGS 3
+
+/* some special values for the owner field */
+#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
+#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
+
+#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
+#define AMDGPU_FENCE_FLAG_INT (1 << 1)
+
+enum amdgpu_ring_type {
+ AMDGPU_RING_TYPE_GFX,
+ AMDGPU_RING_TYPE_COMPUTE,
+ AMDGPU_RING_TYPE_SDMA,
+ AMDGPU_RING_TYPE_UVD,
+ AMDGPU_RING_TYPE_VCE
+};
+
+struct amdgpu_device;
+struct amdgpu_ring;
+struct amdgpu_ib;
+struct amdgpu_cs_parser;
+
+/*
+ * Fences.
+ */
+struct amdgpu_fence_driver {
+ uint64_t gpu_addr;
+ volatile uint32_t *cpu_addr;
+ /* sync_seq is protected by ring emission lock */
+ uint32_t sync_seq;
+ atomic_t last_seq;
+ bool initialized;
+ struct amdgpu_irq_src *irq_src;
+ unsigned irq_type;
+ struct timer_list fallback_timer;
+ unsigned num_fences_mask;
+ spinlock_t lock;
+ struct dma_fence **fences;
+};
+
+int amdgpu_fence_driver_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
+void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ unsigned num_hw_submission);
+int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+ struct amdgpu_irq_src *irq_src,
+ unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
+void amdgpu_fence_process(struct amdgpu_ring *ring);
+int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
+
+/*
+ * Rings.
+ */
+
+/* provided by hw blocks that expose a ring buffer for commands */
+struct amdgpu_ring_funcs {
+ enum amdgpu_ring_type type;
+ uint32_t align_mask;
+ u32 nop;
+
+ /* ring read/write ptr handling */
+ u32 (*get_rptr)(struct amdgpu_ring *ring);
+ u32 (*get_wptr)(struct amdgpu_ring *ring);
+ void (*set_wptr)(struct amdgpu_ring *ring);
+ /* validating and patching of IBs */
+ int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ /* constants to calculate how many DW are needed for an emit */
+ unsigned emit_frame_size;
+ unsigned emit_ib_size;
+ /* command emit functions */
+ void (*emit_ib)(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch);
+ void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
+ uint64_t seq, unsigned flags);
+ void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
+ void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+ uint64_t pd_addr);
+ void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+ void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
+ void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size);
+ /* testing functions */
+ int (*test_ring)(struct amdgpu_ring *ring);
+ int (*test_ib)(struct amdgpu_ring *ring, long timeout);
+ /* insert NOP packets */
+ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+ /* pad the indirect buffer to the necessary number of dw */
+ void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+ unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
+ void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+ /* note usage for clock and power gating */
+ void (*begin_use)(struct amdgpu_ring *ring);
+ void (*end_use)(struct amdgpu_ring *ring);
+ void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+ void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+};
+
+struct amdgpu_ring {
+ struct amdgpu_device *adev;
+ const struct amdgpu_ring_funcs *funcs;
+ struct amdgpu_fence_driver fence_drv;
+ struct amd_gpu_scheduler sched;
+
+ struct amdgpu_bo *ring_obj;
+ volatile uint32_t *ring;
+ unsigned rptr_offs;
+ unsigned wptr;
+ unsigned wptr_old;
+ unsigned ring_size;
+ unsigned max_dw;
+ int count_dw;
+ uint64_t gpu_addr;
+ uint32_t ptr_mask;
+ bool ready;
+ u32 idx;
+ u32 me;
+ u32 pipe;
+ u32 queue;
+ struct amdgpu_bo *mqd_obj;
+ u32 doorbell_index;
+ bool use_doorbell;
+ unsigned wptr_offs;
+ unsigned fence_offs;
+ uint64_t current_ctx;
+ char name[16];
+ unsigned cond_exe_offs;
+ u64 cond_exe_gpu_addr;
+ volatile u32 *cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *ent;
+#endif
+};
+
+int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+void amdgpu_ring_commit(struct amdgpu_ring *ring);
+void amdgpu_ring_undo(struct amdgpu_ring *ring);
+int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ unsigned ring_size, struct amdgpu_irq_src *irq_src,
+ unsigned irq_type);
+void amdgpu_ring_fini(struct amdgpu_ring *ring);
+
+#endif
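
With type, align_mask and nop folded into amdgpu_ring_funcs, each engine now declares them once in its const funcs table instead of passing them to amdgpu_ring_init(). A hypothetical declaration matching the new layout; the values and hook choices are illustrative, not taken from this patch:

    static const struct amdgpu_ring_funcs example_ring_funcs = {
            .type = AMDGPU_RING_TYPE_GFX,
            .align_mask = 0xff,     /* illustrative fetch alignment */
            .nop = 0,               /* engine-specific NOP packet */
            .insert_nop = amdgpu_ring_insert_nop,
            .pad_ib = amdgpu_ring_generic_pad_ib,
            /* remaining hooks elided */
    };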
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index d8af37a845f4..fd26c4b8d793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -147,7 +147,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
}
list_del_init(&sa_bo->olist);
list_del_init(&sa_bo->flist);
- fence_put(sa_bo->fence);
+ dma_fence_put(sa_bo->fence);
kfree(sa_bo);
}
@@ -161,7 +161,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
if (sa_bo->fence == NULL ||
- !fence_is_signaled(sa_bo->fence)) {
+ !dma_fence_is_signaled(sa_bo->fence)) {
return;
}
amdgpu_sa_bo_remove_locked(sa_bo);
@@ -244,7 +244,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
}
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
- struct fence **fences,
+ struct dma_fence **fences,
unsigned *tries)
{
struct amdgpu_sa_bo *best_bo = NULL;
@@ -272,7 +272,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
sa_bo = list_first_entry(&sa_manager->flist[i],
struct amdgpu_sa_bo, flist);
- if (!fence_is_signaled(sa_bo->fence)) {
+ if (!dma_fence_is_signaled(sa_bo->fence)) {
fences[i] = sa_bo->fence;
continue;
}
@@ -314,7 +314,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
{
- struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+ struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned count;
int i, r;
@@ -356,14 +356,14 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
if (fences[i])
- fences[count++] = fence_get(fences[i]);
+ fences[count++] = dma_fence_get(fences[i]);
if (count) {
spin_unlock(&sa_manager->wq.lock);
- t = fence_wait_any_timeout(fences, count, false,
- MAX_SCHEDULE_TIMEOUT);
+ t = dma_fence_wait_any_timeout(fences, count, false,
+ MAX_SCHEDULE_TIMEOUT);
for (i = 0; i < count; ++i)
- fence_put(fences[i]);
+ dma_fence_put(fences[i]);
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
@@ -384,7 +384,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
}
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct amdgpu_sa_manager *sa_manager;
@@ -394,10 +394,10 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
sa_manager = (*sa_bo)->manager;
spin_lock(&sa_manager->wq.lock);
- if (fence && !fence_is_signaled(fence)) {
+ if (fence && !dma_fence_is_signaled(fence)) {
uint32_t idx;
- (*sa_bo)->fence = fence_get(fence);
+ (*sa_bo)->fence = dma_fence_get(fence);
idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5c8d3022fb87..ed814e6d0207 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -34,7 +34,7 @@
struct amdgpu_sync_entry {
struct hlist_node node;
- struct fence *fence;
+ struct dma_fence *fence;
};
static struct kmem_cache *amdgpu_sync_slab;
@@ -60,7 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
*
* Test if the fence was issued by us.
*/
-static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
+ struct dma_fence *f)
{
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@@ -81,7 +82,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
*
* Extract who originally created the fence.
*/
-static void *amdgpu_sync_get_owner(struct fence *f)
+static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
@@ -99,13 +100,14 @@ static void *amdgpu_sync_get_owner(struct fence *f)
*
* Either keep the existing fence or the new one, depending which one is later.
*/
-static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
+static void amdgpu_sync_keep_later(struct dma_fence **keep,
+ struct dma_fence *fence)
{
- if (*keep && fence_is_later(*keep, fence))
+ if (*keep && dma_fence_is_later(*keep, fence))
return;
- fence_put(*keep);
- *keep = fence_get(fence);
+ dma_fence_put(*keep);
+ *keep = dma_fence_get(fence);
}
/**
@@ -117,7 +119,7 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
@@ -139,7 +141,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
*
*/
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct fence *f)
+ struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
@@ -158,7 +160,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return -ENOMEM;
hash_add(sync->fences, &e->node, f->context);
- e->fence = fence_get(f);
+ e->fence = dma_fence_get(f);
return 0;
}
@@ -177,7 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
void *owner)
{
struct reservation_object_list *flist;
- struct fence *f;
+ struct dma_fence *f;
void *fence_owner;
unsigned i;
int r = 0;
@@ -231,15 +233,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
* Returns the next fence not signaled yet without removing it from the sync
* object.
*/
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
- struct amdgpu_ring *ring)
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
- struct fence *f = e->fence;
+ struct dma_fence *f = e->fence;
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
if (ring && s_fence) {
@@ -247,16 +249,16 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
* when they are scheduled.
*/
if (s_fence->sched == &ring->sched) {
- if (fence_is_signaled(&s_fence->scheduled))
+ if (dma_fence_is_signaled(&s_fence->scheduled))
continue;
return &s_fence->scheduled;
}
}
- if (fence_is_signaled(f)) {
+ if (dma_fence_is_signaled(f)) {
hash_del(&e->node);
- fence_put(f);
+ dma_fence_put(f);
kmem_cache_free(amdgpu_sync_slab, e);
continue;
}
@@ -274,11 +276,11 @@ struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
*
* Get and removes the next fence from the sync object not signaled yet.
*/
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
- struct fence *f;
+ struct dma_fence *f;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
@@ -288,10 +290,10 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
hash_del(&e->node);
kmem_cache_free(amdgpu_sync_slab, e);
- if (!fence_is_signaled(f))
+ if (!dma_fence_is_signaled(f))
return f;
- fence_put(f);
+ dma_fence_put(f);
}
return NULL;
}
@@ -311,11 +313,11 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
hash_for_each_safe(sync->fences, i, tmp, e, node) {
hash_del(&e->node);
- fence_put(e->fence);
+ dma_fence_put(e->fence);
kmem_cache_free(amdgpu_sync_slab, e);
}
- fence_put(sync->last_vm_update);
+ dma_fence_put(sync->last_vm_update);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
new file mode 100644
index 000000000000..605be266e07f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_SYNC_H__
+#define __AMDGPU_SYNC_H__
+
+#include <linux/hashtable.h>
+
+struct dma_fence;
+struct reservation_object;
+struct amdgpu_device;
+struct amdgpu_ring;
+
+/*
+ * Container for fences used to sync command submissions.
+ */
+struct amdgpu_sync {
+ DECLARE_HASHTABLE(fences, 4);
+ struct dma_fence *last_vm_update;
+};
+
+void amdgpu_sync_create(struct amdgpu_sync *sync);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_fence *f);
+int amdgpu_sync_resv(struct amdgpu_device *adev,
+ struct amdgpu_sync *sync,
+ struct reservation_object *resv,
+ void *owner);
+struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
+
+#endif
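
A minimal usage sketch of the API declared above; only the prototypes come from this header, the call site is invented for illustration:

    struct amdgpu_sync sync;
    struct dma_fence *f;

    amdgpu_sync_create(&sync);
    amdgpu_sync_resv(adev, &sync, bo->tbo.resv, AMDGPU_FENCE_OWNER_UNDEFINED);
    /* get_fence transfers ownership of each still-unsignaled fence */
    while ((f = amdgpu_sync_get_fence(&sync))) {
            dma_fence_wait(f, false);
            dma_fence_put(f);
    }
    amdgpu_sync_free(&sync);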
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b827c75e95de..e05a24325eeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -78,7 +78,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void *gtt_map, *vram_map;
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
@@ -118,13 +118,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
goto out_lclean_unpin;
}
- fence_put(fence);
+ dma_fence_put(fence);
r = amdgpu_bo_kmap(vram_obj, &vram_map);
if (r) {
@@ -163,13 +163,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- r = fence_wait(fence, false);
+ r = dma_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
goto out_lclean_unpin;
}
- fence_put(fence);
+ dma_fence_put(fence);
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
@@ -216,7 +216,7 @@ out_lclean:
amdgpu_bo_unref(&gtt_obj[i]);
}
if (fence)
- fence_put(fence);
+ dma_fence_put(fence);
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 067e5e683bb3..bb964a8ff938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -104,7 +104,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
@@ -129,7 +129,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__field(struct amdgpu_device *, adev)
__field(struct amd_sched_job *, sched_job)
__field(struct amdgpu_ib *, ib)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(char *, ring_name)
__field(u32, num_ibs)
),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dcaf691f56b5..1821c05484d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -51,16 +51,6 @@
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
-{
- struct amdgpu_mman *mman;
- struct amdgpu_device *adev;
-
- mman = container_of(bdev, struct amdgpu_mman, bdev);
- adev = container_of(mman, struct amdgpu_device, mman);
- return adev;
-}
-
/*
* Global memory.
@@ -150,7 +140,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
{
struct amdgpu_device *adev;
- adev = amdgpu_get_adev(bdev);
+ adev = amdgpu_ttm_adev(bdev);
switch (type) {
case TTM_PL_SYSTEM:
@@ -168,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->func = &ttm_bo_manager_func;
+ man->func = &amdgpu_vram_mgr_func;
man->gpu_offset = adev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -195,6 +185,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
static struct ttm_place placements = {
.fpfn = 0,
@@ -213,7 +204,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo = container_of(bo, struct amdgpu_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+ if (adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@@ -229,7 +220,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
* allocating address space for the BO.
*/
abo->placements[i].lpfn =
- abo->adev->mc.gtt_size >> PAGE_SHIFT;
+ adev->mc.gtt_size >> PAGE_SHIFT;
}
}
break;
@@ -260,63 +251,115 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
new_mem->mm_node = NULL;
}
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ struct drm_mm_node *mm_node,
+ struct ttm_mem_reg *mem,
+ uint64_t *addr)
{
- struct amdgpu_device *adev;
- struct amdgpu_ring *ring;
- uint64_t old_start, new_start;
- struct fence *fence;
int r;
- adev = amdgpu_get_adev(bo->bdev);
- ring = adev->mman.buffer_funcs_ring;
-
- switch (old_mem->mem_type) {
+ switch (mem->mem_type) {
case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, old_mem);
+ r = amdgpu_ttm_bind(bo, mem);
if (r)
return r;
case TTM_PL_VRAM:
- old_start = (u64)old_mem->start << PAGE_SHIFT;
- old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
+ *addr = mm_node->start << PAGE_SHIFT;
+ *addr += bo->bdev->man[mem->mem_type].gpu_offset;
break;
default:
- DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ DRM_ERROR("Unknown placement %d\n", mem->mem_type);
return -EINVAL;
}
- switch (new_mem->mem_type) {
- case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, new_mem);
- if (r)
- return r;
- case TTM_PL_VRAM:
- new_start = (u64)new_mem->start << PAGE_SHIFT;
- new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
- return -EINVAL;
- }
+ return 0;
+}
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+ struct drm_mm_node *old_mm, *new_mm;
+ uint64_t old_start, old_size, new_start, new_size;
+ unsigned long num_pages;
+ struct dma_fence *fence = NULL;
+ int r;
+
+ BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+
if (!ring->ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+ old_mm = old_mem->mm_node;
+ r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
+ if (r)
+ return r;
+ old_size = old_mm->size;
+
- r = amdgpu_copy_buffer(ring, old_start, new_start,
- new_mem->num_pages * PAGE_SIZE, /* bytes */
- bo->resv, &fence, false);
+ new_mm = new_mem->mm_node;
+ r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
if (r)
return r;
+ new_size = new_mm->size;
+
+ num_pages = new_mem->num_pages;
+ while (num_pages) {
+ unsigned long cur_pages = min(old_size, new_size);
+ struct dma_fence *next;
+
+ r = amdgpu_copy_buffer(ring, old_start, new_start,
+ cur_pages * PAGE_SIZE,
+ bo->resv, &next, false);
+ if (r)
+ goto error;
+
+ dma_fence_put(fence);
+ fence = next;
+
+ num_pages -= cur_pages;
+ if (!num_pages)
+ break;
+
+ old_size -= cur_pages;
+ if (!old_size) {
+ r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
+ &old_start);
+ if (r)
+ goto error;
+ old_size = old_mm->size;
+ } else {
+ old_start += cur_pages * PAGE_SIZE;
+ }
+
+ new_size -= cur_pages;
+ if (!new_size) {
+ r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
+ &new_start);
+ if (r)
+ goto error;
+
+ new_size = new_mm->size;
+ } else {
+ new_start += cur_pages * PAGE_SIZE;
+ }
+ }
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
- fence_put(fence);
+ dma_fence_put(fence);
+ return r;
+
+error:
+ if (fence)
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
return r;
}
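
The rewritten blit path exists because the new VRAM manager (amdgpu_vram_mgr_func, hooked up above) can hand back several drm_mm nodes per BO, so one move becomes a chain of sub-copies of min(old_size, new_size) pages. A worked trace with invented sizes: num_pages = 8, old placement one 8-page node, new placement nodes of 5 + 3 pages. Iteration 1 copies min(8, 5) = 5 pages, advances old_start within the old node and steps to the 3-page node; iteration 2 copies min(3, 3) = 3 pages and finishes. Each iteration drops the previous fence, so only the final copy fence reaches ttm_bo_pipeline_move().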
@@ -332,7 +375,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
struct ttm_placement placement;
int r;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -379,7 +422,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
struct ttm_place placements;
int r;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -422,7 +465,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
if (WARN_ON_ONCE(abo->pin_count > 0))
return -EINVAL;
- adev = amdgpu_get_adev(bo->bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
/* remember the eviction */
if (evict)
@@ -475,7 +518,7 @@ memcpy:
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct amdgpu_device *adev = amdgpu_get_adev(bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
mem->bus.addr = NULL;
mem->bus.offset = 0;
@@ -607,7 +650,7 @@ release_pages:
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned nents;
int r;
@@ -639,7 +682,7 @@ release_sg:
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct sg_page_iter sg_iter;
@@ -799,7 +842,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt;
- adev = amdgpu_get_adev(bdev);
+ adev = amdgpu_ttm_adev(bdev);
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
@@ -843,7 +886,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
return 0;
}
- adev = amdgpu_get_adev(ttm->bdev);
+ adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -889,7 +932,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (slave)
return;
- adev = amdgpu_get_adev(ttm->bdev);
+ adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -1012,7 +1055,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
unsigned i, j;
for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
@@ -1029,7 +1072,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
unsigned log2_size = min(ilog2(tbo->num_pages),
AMDGPU_TTM_LRU_SIZE - 1);
@@ -1060,12 +1103,37 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
return res;
}
+static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+{
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
+ unsigned long num_pages = bo->mem.num_pages;
+ struct drm_mm_node *node = bo->mem.mm_node;
+
+ /* Check each drm MM node individually */
+ while (num_pages) {
+ if (place->fpfn < (node->start + node->size) &&
+ !(place->lpfn && place->lpfn <= node->start))
+ return true;
+
+ num_pages -= node->size;
+ ++node;
+ }
+
+ return false;
+ }
+
+ return ttm_bo_eviction_valuable(bo, place);
+}
+
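+/* Example of the per-node overlap test above, with invented numbers: a
+ * scattered BO holding nodes [0, 16) and [64, 80) pages is worth evicting
+ * for a request with fpfn = 8, lpfn = 12 (it overlaps the first node),
+ * but not for fpfn = 32, lpfn = 48, which falls entirely in the gap
+ * between the two nodes, so evicting the BO would free nothing useful. */
+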
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
.invalidate_caches = &amdgpu_invalidate_caches,
.init_mem_type = &amdgpu_init_mem_type,
+ .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
.verify_access = &amdgpu_verify_access,
@@ -1119,7 +1187,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->stollen_vga_memory);
if (r) {
return r;
@@ -1247,7 +1316,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence, bool direct_submit)
+ struct dma_fence **fence, bool direct_submit)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -1294,7 +1363,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
if (direct_submit) {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
NULL, NULL, fence);
- job->fence = fence_get(*fence);
+ job->fence = dma_fence_get(*fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
amdgpu_job_free(job);
@@ -1315,9 +1384,9 @@ error_free:
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct reservation_object *resv,
- struct fence **fence)
+ struct dma_fence **fence)
{
- struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_job *job;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 9812c805326c..98ee384f0fca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -66,6 +66,7 @@ struct amdgpu_mman {
};
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *tbo,
@@ -77,11 +78,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence, bool direct_submit);
+ struct dma_fence **fence, bool direct_submit);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct reservation_object *resv,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index cb3d252f3c78..0f0b38191fac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
ucode->mc_addr = mc_addr;
ucode->kaddr = kptr;
+ if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
+ return 0;
+
header = (const struct common_firmware_header *)ucode->fw->data;
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
le32_to_cpu(header->ucode_array_offset_bytes)),
@@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
return 0;
}
+static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
+ uint64_t mc_addr, void *kptr)
+{
+ const struct gfx_firmware_header_v1_0 *header = NULL;
+ const struct common_firmware_header *comm_hdr = NULL;
+ uint8_t* src_addr = NULL;
+ uint8_t* dst_addr = NULL;
+
+ if (NULL == ucode->fw)
+ return 0;
+
+ comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
+ header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ dst_addr = ucode->kaddr +
+ ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
+ PAGE_SIZE);
+ src_addr = (uint8_t *)ucode->fw->data +
+ le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
+ (le32_to_cpu(header->jt_offset) * 4);
+ memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
+
+ return 0;
+}
+
+
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
struct amdgpu_bo **bo = &adev->firmware.fw_buf;
@@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+ amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ 0, NULL, NULL, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
goto failed;
@@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
goto failed_reserve;
}
- err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
+ err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &fw_mc_addr);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
goto failed_pin;
@@ -279,6 +309,13 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
header = (const struct common_firmware_header *)ucode->fw->data;
amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
fw_buf_ptr + fw_offset);
+ if (i == AMDGPU_UCODE_ID_CP_MEC1) {
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
+ fw_buf_ptr + fw_offset);
+ fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+ }
fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
}
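
For MEC firmware, the buffer layout now reserves room for a patched jump table right after each image. With invented sizes, ucode_size_bytes = 0x4100 and jt_size = 0x40 dwords: the image is copied at fw_offset, amdgpu_ucode_patch_jt() writes the 0x100-byte table at fw_offset + ALIGN(0x4100, PAGE_SIZE) = fw_offset + 0x5000, and fw_offset then advances by ALIGN(0x100, PAGE_SIZE) + ALIGN(0x4100, PAGE_SIZE) = 0x6000, keeping the table inside this slot.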
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index e468be4e28fa..a8a4230729f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_CP_MEC1,
AMDGPU_UCODE_ID_CP_MEC2,
AMDGPU_UCODE_ID_RLC_G,
+ AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_MAXIMUM,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e3281cacc586..fb270c7e7171 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -333,7 +333,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) {
- struct fence *fence;
+ struct dma_fence *fence;
r = amdgpu_uvd_get_destroy_msg(ring, handle,
false, &fence);
@@ -342,8 +342,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
continue;
}
- fence_wait(fence, false);
- fence_put(fence);
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
adev->uvd.filp[i] = NULL;
atomic_set(&adev->uvd.handles[i], 0);
@@ -876,6 +876,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
int r;
+ parser->job->vm = NULL;
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
if (ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
ib->length_dw);
@@ -909,14 +912,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
}
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
struct amdgpu_device *adev = ring->adev;
uint64_t addr;
int i, r;
@@ -931,7 +934,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
return r;
- if (!bo->adev->uvd.address_64_bit) {
+ if (!ring->adev->uvd.address_64_bit) {
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo);
}
@@ -960,7 +963,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err_free;
@@ -975,9 +978,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ttm_eu_fence_buffer_objects(&ticket, &head, f);
if (fence)
- *fence = fence_get(f);
+ *fence = dma_fence_get(f);
amdgpu_bo_unref(&bo);
- fence_put(f);
+ dma_fence_put(f);
return 0;
@@ -993,7 +996,7 @@ err:
crash the vcpu so just try to emit a dummy create/destroy msg to
avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence)
+ struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -1002,7 +1005,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo);
if (r)
return r;
@@ -1042,7 +1046,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
}
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -1051,7 +1055,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &bo);
if (r)
return r;
@@ -1128,7 +1133,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
*/
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct fence *fence;
+ struct dma_fence *fence;
long r;
r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -1143,7 +1148,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
- r = fence_wait_timeout(fence, false, timeout);
+ r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -1154,7 +1159,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
- fence_put(fence);
+ dma_fence_put(fence);
error:
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index c850009602d1..6249ba1bde2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence);
+ bool direct, struct dma_fence **fence);
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 7fe8fd884f06..69b66b9e7f57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &adev->vce.vcpu_bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
@@ -395,12 +396,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
* Open up a stream for HW test
*/
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence)
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint64_t dummy;
int i, r;
@@ -450,14 +451,14 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err;
amdgpu_job_free(job);
if (fence)
- *fence = fence_get(f);
- fence_put(f);
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
return 0;
err:
@@ -476,12 +477,12 @@ err:
* Close up a stream for HW test or if userspace failed to do so
*/
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence)
+ bool direct, struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -513,7 +514,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = fence_get(f);
+ job->fence = dma_fence_get(f);
if (r)
goto err;
@@ -526,8 +527,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
}
if (fence)
- *fence = fence_get(f);
- fence_put(f);
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
return 0;
err:
@@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t *size = &tmp;
int i, r, idx = 0;
+ p->job->vm = NULL;
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
r = amdgpu_cs_sysvm_access_required(p);
if (r)
return r;
@@ -788,6 +792,96 @@ out:
}
/**
+ * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
+ *
+ * @p: parser context
+ * @ib_idx: index of the indirect buffer to parse
+ */
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+{
+ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ int session_idx = -1;
+ uint32_t destroyed = 0;
+ uint32_t created = 0;
+ uint32_t allocated = 0;
+ uint32_t tmp, handle = 0;
+ int i, r = 0, idx = 0;
+
+ while (idx < ib->length_dw) {
+ uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
+ uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+
+ if ((len < 8) || (len & 3)) {
+ DRM_ERROR("invalid VCE command length (%d)!\n", len);
+ r = -EINVAL;
+ goto out;
+ }
+
+ switch (cmd) {
+ case 0x00000001: /* session */
+ handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+ session_idx = amdgpu_vce_validate_handle(p, handle,
+ &allocated);
+ if (session_idx < 0) {
+ r = session_idx;
+ goto out;
+ }
+ break;
+
+ case 0x01000001: /* create */
+ created |= 1 << session_idx;
+ if (destroyed & (1 << session_idx)) {
+ destroyed &= ~(1 << session_idx);
+ allocated |= 1 << session_idx;
+
+ } else if (!(allocated & (1 << session_idx))) {
+ DRM_ERROR("Handle already in use!\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ break;
+
+ case 0x02000001: /* destroy */
+ destroyed |= 1 << session_idx;
+ break;
+
+ default:
+ break;
+ }
+
+ if (session_idx == -1) {
+ DRM_ERROR("no session command at start of IB\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ idx += len / 4;
+ }
+
+ if (allocated & ~created) {
+ DRM_ERROR("New session without create command!\n");
+ r = -ENOENT;
+ }
+
+out:
+ if (!r) {
+ /* No error, free all destroyed handle slots */
+ tmp = destroyed;
+ amdgpu_ib_free(p->adev, ib, NULL);
+ } else {
+ /* Error during parsing, free all allocated handle slots */
+ tmp = allocated;
+ }
+
+ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+ if (tmp & (1 << i))
+ atomic_set(&p->adev->vce.handles[i], 0);
+
+ return r;
+}
+
+/**
* amdgpu_vce_ring_emit_ib - execute indirect buffer
*
* @ring: engine to use
@@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
amdgpu_ring_write(ring, VCE_CMD_END);
}
-unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 4; /* amdgpu_vce_ring_emit_ib */
-}
-
-unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
/**
* amdgpu_vce_ring_test_ring - test if VCE ring is working
*
@@ -883,7 +965,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
*/
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
@@ -902,7 +984,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto error;
}
- r = fence_wait_timeout(fence, false, timeout);
+ r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -913,6 +995,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
}
error:
- fence_put(fence);
+ dma_fence_put(fence);
return r;
}
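The new amdgpu_vce_ring_parse_cs_vm() above tracks session lifetime with three 32-bit masks, one bit per handle slot. A standalone sketch of just that bookkeeping, with hypothetical names and a fixed slot count standing in for AMDGPU_MAX_VCE_HANDLES:

#include <stdint.h>

#define NUM_SLOTS 16	/* stand-in for AMDGPU_MAX_VCE_HANDLES */

struct vce_masks {
	uint32_t created;	/* slots that saw a create command */
	uint32_t destroyed;	/* slots that saw a destroy command */
	uint32_t allocated;	/* slots newly claimed by this IB */
};

/* Mirror of the 0x01000001 (create) case above: a create may recycle a
 * slot destroyed earlier in the same IB, otherwise the slot must be one
 * we allocated ourselves; anything else is a handle collision. */
static int vce_mask_create(struct vce_masks *m, int slot)
{
	m->created |= 1u << slot;
	if (m->destroyed & (1u << slot)) {
		m->destroyed &= ~(1u << slot);
		m->allocated |= 1u << slot;
	} else if (!(m->allocated & (1u << slot))) {
		return -1;	/* handle already in use */
	}
	return 0;
}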
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 12729d2852df..d98041f7508d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,11 +29,12 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence);
+ struct dma_fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct fence **fence);
+ bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 968c4260d7a7..337c5b31d18d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <linux/fence-array.h>
+#include <linux/dma-fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -116,38 +116,43 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
+ * amdgpu_vm_validate_pt_bos - validate the page table BOs
*
* @adev: amdgpu device pointer
* @vm: vm providing the BOs
- * @duplicates: head of duplicates list
+ * @validate: callback to do the validation
+ * @param: parameter for the validation callback
*
- * Add the page directory to the BO duplicates list
- * for command submission.
+ * Validate the page table BOs on command submission if necessary.
*/
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct list_head *duplicates)
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+ void *param)
{
uint64_t num_evictions;
unsigned i;
+ int r;
/* We only need to validate the page tables
* if they aren't already valid.
*/
num_evictions = atomic64_read(&adev->num_evictions);
if (num_evictions == vm->last_eviction_counter)
- return;
+ return 0;
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
- struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+ struct amdgpu_bo *bo = vm->page_tables[i].bo;
- if (!entry->robj)
+ if (!bo)
continue;
- list_add(&entry->tv.head, duplicates);
+ r = validate(param, bo);
+ if (r)
+ return r;
}
+ return 0;
}
/**
@@ -166,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
spin_lock(&glob->lru_lock);
for (i = 0; i <= vm->max_pde_used; ++i) {
- struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+ struct amdgpu_bo *bo = vm->page_tables[i].bo;
- if (!entry->robj)
+ if (!bo)
continue;
- ttm_bo_move_to_lru_tail(&entry->robj->tbo);
+ ttm_bo_move_to_lru_tail(&bo->tbo);
}
spin_unlock(&glob->lru_lock);
}
@@ -194,14 +199,14 @@ static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
* Allocate an id for the vm, adding fences to the sync obj as necessary.
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct fence *fence,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
uint64_t fence_context = adev->fence_context + ring->idx;
- struct fence *updates = sync->last_vm_update;
+ struct dma_fence *updates = sync->last_vm_update;
struct amdgpu_vm_id *id, *idle;
- struct fence **fences;
+ struct dma_fence **fences;
unsigned i;
int r = 0;
@@ -225,17 +230,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (&idle->list == &adev->vm_manager.ids_lru) {
u64 fence_context = adev->vm_manager.fence_context + ring->idx;
unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
- struct fence_array *array;
+ struct dma_fence_array *array;
unsigned j;
for (j = 0; j < i; ++j)
- fence_get(fences[j]);
+ dma_fence_get(fences[j]);
- array = fence_array_create(i, fences, fence_context,
+ array = dma_fence_array_create(i, fences, fence_context,
seqno, true);
if (!array) {
for (j = 0; j < i; ++j)
- fence_put(fences[j]);
+ dma_fence_put(fences[j]);
kfree(fences);
r = -ENOMEM;
goto error;
@@ -243,7 +248,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
r = amdgpu_sync_fence(ring->adev, sync, &array->base);
- fence_put(&array->base);
+ dma_fence_put(&array->base);
if (r)
goto error;
@@ -257,7 +262,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Check if we can use a VMID already assigned to this VM */
i = ring->idx;
do {
- struct fence *flushed;
+ struct dma_fence *flushed;
id = vm->ids[i++];
if (i == AMDGPU_MAX_RINGS)
@@ -279,12 +284,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
continue;
if (id->last_flush->context != fence_context &&
- !fence_is_signaled(id->last_flush))
+ !dma_fence_is_signaled(id->last_flush))
continue;
flushed = id->flushed_updates;
if (updates &&
- (!flushed || fence_is_later(updates, flushed)))
+ (!flushed || dma_fence_is_later(updates, flushed)))
continue;
/* Good we can use this VMID. Remember this submission as
@@ -315,14 +320,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- fence_put(id->first);
- id->first = fence_get(fence);
+ dma_fence_put(id->first);
+ id->first = dma_fence_get(fence);
- fence_put(id->last_flush);
+ dma_fence_put(id->last_flush);
id->last_flush = NULL;
- fence_put(id->flushed_updates);
- id->flushed_updates = fence_get(updates);
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
id->pd_gpu_addr = job->vm_pd_addr;
id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
@@ -341,9 +346,9 @@ error:
static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- const struct amdgpu_ip_block_version *ip_block;
+ const struct amdgpu_ip_block *ip_block;
- if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+ if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
/* only compute rings */
return false;
@@ -351,10 +356,10 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
if (!ip_block)
return false;
- if (ip_block->major <= 7) {
+ if (ip_block->version->major <= 7) {
/* gfx7 has no workaround */
return true;
- } else if (ip_block->major == 8) {
+ } else if (ip_block->version->major == 8) {
if (adev->gfx.mec_fw_version >= 673)
/* gfx8 is fixed in MEC firmware 673 */
return false;
@@ -393,7 +398,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
amdgpu_vm_is_gpu_reset(adev, id))) {
- struct fence *fence;
+ struct dma_fence *fence;
trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
@@ -403,7 +408,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
return r;
mutex_lock(&adev->vm_manager.lock);
- fence_put(id->last_flush);
+ dma_fence_put(id->last_flush);
id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
}
@@ -537,7 +542,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_bo *bo)
{
struct amdgpu_ring *ring;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
unsigned entries;
@@ -578,7 +583,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
goto error_free;
amdgpu_bo_fence(bo, fence, true);
- fence_put(fence);
+ dma_fence_put(fence);
return 0;
error_free:
@@ -612,32 +617,35 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- bool shadow)
+/*
+ * amdgpu_vm_update_page_directory - make sure that the page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
{
+ struct amdgpu_bo *shadow;
struct amdgpu_ring *ring;
- struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
- vm->page_directory;
- uint64_t pd_addr;
+ uint64_t pd_addr, shadow_addr;
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
- uint64_t last_pde = ~0, last_pt = ~0;
+ uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
int r;
- if (!pd)
- return 0;
-
- r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
- if (r)
- return r;
-
- pd_addr = amdgpu_bo_gpu_offset(pd);
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ shadow = vm->page_directory->shadow;
/* padding, etc. */
ndw = 64;
@@ -645,6 +653,17 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
/* assume the worst case */
ndw += vm->max_pde_used * 6;
+ pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+ if (shadow) {
+ r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ if (r)
+ return r;
+ shadow_addr = amdgpu_bo_gpu_offset(shadow);
+ ndw *= 2;
+ } else {
+ shadow_addr = 0;
+ }
+
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
return r;
@@ -655,30 +674,26 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
- struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
+ struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
uint64_t pde, pt;
if (bo == NULL)
continue;
if (bo->shadow) {
- struct amdgpu_bo *shadow = bo->shadow;
+ struct amdgpu_bo *pt_shadow = bo->shadow;
- r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ r = amdgpu_ttm_bind(&pt_shadow->tbo,
+ &pt_shadow->tbo.mem);
if (r)
return r;
}
pt = amdgpu_bo_gpu_offset(bo);
- if (!shadow) {
- if (vm->page_tables[pt_idx].addr == pt)
- continue;
- vm->page_tables[pt_idx].addr = pt;
- } else {
- if (vm->page_tables[pt_idx].shadow_addr == pt)
- continue;
- vm->page_tables[pt_idx].shadow_addr = pt;
- }
+ if (vm->page_tables[pt_idx].addr == pt)
+ continue;
+
+ vm->page_tables[pt_idx].addr = pt;
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -686,6 +701,13 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
if (count) {
+ if (shadow)
+ amdgpu_vm_do_set_ptes(&params,
+ last_shadow,
+ last_pt, count,
+ incr,
+ AMDGPU_PTE_VALID);
+
amdgpu_vm_do_set_ptes(&params, last_pde,
last_pt, count, incr,
AMDGPU_PTE_VALID);
@@ -693,34 +715,44 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
count = 1;
last_pde = pde;
+ last_shadow = shadow_addr + pt_idx * 8;
last_pt = pt;
} else {
++count;
}
}
- if (count)
+ if (count) {
+ if (vm->page_directory->shadow)
+ amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
+ count, incr, AMDGPU_PTE_VALID);
+
amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
count, incr, AMDGPU_PTE_VALID);
+ }
- if (params.ib->length_dw != 0) {
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+ if (params.ib->length_dw == 0) {
+ amdgpu_job_free(job);
+ return 0;
+ }
+
+ amdgpu_ring_pad_ib(ring, params.ib);
+ amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM);
+ if (shadow)
+ amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
- amdgpu_bo_fence(pd, fence, true);
- fence_put(vm->page_directory_fence);
- vm->page_directory_fence = fence_get(fence);
- fence_put(fence);
+ WARN_ON(params.ib->length_dw > ndw);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error_free;
- } else {
- amdgpu_job_free(job);
- }
+ amdgpu_bo_fence(vm->page_directory, fence, true);
+ dma_fence_put(vm->page_directory_fence);
+ vm->page_directory_fence = dma_fence_get(fence);
+ dma_fence_put(fence);
return 0;
@@ -729,29 +761,6 @@ error_free:
return r;
}
-/*
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
- int r;
-
- r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
- if (r)
- return r;
- return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
-}
-
/**
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
@@ -781,11 +790,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* initialize the variables */
addr = start;
pt_idx = addr >> amdgpu_vm_block_size;
- pt = vm->page_tables[pt_idx].entry.robj;
+ pt = vm->page_tables[pt_idx].bo;
if (params->shadow) {
if (!pt->shadow)
return;
- pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ pt = pt->shadow;
}
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -804,11 +813,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* walk over the address space and update the page tables */
while (addr < end) {
pt_idx = addr >> amdgpu_vm_block_size;
- pt = vm->page_tables[pt_idx].entry.robj;
+ pt = vm->page_tables[pt_idx].bo;
if (params->shadow) {
if (!pt->shadow)
return;
- pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ pt = pt->shadow;
}
if ((addr & ~mask) == (end & ~mask))
@@ -929,20 +938,20 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
- struct fence *exclusive,
+ struct dma_fence *exclusive,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint32_t flags, uint64_t addr,
- struct fence **fence)
+ struct dma_fence **fence)
{
struct amdgpu_ring *ring;
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int r;
memset(&params, 0, sizeof(params));
@@ -1045,10 +1054,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
amdgpu_bo_fence(vm->page_directory, f, true);
if (fence) {
- fence_put(*fence);
- *fence = fence_get(f);
+ dma_fence_put(*fence);
+ *fence = dma_fence_get(f);
}
- fence_put(f);
+ dma_fence_put(f);
return 0;
error_free:
@@ -1065,8 +1074,8 @@ error_free:
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
- * @addr: addr to set the area to
* @flags: HW flags for the mapping
+ * @nodes: array of drm_mm_nodes with the MC addresses
* @fence: optional resulting fence
*
* Split the mapping into smaller chunks so that each update fits
@@ -1074,17 +1083,16 @@ error_free:
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
- struct fence *exclusive,
+ struct dma_fence *exclusive,
uint32_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
- uint32_t flags, uint64_t addr,
- struct fence **fence)
+ uint32_t flags,
+ struct drm_mm_node *nodes,
+ struct dma_fence **fence)
{
- const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
-
- uint64_t src = 0, start = mapping->it.start;
+ uint64_t pfn, src = 0, start = mapping->it.start;
int r;
/* normally, bo_va->flags only contains READABLE and WRITEABLE bit go here
@@ -1097,23 +1105,40 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_update(mapping);
- if (pages_addr) {
- if (flags == gtt_flags)
- src = adev->gart.table_addr + (addr >> 12) * 8;
- addr = 0;
+ pfn = mapping->offset >> PAGE_SHIFT;
+ if (nodes) {
+ while (pfn >= nodes->size) {
+ pfn -= nodes->size;
+ ++nodes;
+ }
}
- addr += mapping->offset;
- if (!pages_addr || src)
- return amdgpu_vm_bo_update_mapping(adev, exclusive,
- src, pages_addr, vm,
- start, mapping->it.last,
- flags, addr, fence);
+ do {
+ uint64_t max_entries;
+ uint64_t addr, last;
+
+ if (nodes) {
+ addr = nodes->start << PAGE_SHIFT;
+ max_entries = (nodes->size - pfn) *
+ (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ } else {
+ addr = 0;
+ max_entries = S64_MAX;
+ }
- while (start != mapping->it.last + 1) {
- uint64_t last;
+ if (pages_addr) {
+ if (flags == gtt_flags)
+ src = adev->gart.table_addr +
+ (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
+ else
+ max_entries = min(max_entries, 16ull * 1024ull);
+ addr = 0;
+ } else if (flags & AMDGPU_PTE_VALID) {
+ addr += adev->vm_manager.vram_base_offset;
+ }
+ addr += pfn << PAGE_SHIFT;
- last = min((uint64_t)mapping->it.last, start + max_size - 1);
+ last = min((uint64_t)mapping->it.last, start + max_entries - 1);
r = amdgpu_vm_bo_update_mapping(adev, exclusive,
src, pages_addr, vm,
start, last, flags, addr,
@@ -1121,9 +1146,14 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (r)
return r;
+ pfn += last - start + 1;
+ if (nodes && nodes->size == pfn) {
+ pfn = 0;
+ ++nodes;
+ }
start = last + 1;
- addr += max_size * AMDGPU_GPU_PAGE_SIZE;
- }
+
+ } while (unlikely(start != mapping->it.last + 1));
return 0;
}
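amdgpu_vm_bo_split_mapping() now walks an array of drm_mm_nodes instead of assuming one contiguous VRAM range: pfn is the page offset into the BO, and the loop first skips whole nodes until that offset lands inside one. A simplified sketch of the walk, with the types reduced to what the arithmetic needs:

struct mm_node {		/* stand-in for struct drm_mm_node */
	unsigned long start;	/* first page of the node */
	unsigned long size;	/* size in pages */
};

/* Skip nodes until the BO-relative page offset *pfn falls inside one,
 * then return the absolute start page of the mapping chunk. */
static unsigned long node_walk(const struct mm_node **nodes,
			       unsigned long *pfn)
{
	const struct mm_node *node = *nodes;

	while (*pfn >= node->size) {
		*pfn -= node->size;
		++node;
	}
	*nodes = node;

	return node->start + *pfn;
}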
@@ -1147,40 +1177,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
struct ttm_mem_reg *mem;
- struct fence *exclusive;
- uint64_t addr;
+ struct drm_mm_node *nodes;
+ struct dma_fence *exclusive;
int r;
if (clear) {
mem = NULL;
- addr = 0;
+ nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
mem = &bo_va->bo->tbo.mem;
- addr = (u64)mem->start << PAGE_SHIFT;
- switch (mem->mem_type) {
- case TTM_PL_TT:
+ nodes = mem->mm_node;
+ if (mem->mem_type == TTM_PL_TT) {
ttm = container_of(bo_va->bo->tbo.ttm, struct
ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
- break;
-
- case TTM_PL_VRAM:
- addr += adev->vm_manager.vram_base_offset;
- break;
-
- default:
- break;
}
-
exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
- adev == bo_va->bo->adev) ? flags : 0;
+ adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))
@@ -1190,7 +1210,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive,
gtt_flags, pages_addr, vm,
- mapping, flags, addr,
+ mapping, flags, nodes,
&bo_va->last_pt_update);
if (r)
return r;
@@ -1405,18 +1425,17 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv;
- struct amdgpu_bo_list_entry *entry;
struct amdgpu_bo *pt;
- entry = &vm->page_tables[pt_idx].entry;
- if (entry->robj)
+ if (vm->page_tables[pt_idx].bo)
continue;
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW,
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, resv, &pt);
if (r)
goto error_free;
@@ -1442,11 +1461,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
}
}
- entry->robj = pt;
- entry->priority = 0;
- entry->tv.bo = &entry->robj->tbo;
- entry->tv.shared = true;
- entry->user_pages = NULL;
+ vm->page_tables[pt_idx].bo = pt;
vm->page_tables[pt_idx].addr = 0;
}
@@ -1547,7 +1562,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
kfree(mapping);
}
- fence_put(bo_va->last_pt_update);
+ dma_fence_put(bo_va->last_pt_update);
kfree(bo_va);
}
@@ -1626,7 +1641,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW,
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, &vm->page_directory);
if (r)
goto error_free_sched_entity;
@@ -1697,7 +1713,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
- struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+ struct amdgpu_bo *pt = vm->page_tables[i].bo;
if (!pt)
continue;
@@ -1709,7 +1725,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
- fence_put(vm->page_directory_fence);
+ dma_fence_put(vm->page_directory_fence);
}
/**
@@ -1733,7 +1749,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
&adev->vm_manager.ids_lru);
}
- adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ adev->vm_manager.fence_context =
+ dma_fence_context_alloc(AMDGPU_MAX_RINGS);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
adev->vm_manager.seqno[i] = 0;
@@ -1755,9 +1772,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
- fence_put(adev->vm_manager.ids[i].first);
+ dma_fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
- fence_put(id->flushed_updates);
- fence_put(id->last_flush);
+ dma_fence_put(id->flushed_updates);
+ dma_fence_put(id->last_flush);
}
}
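One subtlety in the renamed amdgpu_vm_grab_id() above is reference ownership: dma_fence_array_create() takes over both the fences array and one reference per fence, so each fence is dma_fence_get()'d first and, on failure, everything is released by hand. A sketch of that pattern, assuming the linux/dma-fence-array.h API (helper name hypothetical):

#include <linux/dma-fence-array.h>
#include <linux/slab.h>

/* Bundle 'count' fences into one; on success the array owns the fences
 * and the kmalloc'd pointer array, and the caller owns &array->base. */
static struct dma_fence *bundle_fences(struct dma_fence **fences,
				       unsigned count, u64 context,
				       unsigned seqno)
{
	struct dma_fence_array *array;
	unsigned i;

	for (i = 0; i < count; ++i)
		dma_fence_get(fences[i]);

	array = dma_fence_array_create(count, fences, context, seqno, true);
	if (!array) {
		for (i = 0; i < count; ++i)
			dma_fence_put(fences[i]);
		kfree(fences);
		return NULL;
	}
	return &array->base;
}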
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
new file mode 100644
index 000000000000..adbc2f5e5c7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_VM_H__
+#define __AMDGPU_VM_H__
+
+#include <linux/rbtree.h>
+
+#include "gpu_scheduler.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+
+struct amdgpu_bo_va;
+struct amdgpu_job;
+struct amdgpu_bo_list_entry;
+
+/*
+ * GPUVM handling
+ */
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VM 16
+
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
+
+/* number of entries in page table */
+#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+
+/* PTBs (Page Table Blocks) need to be aligned to 32K */
+#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+
+#define AMDGPU_PTE_VALID (1 << 0)
+#define AMDGPU_PTE_SYSTEM (1 << 1)
+#define AMDGPU_PTE_SNOOPED (1 << 2)
+
+/* VI only */
+#define AMDGPU_PTE_EXECUTABLE (1 << 4)
+
+#define AMDGPU_PTE_READABLE (1 << 5)
+#define AMDGPU_PTE_WRITEABLE (1 << 6)
+
+#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
+
+/* How to program VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER 0
+#define AMDGPU_VM_FAULT_STOP_FIRST 1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+
+struct amdgpu_vm_pt {
+ struct amdgpu_bo *bo;
+ uint64_t addr;
+};
+
+struct amdgpu_vm {
+ /* tree of virtual addresses mapped */
+ struct rb_root va;
+
+ /* protecting invalidated */
+ spinlock_t status_lock;
+
+ /* BOs moved, but not yet updated in the PT */
+ struct list_head invalidated;
+
+ /* BOs cleared in the PT because of a move */
+ struct list_head cleared;
+
+ /* BO mappings freed, but not yet updated in the PT */
+ struct list_head freed;
+
+ /* contains the page directory */
+ struct amdgpu_bo *page_directory;
+ unsigned max_pde_used;
+ struct dma_fence *page_directory_fence;
+ uint64_t last_eviction_counter;
+
+ /* array of page tables, one for each page directory entry */
+ struct amdgpu_vm_pt *page_tables;
+
+ /* for id and flush management per ring */
+ struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
+
+ /* protecting freed */
+ spinlock_t freed_lock;
+
+ /* Scheduler entity for page table updates */
+ struct amd_sched_entity entity;
+
+ /* client id */
+ u64 client_id;
+};
+
+struct amdgpu_vm_id {
+ struct list_head list;
+ struct dma_fence *first;
+ struct amdgpu_sync active;
+ struct dma_fence *last_flush;
+ atomic64_t owner;
+
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ struct dma_fence *flushed_updates;
+
+ uint32_t current_gpu_reset_count;
+
+ uint32_t gds_base;
+ uint32_t gds_size;
+ uint32_t gws_base;
+ uint32_t gws_size;
+ uint32_t oa_base;
+ uint32_t oa_size;
+};
+
+struct amdgpu_vm_manager {
+ /* Handling of VMIDs */
+ struct mutex lock;
+ unsigned num_ids;
+ struct list_head ids_lru;
+ struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+
+ /* Handling of VM fences */
+ u64 fence_context;
+ unsigned seqno[AMDGPU_MAX_RINGS];
+
+ uint32_t max_pfn;
+ /* vram base address for page table entry */
+ u64 vram_base_offset;
+ /* is vm enabled? */
+ bool enabled;
+ /* vm pte handling */
+ const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
+ struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
+ unsigned vm_pte_num_rings;
+ atomic_t vm_pte_next_ring;
+ /* client id counter */
+ atomic64_t client_counter;
+};
+
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+ struct list_head *validated,
+ struct amdgpu_bo_list_entry *entry);
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*callback)(void *p, struct amdgpu_bo *bo),
+ void *param);
+void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
+ struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ bool clear);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr, uint64_t offset,
+ uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va);
+
+#endif
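The PTE bit definitions in the new header compose by OR-ing, with the fragment field carrying log2 of the contiguous page count in bits 7..11. A small illustration using only the macros defined above (the helper itself is hypothetical):

/* Flags for a valid, snooped system page that is readable and
 * writeable, mapped with the default 16-page (2^4) fragment size. */
static inline uint32_t example_pte_flags(void)
{
	uint32_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
			 AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE |
			 AMDGPU_PTE_WRITEABLE;

	return flags | AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
}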
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
new file mode 100644
index 000000000000..180eed7c8bca
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_vram_mgr {
+ struct drm_mm mm;
+ spinlock_t lock;
+};
+
+/**
+ * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of VRAM
+ *
+ * Allocate and initialize the VRAM manager.
+ */
+static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct amdgpu_vram_mgr *mgr;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+
+ drm_mm_init(&mgr->mm, 0, p_size);
+ spin_lock_init(&mgr->lock);
+ man->priv = mgr;
+ return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_fini - free and destroy VRAM manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ if (!drm_mm_clean(&mgr->mm)) {
+ spin_unlock(&mgr->lock);
+ return -EBUSY;
+ }
+
+ drm_mm_takedown(&mgr->mm);
+ spin_unlock(&mgr->lock);
+ kfree(mgr);
+ man->priv = NULL;
+ return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_new - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate VRAM for the given BO.
+ */
+static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
+ struct amdgpu_vram_mgr *mgr = man->priv;
+ struct drm_mm *mm = &mgr->mm;
+ struct drm_mm_node *nodes;
+ enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
+ enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+ unsigned long lpfn, num_nodes, pages_per_node, pages_left;
+ unsigned i;
+ int r;
+
+ lpfn = place->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
+ amdgpu_vram_page_split == -1) {
+ pages_per_node = ~0ul;
+ num_nodes = 1;
+ } else {
+ pages_per_node = max((uint32_t)amdgpu_vram_page_split,
+ mem->page_alignment);
+ num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+ }
+
+ nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ if (place->flags & TTM_PL_FLAG_TOPDOWN) {
+ sflags = DRM_MM_SEARCH_BELOW;
+ aflags = DRM_MM_CREATE_TOP;
+ }
+
+ pages_left = mem->num_pages;
+
+ spin_lock(&mgr->lock);
+ for (i = 0; i < num_nodes; ++i) {
+ unsigned long pages = min(pages_left, pages_per_node);
+ uint32_t alignment = mem->page_alignment;
+
+ if (pages == pages_per_node)
+ alignment = pages_per_node;
+ else
+ sflags |= DRM_MM_SEARCH_BEST;
+
+ r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
+ alignment, 0,
+ place->fpfn, lpfn,
+ sflags, aflags);
+ if (unlikely(r))
+ goto error;
+
+ pages_left -= pages;
+ }
+ spin_unlock(&mgr->lock);
+
+ mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
+ mem->mm_node = nodes;
+
+ return 0;
+
+error:
+ while (i--)
+ drm_mm_remove_node(&nodes[i]);
+ spin_unlock(&mgr->lock);
+
+ kfree(nodes);
+ return r == -ENOSPC ? 0 : r;
+}
+
+/**
+ * amdgpu_vram_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @mem: TTM memory object
+ *
+ * Free the allocated VRAM again.
+ */
+static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+ struct drm_mm_node *nodes = mem->mm_node;
+ unsigned pages = mem->num_pages;
+
+ if (!mem->mm_node)
+ return;
+
+ spin_lock(&mgr->lock);
+ while (pages) {
+ pages -= nodes->size;
+ drm_mm_remove_node(nodes);
+ ++nodes;
+ }
+ spin_unlock(&mgr->lock);
+
+ kfree(mem->mm_node);
+ mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_vram_mgr_debug - dump VRAM table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ drm_mm_debug_table(&mgr->mm, prefix);
+ spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
+ amdgpu_vram_mgr_init,
+ amdgpu_vram_mgr_fini,
+ amdgpu_vram_mgr_new,
+ amdgpu_vram_mgr_del,
+ amdgpu_vram_mgr_debug
+};
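The allocation policy in amdgpu_vram_mgr_new() above is the point of the new manager: a contiguous BO (or amdgpu_vram_page_split == -1) gets a single drm_mm node, while anything else is chopped into pieces of at most amdgpu_vram_page_split pages, never smaller than the BO's alignment. A standalone sketch of just the node-count computation; names and types are illustrative, not part of the driver:

/* How many drm_mm nodes an allocation of num_pages will use. */
static unsigned long vram_num_nodes(unsigned long num_pages,
				    unsigned long page_alignment,
				    long vram_page_split, int contiguous)
{
	unsigned long pages_per_node;

	if (contiguous || vram_page_split == -1)
		return 1;	/* one node covers the whole BO */

	/* never split below the BO's own alignment requirement */
	pages_per_node = vram_page_split > (long)page_alignment ?
			 (unsigned long)vram_page_split : page_alignment;

	/* DIV_ROUND_UP(num_pages, pages_per_node) */
	return (num_pages + pages_per_node - 1) / pages_per_node;
}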
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index f7d236f95e74..8c9bc75a9c2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -31,6 +31,7 @@
#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
+#include "atombios_crtc.h"
#include "amdgpu_atombios.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 5be788b269e2..1caff75ab9fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -887,9 +887,6 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
struct ci_power_info *pi = ci_get_pi(adev);
- if (pi->uvd_power_gated == gate)
- return;
-
pi->uvd_power_gated = gate;
ci_update_uvd_dpm(adev, gate);
@@ -960,6 +957,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
sclk = ps->performance_levels[0].sclk;
}
+ if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
+ sclk = adev->pm.pm_display_cfg.min_core_set_clock;
+
+ if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
+ mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
+
if (rps->vce_active) {
if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
@@ -2201,6 +2204,11 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
struct ci_power_info *pi = ci_get_pi(adev);
int i, ret;
+ if (amdgpu_ci_is_smc_running(adev)) {
+ DRM_INFO("smc is running, no need to load smc firmware\n");
+ return 0;
+ }
+
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
break;
@@ -4190,8 +4198,15 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
struct ci_power_info *pi = ci_get_pi(adev);
u32 tmp;
+ int ret = 0;
if (!gate) {
+ /* turn the clocks on when decoding */
+ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
+ if (ret)
+ return ret;
+
if (pi->caps_uvd_dpm ||
(adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
pi->smc_state_table.UvdBootLevel = 0;
@@ -4203,9 +4218,17 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
WREG32_SMC(ixDPM_TABLE_475, tmp);
+ ret = ci_enable_uvd_dpm(adev, true);
+ } else {
+ ret = ci_enable_uvd_dpm(adev, false);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
}
- return ci_enable_uvd_dpm(adev, !gate);
+ return ret;
}
static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
@@ -4247,13 +4270,12 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
ret = ci_enable_vce_dpm(adev, true);
} else {
+ ret = ci_enable_vce_dpm(adev, false);
+ if (ret)
+ return ret;
/* turn the clocks off when not encoding */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
- if (ret)
- return ret;
-
- ret = ci_enable_vce_dpm(adev, false);
}
}
return ret;
@@ -5219,6 +5241,7 @@ static void ci_update_current_ps(struct amdgpu_device *adev,
pi->current_rps = *rps;
pi->current_ps = *new_ps;
pi->current_rps.ps_priv = &pi->current_ps;
+ adev->pm.dpm.current_ps = &pi->current_rps;
}
static void ci_update_requested_ps(struct amdgpu_device *adev,
@@ -5230,6 +5253,7 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
pi->requested_rps = *rps;
pi->requested_ps = *new_ps;
pi->requested_rps.ps_priv = &pi->requested_ps;
+ adev->pm.dpm.requested_ps = &pi->requested_rps;
}
static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
@@ -5267,8 +5291,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev)
struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
int ret;
- if (amdgpu_ci_is_smc_running(adev))
- return -EINVAL;
if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
ci_enable_voltage_control(adev);
ret = ci_construct_voltage_tables(adev);
@@ -5689,7 +5711,7 @@ static int ci_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk, mclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -6094,6 +6116,56 @@ static void ci_dpm_print_power_state(struct amdgpu_device *adev,
amdgpu_dpm_print_ps_status(adev, rps);
}
+static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
+ const struct ci_pl *ci_cpl2)
+{
+ return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
+ (ci_cpl1->sclk == ci_cpl2->sclk) &&
+ (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
+ (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
+}
+
+static int ci_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ struct ci_ps *ci_cps;
+ struct ci_ps *ci_rps;
+ int i;
+
+ if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+ return -EINVAL;
+
+ ci_cps = ci_get_ps(cps);
+ ci_rps = ci_get_ps(rps);
+
+ if (ci_cps == NULL) {
+ *equal = false;
+ return 0;
+ }
+
+ if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < ci_cps->performance_level_count; i++) {
+ if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
+ &(ci_rps->performance_levels[i]))) {
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+ *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+ *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+ return 0;
+}
+
static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
struct ci_power_info *pi = ci_get_pi(adev);
@@ -6289,12 +6361,19 @@ static int ci_dpm_suspend(void *handle)
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
- /* disable dpm */
- ci_dpm_disable(adev);
- /* reset the power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+ amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+ AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+ adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
+ adev->pm.dpm.last_state = adev->pm.dpm.state;
+ adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
mutex_unlock(&adev->pm.mutex);
+ amdgpu_pm_compute_clocks(adev);
+
}
+
return 0;
}
@@ -6312,6 +6391,8 @@ static int ci_dpm_resume(void *handle)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
+ adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
+ adev->pm.dpm.state = adev->pm.dpm.last_state;
mutex_unlock(&adev->pm.mutex);
if (adev->pm.dpm_enabled)
amdgpu_pm_compute_clocks(adev);
@@ -6646,6 +6727,8 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.set_sclk_od = ci_dpm_set_sclk_od,
.get_mclk_od = ci_dpm_get_mclk_od,
.set_mclk_od = ci_dpm_set_mclk_od,
+ .check_state_equal = ci_check_state_equal,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
};
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -6664,3 +6747,12 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}
+
+const struct amdgpu_ip_block_version ci_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ci_dpm_ip_funcs,
+};
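ci_dpm_ip_block is the new-style replacement for the per-ASIC block arrays deleted from cik.c below: rather than fixed tables, each IP block descriptor is registered at init time. A hedged sketch of that registration, assuming a helper named amdgpu_ip_block_add() that is not shown in this diff:

/* Hypothetical registration call; amdgpu_ip_block_add() is assumed to
 * append one block descriptor to adev's ip-block list. */
static int register_ci_smc(struct amdgpu_device *adev)
{
	return amdgpu_ip_block_add(adev, &ci_dpm_ip_block);
}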
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index a845b6a93b79..302df85893ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1189,18 +1189,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
return r;
}
-static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
/**
* cik_asic_reset - soft reset GPU
*
@@ -1213,11 +1201,12 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
static int cik_asic_reset(struct amdgpu_device *adev)
{
int r;
- cik_set_bios_scratch_engine_hung(adev, true);
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
r = cik_gpu_pci_config_reset(adev);
- cik_set_bios_scratch_engine_hung(adev, false);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
}
@@ -1641,745 +1630,6 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
}
-static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 3,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 3,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 3,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &gfx_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &cik_sdma_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 4,
- .minor = 2,
- .rev = 0,
- .funcs = &uvd_v4_2_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v2_0_ip_funcs,
- },
-};
-
-int cik_set_ip_blocks(struct amdgpu_device *adev)
-{
- if (adev->enable_virtual_display) {
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- } else {
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
@@ -2612,7 +1862,7 @@ static int cik_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_common_ip_funcs = {
+static const struct amd_ip_funcs cik_common_ip_funcs = {
.name = "cik_common",
.early_init = cik_common_early_init,
.late_init = NULL,
@@ -2628,3 +1878,79 @@ const struct amd_ip_funcs cik_common_ip_funcs = {
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
};
+
+static const struct amdgpu_ip_block_version cik_common_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+};
+
+int cik_set_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_HAWAII:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_KAVERI:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ amdgpu_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+ return 0;
+}
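
For reference: with this restructuring each ASIC path registers its IP blocks imperatively, in initialization order, and the virtual-versus-hardware display choice shrinks to one branch per chip instead of a whole duplicated array. A minimal sketch of what amdgpu_ip_block_add plausibly does, assuming amdgpu_device now carries a fixed array of block slots plus a count, neither of which is shown in this patch:

int amdgpu_ip_block_add(struct amdgpu_device *adev,
                        const struct amdgpu_ip_block_version *ip_block_version)
{
    if (!ip_block_version)
        return -EINVAL;

    /* append in call order -- order still matters, as the old arrays noted */
    adev->ip_blocks[adev->num_ip_blocks++] = *ip_block_version;

    return 0;
}
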
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 5ebd2d7a0327..c4989f51ecef 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -24,8 +24,6 @@
#ifndef __CIK_H__
#define __CIK_H__
-extern const struct amd_ip_funcs cik_common_ip_funcs;
-
void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index be3d6f79a864..319b32cdea84 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -413,7 +413,7 @@ static int cik_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_ih_ip_funcs = {
+static const struct amd_ip_funcs cik_ih_ip_funcs = {
.name = "cik_ih",
.early_init = cik_ih_early_init,
.late_init = NULL,
@@ -441,3 +441,12 @@ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
if (adev->irq.ih_funcs == NULL)
adev->irq.ih_funcs = &cik_ih_funcs;
}
+
+const struct amdgpu_ip_block_version cik_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.h b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
index 6b0f375ec244..1d9ddee2868e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
@@ -24,6 +24,6 @@
#ifndef __CIK_IH_H__
#define __CIK_IH_H__
-extern const struct amd_ip_funcs cik_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_ih_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cb952acc7133..4c34dbc7a254 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -206,10 +206,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_NOP_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -622,7 +622,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -655,7 +655,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -675,7 +675,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
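
The fence_* calls in this hunk are part of the tree-wide rename of the fence infrastructure into the dma_fence_* namespace (struct fence becomes struct dma_fence, and fence_wait_timeout/fence_put pick up the same prefix); the semantics are unchanged. The IB-test idiom after the rename, condensed:

    struct dma_fence *f = NULL;
    long r;

    /* submission fills in f with the IB's completion fence */

    r = dma_fence_wait_timeout(f, false, timeout);
    if (r == 0)         /* expired: nothing signaled in time */
        r = -ETIMEDOUT;
    else if (r > 0)     /* signaled with time to spare */
        r = 0;

    dma_fence_put(f);   /* drop the reference from submission */
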
@@ -848,22 +848,6 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
-static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 4; /* cik_sdma_ring_emit_ib */
-}
-
-static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* cik_sdma_ring_emit_hdp_flush */
- 3 + /* cik_sdma_ring_emit_hdp_invalidate */
- 6 + /* cik_sdma_ring_emit_pipeline_sync */
- 12 + /* cik_sdma_ring_emit_vm_flush */
- 9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
-}
-
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
bool enable)
{
@@ -959,11 +943,10 @@ static int cik_sdma_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1207,7 +1190,7 @@ static int cik_sdma_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cik_sdma_ip_funcs = {
+static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.name = "cik_sdma",
.early_init = cik_sdma_early_init,
.late_init = NULL,
@@ -1225,10 +1208,19 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
};
static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
.get_rptr = cik_sdma_ring_get_rptr,
.get_wptr = cik_sdma_ring_get_wptr,
.set_wptr = cik_sdma_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* cik_sdma_ring_emit_hdp_flush */
+ 3 + /* cik_sdma_ring_emit_hdp_invalidate */
+ 6 + /* cik_sdma_ring_emit_pipeline_sync */
+ 12 + /* cik_sdma_ring_emit_vm_flush */
+ 9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
.emit_ib = cik_sdma_ring_emit_ib,
.emit_fence = cik_sdma_ring_emit_fence,
.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
@@ -1239,8 +1231,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
.pad_ib = cik_sdma_ring_pad_ib,
- .get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
- .get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1352,3 +1342,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version cik_sdma_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+};
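
Worth noting: get_emit_ib_size/get_dma_frame_size only ever returned compile-time constants, so they become plain emit_ib_size/emit_frame_size fields of amdgpu_ring_funcs, while type, align_mask and nop migrate from per-ring state into the same const table (hence ring->funcs->nop in insert_nop above, and the shorter amdgpu_ring_init signature). A sketch of the consumer-side effect, assuming amdgpu_ring_alloc-style sizing code that is outside this patch:

    unsigned size;

    /* before: two indirect calls per submission */
    size = ring->funcs->get_dma_frame_size(ring) +
           num_ibs * ring->funcs->get_emit_ib_size(ring);

    /* after: constant loads from the shared, read-only funcs table */
    size = ring->funcs->emit_frame_size +
           num_ibs * ring->funcs->emit_ib_size;
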
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
index 027727c677b8..a4a8fe01410b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
@@ -24,6 +24,6 @@
#ifndef __CIK_SDMA_H__
#define __CIK_SDMA_H__
-extern const struct amd_ip_funcs cik_sdma_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_sdma_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 8659852aea9e..6cbd913fd12e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -43,6 +43,14 @@
#define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c)
#define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c)
+/* hpd instance offsets */
+#define HPD0_REGISTER_OFFSET (0x1807 - 0x1807)
+#define HPD1_REGISTER_OFFSET (0x180a - 0x1807)
+#define HPD2_REGISTER_OFFSET (0x180d - 0x1807)
+#define HPD3_REGISTER_OFFSET (0x1810 - 0x1807)
+#define HPD4_REGISTER_OFFSET (0x1813 - 0x1807)
+#define HPD5_REGISTER_OFFSET (0x1816 - 0x1807)
+
#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
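
The HPDn_REGISTER_OFFSET values are instance strides relative to HPD0: the CIK HPD register banks sit three dwords apart (0x180a - 0x1807 = 3, and so on), so any per-instance register is reachable as the instance-0 address plus a table lookup, e.g.:

    /* hpd is the zero-based instance, already range-checked */
    u32 status = RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]);
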
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 3c082e143730..352b5fad5a06 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1250,7 +1250,8 @@ static void cz_update_current_ps(struct amdgpu_device *adev,
pi->current_ps = *ps;
pi->current_rps = *rps;
- pi->current_rps.ps_priv = ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+ adev->pm.dpm.current_ps = &pi->current_rps;
}
@@ -1262,7 +1263,8 @@ static void cz_update_requested_ps(struct amdgpu_device *adev,
pi->requested_ps = *ps;
pi->requested_rps = *rps;
- pi->requested_rps.ps_priv = ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+ adev->pm.dpm.requested_ps = &pi->requested_rps;
}
@@ -2257,6 +2259,18 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
}
+static int cz_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ if (equal == NULL)
+ return -EINVAL;
+
+ *equal = false;
+ return 0;
+}
+
const struct amd_ip_funcs cz_dpm_ip_funcs = {
.name = "cz_dpm",
.early_init = cz_dpm_early_init,
@@ -2289,6 +2303,7 @@ static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
.vblank_too_short = NULL,
.powergate_uvd = cz_dpm_powergate_uvd,
.powergate_vce = cz_dpm_powergate_vce,
+ .check_state_equal = cz_check_state_equal,
};
static void cz_dpm_set_funcs(struct amdgpu_device *adev)
@@ -2296,3 +2311,12 @@ static void cz_dpm_set_funcs(struct amdgpu_device *adev)
if (NULL == adev->pm.funcs)
adev->pm.funcs = &cz_dpm_funcs;
}
+
+const struct amdgpu_ip_block_version cz_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_dpm_ip_funcs,
+};
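
The ps_priv hunks above fix a dangling pointer: ps refers to the caller-provided source state, while the driver keeps its own copy in pi->current_ps / pi->requested_ps, so the back-pointer must target the stored copy (and the generic adev->pm.dpm pointers must track the same objects for the new check_state_equal path to compare anything meaningful). The bug pattern in miniature, stripped of driver specifics:

struct state   { int level; };
struct wrapper { struct state *priv; };

static struct state   stored;
static struct wrapper w;

static void update(const struct state *src)  /* src may be a temporary */
{
    stored = *src;      /* keep a private copy... */
    w.priv = &stored;   /* ...and point at the copy, never at src */
}
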
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 3d23a70b6432..fe7cbb24da7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -394,7 +394,7 @@ static int cz_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs cz_ih_ip_funcs = {
+static const struct amd_ip_funcs cz_ih_ip_funcs = {
.name = "cz_ih",
.early_init = cz_ih_early_init,
.late_init = NULL,
@@ -423,3 +423,11 @@ static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &cz_ih_funcs;
}
+const struct amdgpu_ip_block_version cz_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
index fc4057a2ecb9..14be7753221b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
@@ -24,6 +24,6 @@
#ifndef __CZ_IH_H__
#define __CZ_IH_H__
-extern const struct amd_ip_funcs cz_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cz_ih_ip_block;
#endif /* __CZ_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9260caef74fa..679dd7320279 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v10_0.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
@@ -330,33 +331,12 @@ static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
enum amdgpu_hpd_id hpd)
{
- int idx;
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return connected;
- }
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+ if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
connected = true;
@@ -376,37 +356,16 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
{
u32 tmp;
bool connected = dce_v10_0_hpd_sense(adev, hpd);
- int idx;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return;
- }
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
if (connected)
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
else
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -422,33 +381,12 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -457,24 +395,24 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_CONNECT_INT_DELAY,
AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_DISCONNECT_INT_DELAY,
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq,
@@ -495,37 +433,16 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
@@ -3548,7 +3465,7 @@ static int dce_v10_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v10_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.name = "dce_v10_0",
.early_init = dce_v10_0_early_init,
.late_init = NULL,
@@ -3879,3 +3796,21 @@ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v10_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v10_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v10_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_v10_0_ip_funcs,
+};
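
The same simplification recurs in every hpd helper here and below: the AMDGPU_HPD_n enumerators are zero-based and dense, so the switch mapping them onto array indices was an identity map. A bounds check against mode_info.num_hpd plus direct indexing is equivalent, and additionally rejects pins that a given ASIC does not expose. The shared shape:

    if (hpd >= adev->mode_info.num_hpd)  /* ASIC has fewer pins */
        return;

    tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
    /* ... flip the bits of interest in tmp ... */
    WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
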
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
index e3dc04d293e4..7a0747789f1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
@@ -24,7 +24,9 @@
#ifndef __DCE_V10_0_H__
#define __DCE_V10_0_H__
-extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
+
+extern const struct amdgpu_ip_block_version dce_v10_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v10_1_ip_block;
void dce_v10_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 367739bd1927..807dfedb3610 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v11_0.h"
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
@@ -346,33 +347,12 @@ static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
enum amdgpu_hpd_id hpd)
{
- int idx;
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return connected;
- }
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+ if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
connected = true;
@@ -392,37 +372,16 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
{
u32 tmp;
bool connected = dce_v11_0_hpd_sense(adev, hpd);
- int idx;
- switch (hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (hpd >= adev->mode_info.num_hpd)
return;
- }
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
if (connected)
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
else
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -438,33 +397,12 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -473,24 +411,24 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_CONNECT_INT_DELAY,
AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
DC_HPD_DISCONNECT_INT_DELAY,
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
@@ -510,37 +448,16 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
u32 tmp;
- int idx;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- idx = 0;
- break;
- case AMDGPU_HPD_2:
- idx = 1;
- break;
- case AMDGPU_HPD_3:
- idx = 2;
- break;
- case AMDGPU_HPD_4:
- idx = 3;
- break;
- case AMDGPU_HPD_5:
- idx = 4;
- break;
- case AMDGPU_HPD_6:
- idx = 5;
- break;
- default:
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
- }
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
@@ -3605,7 +3522,7 @@ static int dce_v11_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v11_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.name = "dce_v11_0",
.early_init = dce_v11_0_early_init,
.late_init = NULL,
@@ -3935,3 +3852,21 @@ static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v11_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v11_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v11_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_v11_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
index 1f58a65ba2ef..0d878ca3acba 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
@@ -24,7 +24,8 @@
#ifndef __DCE_V11_0_H__
#define __DCE_V11_0_H__
-extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
void dce_v11_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 15f9fc0514b2..57423332bf75 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -46,6 +46,16 @@ static const u32 crtc_offsets[6] =
SI_CRTC5_REGISTER_OFFSET
};
+static const u32 hpd_offsets[] =
+{
+ DC_HPD1_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD2_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD3_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD4_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD5_INT_STATUS - DC_HPD1_INT_STATUS,
+ DC_HPD6_INT_STATUS - DC_HPD1_INT_STATUS,
+};
+
static const uint32_t dig_offsets[] = {
SI_CRTC0_REGISTER_OFFSET,
SI_CRTC1_REGISTER_OFFSET,
@@ -94,15 +104,6 @@ static const struct {
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
-static const uint32_t hpd_int_control_offsets[6] = {
- DC_HPD1_INT_CONTROL,
- DC_HPD2_INT_CONTROL,
- DC_HPD3_INT_CONTROL,
- DC_HPD4_INT_CONTROL,
- DC_HPD5_INT_CONTROL,
- DC_HPD6_INT_CONTROL,
-};
-
static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg)
{
@@ -257,34 +258,11 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
{
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_2:
- if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_3:
- if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_4:
- if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_5:
- if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- case AMDGPU_HPD_6:
- if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
- connected = true;
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return connected;
+
+ if (RREG32(DC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPDx_SENSE)
+ connected = true;
return connected;
}
@@ -303,58 +281,15 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
u32 tmp;
bool connected = dce_v6_0_hpd_sense(adev, hpd);
- switch (hpd) {
- case AMDGPU_HPD_1:
- tmp = RREG32(DC_HPD1_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD1_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- tmp = RREG32(DC_HPD2_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD2_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- tmp = RREG32(DC_HPD3_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD3_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- tmp = RREG32(DC_HPD4_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD4_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- tmp = RREG32(DC_HPD5_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD5_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- tmp = RREG32(DC_HPD6_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPDx_INT_POLARITY;
- else
- tmp |= DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD6_INT_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return;
+
+ tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -369,34 +304,17 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
- u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
- DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(DC_HPD1_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- WREG32(DC_HPD2_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- WREG32(DC_HPD3_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- WREG32(DC_HPD4_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- WREG32(DC_HPD5_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- WREG32(DC_HPD6_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp |= DC_HPDx_EN;
+ WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -405,34 +323,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
- break;
- default:
- continue;
- }
-
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPDx_INT_EN;
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
@@ -454,32 +347,18 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(DC_HPD1_CONTROL, 0);
- break;
- case AMDGPU_HPD_2:
- WREG32(DC_HPD2_CONTROL, 0);
- break;
- case AMDGPU_HPD_3:
- WREG32(DC_HPD3_CONTROL, 0);
- break;
- case AMDGPU_HPD_4:
- WREG32(DC_HPD4_CONTROL, 0);
- break;
- case AMDGPU_HPD_5:
- WREG32(DC_HPD5_CONTROL, 0);
- break;
- case AMDGPU_HPD_6:
- WREG32(DC_HPD6_CONTROL, 0);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPDx_EN;
+ WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
}
@@ -611,12 +490,55 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
- if (!render)
+ if (!render)
WREG32(R_000300_VGA_RENDER_CONTROL,
RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}
+static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ num_crtc = 6;
+ break;
+ case CHIP_OLAND:
+ num_crtc = 2;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev)
+{
+ /* Disable VGA rendering and any enabled CRTCs, if the ASIC has a DCE engine */
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v6_0_set_vga_render_state(adev, false);
+
+ /* Disable any enabled CRTCs */
+ for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
+ crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) &
+ EVERGREEN_CRTC_MASTER_EN;
+ if (crtc_enabled) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
+}
+
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
@@ -2338,21 +2260,20 @@ static int dce_v6_0_early_init(void *handle)
dce_v6_0_set_display_funcs(adev);
dce_v6_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
- adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_OLAND:
- adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 2;
adev->mode_info.num_dig = 2;
break;
default:
- /* FIXME: not supported yet */
return -EINVAL;
}
@@ -2582,42 +2503,23 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+ u32 dc_hpd_int_cntl;
- switch (type) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
- break;
- default:
+ if (type >= adev->mode_info.num_hpd) {
DRM_DEBUG("invalid hdp %d\n", type);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
+ dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
+ dc_hpd_int_cntl |= DC_HPDx_INT_EN;
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
default:
break;
@@ -2790,7 +2692,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, int_control, tmp;
+ uint32_t disp_int, mask, tmp;
unsigned hpd;
if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -2801,12 +2703,11 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
hpd = entry->src_data;
disp_int = RREG32(interrupt_status_offsets[hpd].reg);
mask = interrupt_status_offsets[hpd].hpd;
- int_control = hpd_int_control_offsets[hpd];
if (disp_int & mask) {
- tmp = RREG32(int_control);
+ tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(int_control, tmp);
+ WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
DRM_INFO("IH: HPD%d\n", hpd + 1);
}
@@ -2827,7 +2728,7 @@ static int dce_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.name = "dce_v6_0",
.early_init = dce_v6_0_early_init,
.late_init = NULL,
@@ -3168,3 +3069,21 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v6_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 6,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &dce_v6_0_ip_funcs,
+};
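
Two behavioral details of the SI version are easy to miss: hpd_init now read-modify-writes DC_HPDx_CONTROL, setting only the enable bit and leaving the connection/RX timer fields at whatever the VBIOS or reset programmed, instead of force-writing 0x9c4/0xfa; and the CRTC count moves into dce_v6_0_get_num_crtc() because dce_v6_0_disable_dce() can be reached from the virtual-display path before early_init has populated mode_info. The read-modify-write shape, for clarity:

    u32 tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[hpd]);

    tmp |= DC_HPDx_EN;  /* touch only the enable bit */
    WREG32(DC_HPD1_CONTROL + hpd_offsets[hpd], tmp);
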
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
index 6a5528105bb6..7b546b596de1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
@@ -24,6 +24,9 @@
#ifndef __DCE_V6_0_H__
#define __DCE_V6_0_H__
-extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v6_4_ip_block;
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 8c4d808db0f1..6f7656d525e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -31,6 +31,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "dce_v8_0.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
@@ -56,6 +57,16 @@ static const u32 crtc_offsets[6] =
CRTC5_REGISTER_OFFSET
};
+static const u32 hpd_offsets[] =
+{
+ HPD0_REGISTER_OFFSET,
+ HPD1_REGISTER_OFFSET,
+ HPD2_REGISTER_OFFSET,
+ HPD3_REGISTER_OFFSET,
+ HPD4_REGISTER_OFFSET,
+ HPD5_REGISTER_OFFSET
+};
+
static const uint32_t dig_offsets[] = {
CRTC0_REGISTER_OFFSET,
CRTC1_REGISTER_OFFSET,
@@ -104,15 +115,6 @@ static const struct {
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
-static const uint32_t hpd_int_control_offsets[6] = {
- mmDC_HPD1_INT_CONTROL,
- mmDC_HPD2_INT_CONTROL,
- mmDC_HPD3_INT_CONTROL,
- mmDC_HPD4_INT_CONTROL,
- mmDC_HPD5_INT_CONTROL,
- mmDC_HPD6_INT_CONTROL,
-};
-
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg)
{
@@ -278,34 +280,12 @@ static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
{
bool connected = false;
- switch (hpd) {
- case AMDGPU_HPD_1:
- if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_2:
- if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_3:
- if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_4:
- if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_5:
- if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
- connected = true;
- break;
- case AMDGPU_HPD_6:
- if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
- connected = true;
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return connected;
+
+ if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+ DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+ connected = true;
return connected;
}
@@ -324,58 +304,15 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
u32 tmp;
bool connected = dce_v8_0_hpd_sense(adev, hpd);
- switch (hpd) {
- case AMDGPU_HPD_1:
- tmp = RREG32(mmDC_HPD1_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
- WREG32(mmDC_HPD1_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- tmp = RREG32(mmDC_HPD2_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
- WREG32(mmDC_HPD2_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- tmp = RREG32(mmDC_HPD3_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
- WREG32(mmDC_HPD3_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- tmp = RREG32(mmDC_HPD4_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
- WREG32(mmDC_HPD4_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- tmp = RREG32(mmDC_HPD5_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
- WREG32(mmDC_HPD5_INT_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- tmp = RREG32(mmDC_HPD6_INT_CONTROL);
- if (connected)
- tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
- else
- tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
- WREG32(mmDC_HPD6_INT_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (hpd >= adev->mode_info.num_hpd)
+ return;
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ if (connected)
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ else
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
@@ -390,35 +327,17 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
- u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
- (0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
- DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(mmDC_HPD1_CONTROL, tmp);
- break;
- case AMDGPU_HPD_2:
- WREG32(mmDC_HPD2_CONTROL, tmp);
- break;
- case AMDGPU_HPD_3:
- WREG32(mmDC_HPD3_CONTROL, tmp);
- break;
- case AMDGPU_HPD_4:
- WREG32(mmDC_HPD4_CONTROL, tmp);
- break;
- case AMDGPU_HPD_5:
- WREG32(mmDC_HPD5_CONTROL, tmp);
- break;
- case AMDGPU_HPD_6:
- WREG32(mmDC_HPD6_CONTROL, tmp);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -427,34 +346,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
- break;
- default:
- continue;
- }
-
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
- dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
@@ -475,32 +369,18 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ u32 tmp;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- switch (amdgpu_connector->hpd.hpd) {
- case AMDGPU_HPD_1:
- WREG32(mmDC_HPD1_CONTROL, 0);
- break;
- case AMDGPU_HPD_2:
- WREG32(mmDC_HPD2_CONTROL, 0);
- break;
- case AMDGPU_HPD_3:
- WREG32(mmDC_HPD3_CONTROL, 0);
- break;
- case AMDGPU_HPD_4:
- WREG32(mmDC_HPD4_CONTROL, 0);
- break;
- case AMDGPU_HPD_5:
- WREG32(mmDC_HPD5_CONTROL, 0);
- break;
- case AMDGPU_HPD_6:
- WREG32(mmDC_HPD6_CONTROL, 0);
- break;
- default:
- break;
- }
+ if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+ continue;
+
+ tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+ tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
}
@@ -3198,42 +3078,23 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+ u32 dc_hpd_int_cntl;
- switch (type) {
- case AMDGPU_HPD_1:
- dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
- break;
- case AMDGPU_HPD_2:
- dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
- break;
- case AMDGPU_HPD_3:
- dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
- break;
- case AMDGPU_HPD_4:
- dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
- break;
- case AMDGPU_HPD_5:
- dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
- break;
- case AMDGPU_HPD_6:
- dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
- break;
- default:
+ if (type >= adev->mode_info.num_hpd) {
DRM_DEBUG("invalid hdp %d\n", type);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
- WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
default:
break;
@@ -3406,7 +3267,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, int_control, tmp;
+ uint32_t disp_int, mask, tmp;
unsigned hpd;
if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -3417,12 +3278,11 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
hpd = entry->src_data;
disp_int = RREG32(interrupt_status_offsets[hpd].reg);
mask = interrupt_status_offsets[hpd].hpd;
- int_control = hpd_int_control_offsets[hpd];
if (disp_int & mask) {
- tmp = RREG32(int_control);
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(int_control, tmp);
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
@@ -3443,7 +3303,7 @@ static int dce_v8_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_v8_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.name = "dce_v8_0",
.early_init = dce_v8_0_early_init,
.late_init = NULL,
@@ -3773,3 +3633,48 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}
+
+const struct amdgpu_ip_block_version dce_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &dce_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
index 7d0770c3a49b..13b802dd946a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -24,7 +24,11 @@
#ifndef __DCE_V8_0_H__
#define __DCE_V8_0_H__
-extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_2_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_3_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_5_ip_block;
void dce_v8_0_disable_dce(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index c2bd9f045532..cc85676a68d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -27,6 +27,9 @@
#include "atom.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "dce_v6_0.h"
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "dce_v8_0.h"
#endif
@@ -34,11 +37,13 @@
#include "dce_v11_0.h"
#include "dce_virtual.h"
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+
+
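
DCE_VIRTUAL_VBLANK_PERIOD is 16,666,666 ns, i.e. one second divided by 60: the virtual display has no hardware scanout, so vblank events are presumably synthesized at 60 Hz by a timer. A minimal sketch of such emulation, assuming an hrtimer-based callback (the actual timer plumbing is outside the lines shown here):

static enum hrtimer_restart dce_virtual_vblank_timer(struct hrtimer *t)
{
    /* signal one simulated vblank to DRM here, then re-arm
     * for the next 60 Hz tick */
    hrtimer_forward_now(t, ns_to_ktime(DCE_VIRTUAL_VBLANK_PERIOD));
    return HRTIMER_RESTART;
}
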
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry);
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+ int index);
/**
* dce_virtual_vblank_wait - vblank wait asic callback.
@@ -99,6 +104,14 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ dce_v6_0_disable_dce(adev);
+ break;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
@@ -119,6 +132,9 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
dce_v11_0_disable_dce(adev);
break;
case CHIP_TOPAZ:
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
/* no DCE */
return;
default:
@@ -195,10 +211,9 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
amdgpu_crtc->enabled = true;
- /* Make sure VBLANK and PFLIP interrupts are still enabled */
+ /* Make sure VBLANK interrupts are still enabled */
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
- amdgpu_irq_update(adev, &adev->pageflip_irq, type);
drm_vblank_on(dev, amdgpu_crtc->crtc_id);
break;
case DRM_MODE_DPMS_STANDBY:
@@ -264,24 +279,6 @@ static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- amdgpu_crtc->encoder = encoder;
- amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
- break;
- }
- }
- if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- return false;
- }
-
return true;
}
@@ -341,6 +338,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
+ amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
return 0;
@@ -350,48 +348,128 @@ static int dce_virtual_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
dce_virtual_set_display_funcs(adev);
dce_virtual_set_irq_funcs(adev);
- adev->mode_info.num_crtc = 1;
adev->mode_info.num_hpd = 1;
adev->mode_info.num_dig = 1;
return 0;
}
-static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+static struct drm_encoder *
+dce_virtual_encoder(struct drm_connector *connector)
{
- struct amdgpu_i2c_bus_rec ddc_bus;
- struct amdgpu_router router;
- struct amdgpu_hpd hpd;
+ int enc_id = connector->encoder_ids[0];
+ struct drm_encoder *encoder;
+ int i;
- /* look up gpio for ddc, hpd */
- ddc_bus.valid = false;
- hpd.hpd = AMDGPU_HPD_NONE;
- /* needed for aux chan transactions */
- ddc_bus.hpd = hpd.hpd;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
- memset(&router, 0, sizeof(router));
- router.ddc_valid = false;
- router.cd_valid = false;
- amdgpu_display_add_connector(adev,
- 0,
- ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
- CONNECTOR_OBJECT_ID_VIRTUAL,
- &hpd,
- &router);
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
+ continue;
- amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
- ATOM_DEVICE_CRT1_SUPPORT,
- 0);
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ return encoder;
+ }
- amdgpu_link_encoder_connector(adev->ddev);
+ /* pick the first one */
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
+ return NULL;
+}
+
+static int dce_virtual_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode = NULL;
+ unsigned i;
+ static const struct mode_size {
+ int w;
+ int h;
+ } common_modes[] = {
+ { 640, 480},
+ { 720, 480},
+ { 800, 600},
+ { 848, 480},
+ {1024, 768},
+ {1152, 768},
+ {1280, 720},
+ {1280, 800},
+ {1280, 854},
+ {1280, 960},
+ {1280, 1024},
+ {1440, 900},
+ {1400, 1050},
+ {1680, 1050},
+ {1600, 1200},
+ {1920, 1080},
+ {1920, 1200}
+ };
+
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ drm_mode_probed_add(connector, mode);
+ }
- return true;
+ return 0;
+}
+
+static int dce_virtual_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static int
+dce_virtual_dpms(struct drm_connector *connector, int mode)
+{
+ return 0;
}
+static enum drm_connector_status
+dce_virtual_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static int
+dce_virtual_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void dce_virtual_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static void dce_virtual_force(struct drm_connector *connector)
+{
+ return;
+}
+
+static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = {
+ .get_modes = dce_virtual_get_modes,
+ .mode_valid = dce_virtual_mode_valid,
+ .best_encoder = dce_virtual_encoder,
+};
+
+static const struct drm_connector_funcs dce_virtual_connector_funcs = {
+ .dpms = dce_virtual_dpms,
+ .detect = dce_virtual_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = dce_virtual_set_property,
+ .destroy = dce_virtual_destroy,
+ .force = dce_virtual_force,
+};
+
static int dce_virtual_sw_init(void *handle)
{
int r, i;
@@ -420,16 +498,16 @@ static int dce_virtual_sw_init(void *handle)
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
- /* allocate crtcs */
+ /* allocate crtcs, encoders, connectors */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = dce_virtual_crtc_init(adev, i);
if (r)
return r;
+ r = dce_virtual_connector_encoder_init(adev, i);
+ if (r)
+ return r;
}
- dce_virtual_get_connector_info(adev);
- amdgpu_print_display_setup(adev->ddev);
-
drm_kms_helper_poll_init(adev->ddev);
adev->mode_info.mode_config_initialized = true;
@@ -496,7 +574,7 @@ static int dce_virtual_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs dce_virtual_ip_funcs = {
+static const struct amd_ip_funcs dce_virtual_ip_funcs = {
.name = "dce_virtual",
.early_init = dce_virtual_early_init,
.late_init = NULL,
@@ -526,8 +604,8 @@ static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
static void
dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return;
}
@@ -547,10 +625,6 @@ static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
- /* set the active encoder to connector routing */
- amdgpu_encoder_set_active_device(encoder);
-
return true;
}
@@ -576,45 +650,40 @@ static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
.destroy = dce_virtual_encoder_destroy,
};
-static void dce_virtual_encoder_add(struct amdgpu_device *adev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+ int index)
{
- struct drm_device *dev = adev->ddev;
struct drm_encoder *encoder;
- struct amdgpu_encoder *amdgpu_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->encoder_enum == encoder_enum) {
- amdgpu_encoder->devices |= supported_device;
- return;
- }
+ struct drm_connector *connector;
+
+ /* add a new encoder */
+ encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
+ if (!encoder)
+ return -ENOMEM;
+ encoder->possible_crtcs = 1 << index;
+ drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
+ connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
+ if (!connector) {
+ kfree(encoder);
+ return -ENOMEM;
}
- /* add a new one */
- amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
- if (!amdgpu_encoder)
- return;
+ /* add a new connector */
+ drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ drm_connector_register(connector);
- encoder = &amdgpu_encoder->base;
- encoder->possible_crtcs = 0x1;
- amdgpu_encoder->enc_priv = NULL;
- amdgpu_encoder->encoder_enum = encoder_enum;
- amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- amdgpu_encoder->devices = supported_device;
- amdgpu_encoder->rmx_type = RMX_OFF;
- amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
- amdgpu_encoder->is_ext_encoder = false;
- amdgpu_encoder->caps = caps;
-
- drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
- DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+ /* link them */
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ return 0;
}
static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
@@ -630,8 +699,8 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
.page_flip = &dce_virtual_page_flip,
.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
- .add_encoder = &dce_virtual_encoder_add,
- .add_connector = &amdgpu_connector_add,
+ .add_encoder = NULL,
+ .add_connector = NULL,
.stop_mc_access = &dce_virtual_stop_mc_access,
.resume_mc_access = &dce_virtual_resume_mc_access,
};
@@ -642,107 +711,13 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
adev->mode_info.funcs = &dce_virtual_display_funcs;
}
-static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
-{
- struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer);
- struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info);
- unsigned crtc = 0;
- drm_handle_vblank(adev->ddev, crtc);
- dce_virtual_pageflip_irq(adev, NULL, NULL);
- hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
- return HRTIMER_NORESTART;
-}
-
-static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- if (state && !adev->mode_info.vsync_timer_enabled) {
- DRM_DEBUG("Enable software vsync timer\n");
- hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
- adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
- hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
- } else if (!state && adev->mode_info.vsync_timer_enabled) {
- DRM_DEBUG("Disable software vsync timer\n");
- hrtimer_cancel(&adev->mode_info.vblank_timer);
- }
-
- adev->mode_info.vsync_timer_enabled = state;
- DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
-}
-
-
-static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- switch (type) {
- case AMDGPU_CRTC_IRQ_VBLANK1:
- dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-}
-
-static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned crtc = 0;
- unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
-
- dce_virtual_crtc_vblank_int_ack(adev, crtc);
-
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- dce_virtual_pageflip_irq(adev, NULL, NULL);
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
- return 0;
-}
-
-static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- if (type >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", type);
- return -EINVAL;
- }
- DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
-
- return 0;
-}
-
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
+static int dce_virtual_pageflip(struct amdgpu_device *adev,
+ unsigned crtc_id)
{
unsigned long flags;
- unsigned crtc_id = 0;
struct amdgpu_crtc *amdgpu_crtc;
struct amdgpu_flip_work *works;
- crtc_id = 0;
amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
if (crtc_id >= adev->mode_info.num_crtc) {
@@ -781,22 +756,79 @@ static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
return 0;
}
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+ struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
+ struct amdgpu_crtc, vblank_timer);
+ struct drm_device *ddev = amdgpu_crtc->base.dev;
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
+ dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+ hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
+ HRTIMER_MODE_REL);
+
+ return HRTIMER_NORESTART;
+}
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+ DRM_DEBUG("Enable software vsync timer\n");
+ hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+ adev->mode_info.crtcs[crtc]->vblank_timer.function =
+ dce_virtual_vblank_timer_handle;
+ hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
+ ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+ DRM_DEBUG("Disable software vsync timer\n");
+ hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
+ }
+
+ adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
+ DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ if (type > AMDGPU_CRTC_IRQ_VBLANK6)
+ return -EINVAL;
+
+ dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state);
+
+ return 0;
+}
+
static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
.set = dce_virtual_set_crtc_irq_state,
- .process = dce_virtual_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
- .set = dce_virtual_set_pageflip_irq_state,
- .process = dce_virtual_pageflip_irq,
+ .process = NULL,
};
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
{
adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
-
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
- adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
}
+const struct amdgpu_ip_block_version dce_virtual_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
index e239243f6ebc..ed422012c8c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
@@ -24,8 +24,7 @@
#ifndef __DCE_VIRTUAL_H__
#define __DCE_VIRTUAL_H__
-extern const struct amd_ip_funcs dce_virtual_ip_funcs;
-#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+extern const struct amdgpu_ip_block_version dce_virtual_ip_block;
#endif
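
The virtual DCE above emulates vblank with a per-CRTC hrtimer that re-arms itself from its own handler (hrtimer_start() followed by HRTIMER_NORESTART). A self-contained sketch of the same ~60 Hz pattern using the more common hrtimer_forward_now()/HRTIMER_RESTART idiom; all names here are illustrative:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define DEMO_VBLANK_PERIOD_NS 16666666ULL /* ~60 Hz, as in DCE_VIRTUAL_VBLANK_PERIOD */

static struct hrtimer demo_vblank_timer;

static enum hrtimer_restart demo_vblank_cb(struct hrtimer *timer)
{
        /* deliver the software vblank event here, then re-arm */
        hrtimer_forward_now(timer, ns_to_ktime(DEMO_VBLANK_PERIOD_NS));
        return HRTIMER_RESTART;
}

static void demo_vblank_start(void)
{
        hrtimer_init(&demo_vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        demo_vblank_timer.function = demo_vblank_cb;
        hrtimer_start(&demo_vblank_timer, ns_to_ktime(DEMO_VBLANK_PERIOD_NS),
                      HRTIMER_MODE_REL);
}

Both forms are equivalent in effect; hrtimer_forward_now() advances from the previous expiry and so avoids drift, while the driver's manual re-arm measures each period from handler execution.
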
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 40abb6b81c09..21c086e02e7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1522,7 +1522,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -1548,7 +1548,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -1569,7 +1569,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -1940,7 +1940,7 @@ static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -1966,7 +1966,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
/* write new base address */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -2814,33 +2814,6 @@ static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* gfx_v6_0_ring_emit_ib */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 5 + /* gfx_v6_0_ring_emit_hdp_flush */
- 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
- 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
- 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
- 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
- 3; /* gfx_v6_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
- return
- 5 + /* gfx_v6_0_ring_emit_hdp_flush */
- 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v6_0_ring_emit_vm_flush */
- 14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v6_0_select_se_sh,
@@ -2896,9 +2869,7 @@ static int gfx_v6_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- 0x80000000, 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -2920,9 +2891,7 @@ static int gfx_v6_0_sw_init(void *handle)
sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
- 0x80000000, 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ &adev->gfx.eop_irq, irq_type);
if (r)
return r;
}
@@ -3237,7 +3206,7 @@ static int gfx_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.name = "gfx_v6_0",
.early_init = gfx_v6_0_early_init,
.late_init = NULL,
@@ -3255,10 +3224,20 @@ const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = 0x80000000,
.get_rptr = gfx_v6_0_ring_get_rptr,
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+ 3, /* gfx_v6_ring_emit_cntxcntl */
+ .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3269,15 +3248,22 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = 0x80000000,
.get_rptr = gfx_v6_0_ring_get_rptr,
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v6_0_ring_emit_vm_flush */
+ 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3287,8 +3273,6 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
.test_ring = gfx_v6_0_ring_test_ring,
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
- .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
};
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -3360,3 +3344,12 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
index b9657e72b248..ced6fc42f688 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
@@ -24,6 +24,6 @@
#ifndef __GFX_V6_0_H__
#define __GFX_V6_0_H__
-extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v6_0_ip_block;
#endif
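
The get_emit_ib_size/get_dma_frame_size callbacks deleted above are superseded by the constant .emit_frame_size/.emit_ib_size fields in amdgpu_ring_funcs, so worst-case ring space can be computed without an indirect call per submission. A sketch of a hypothetical consumer (the helper name is made up; amdgpu_ring_alloc() is the existing reservation API):

static int example_reserve_frame(struct amdgpu_ring *ring, unsigned num_ibs)
{
        /* worst-case dwords for one frame plus its indirect buffers */
        unsigned ndw = ring->funcs->emit_frame_size +
                       num_ibs * ring->funcs->emit_ib_size;

        return amdgpu_ring_alloc(ring, ndw);
}
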
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 71116da9e782..5b631fd1a879 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2077,9 +2077,9 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask;
- int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
+ int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
case 1:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -2286,7 +2286,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -2312,7 +2312,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -2333,7 +2333,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -3222,7 +3222,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
*/
static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -3262,7 +3262,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3391,7 +3391,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.save_restore_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.save_restore_obj);
if (r) {
@@ -3435,7 +3436,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.clear_state_obj);
if (r) {
@@ -3475,7 +3477,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.cp_table_obj == NULL) {
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.cp_table_obj);
if (r) {
@@ -4354,44 +4357,40 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
-static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- return
- 4; /* gfx_v7_0_ring_emit_ib_gfx */
+ WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+ return RREG32(mmSQ_IND_DATA);
}
-static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
- return
- 20 + /* gfx_v7_0_ring_emit_gds_switch */
- 7 + /* gfx_v7_0_ring_emit_hdp_flush */
- 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
- 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
- 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
- 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
- 3; /* gfx_v7_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v7_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v7_0_ring_emit_gds_switch */
- 7 + /* gfx_v7_0_ring_emit_hdp_flush */
- 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v7_0_ring_emit_vm_flush */
- 7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ /* type 0 wave data */
+ dst[(*no_fields)++] = 0;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v7_0_select_se_sh,
+ .read_wave_data = &gfx_v7_0_read_wave_data,
};
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
@@ -4643,9 +4642,7 @@ static int gfx_v7_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -4670,9 +4667,7 @@ static int gfx_v7_0_sw_init(void *handle)
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ &adev->gfx.eop_irq, irq_type);
if (r)
return r;
}
@@ -5123,7 +5118,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
.late_init = gfx_v7_0_late_init,
@@ -5141,10 +5136,21 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+ 3, /* gfx_v7_ring_emit_cntxcntl */
+ .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5157,15 +5163,23 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
- .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v7_0_ring_emit_vm_flush */
+ 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5177,8 +5191,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
- .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5289,3 +5301,39 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index 94e3ea147c26..2f5164cc0e53 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -24,6 +24,9 @@
#ifndef __GFX_V7_0_H__
#define __GFX_V7_0_H__
-extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_1_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_2_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_3_ip_block;
#endif
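
For reference, the index word that wave_read_ind() writes in the hunks above decodes as follows; the field names are read off the masks in this patch (bit 13 appears to be a force-read strobe on this hardware) and are not quoted from the register headers:

#define EX_SQ_IND_WAVE_ID(w)  ((w) & 0xF)        /* bits 3:0 */
#define EX_SQ_IND_SIMD_ID(s)  (((s) & 0x3) << 4) /* bits 5:4 */
#define EX_SQ_IND_FORCE_READ  (1 << 13)          /* assumed read strobe */
#define EX_SQ_IND_ADDRESS(a)  ((a) << 16)        /* ixSQ_WAVE_* register index */

static uint32_t example_wave_read(struct amdgpu_device *adev,
                                  uint32_t simd, uint32_t wave, uint32_t addr)
{
        WREG32(mmSQ_IND_INDEX, EX_SQ_IND_WAVE_ID(wave) |
                               EX_SQ_IND_SIMD_ID(simd) |
                               EX_SQ_IND_ADDRESS(addr) |
                               EX_SQ_IND_FORCE_READ);
        return RREG32(mmSQ_IND_DATA);
}
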
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index bb97182dc749..23f1bc94ad3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -797,7 +797,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
long r;
@@ -823,7 +823,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err2;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
@@ -843,7 +843,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
err2:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -1057,6 +1057,19 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ /* we also need to account for the JT (jump table) */
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+
+ if (amdgpu_sriov_vf(adev)) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
+ info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
+ info->fw = adev->gfx.mec_fw;
+ adev->firmware.fw_size +=
+ ALIGN(64 * PAGE_SIZE, PAGE_SIZE);
+ }
+
if (adev->gfx.mec2_fw) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
@@ -1126,34 +1139,8 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
PACKET3_SET_CONTEXT_REG_START);
- switch (adev->asic_type) {
- case CHIP_TONGA:
- case CHIP_POLARIS10:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x0000002A);
- break;
- case CHIP_POLARIS11:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_FIJI:
- buffer[count++] = cpu_to_le32(0x3a00161a);
- buffer[count++] = cpu_to_le32(0x0000002e);
- break;
- case CHIP_TOPAZ:
- case CHIP_CARRIZO:
- buffer[count++] = cpu_to_le32(0x00000002);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_STONEY:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- default:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- }
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -1272,7 +1259,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj == NULL) {
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.clear_state_obj);
if (r) {
@@ -1314,7 +1302,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if (adev->gfx.rlc.cp_table_obj == NULL) {
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL,
&adev->gfx.rlc.cp_table_obj);
if (r) {
@@ -1574,7 +1563,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
int r, i;
u32 tmp;
unsigned total_size, vgpr_offset, sgpr_offset;
@@ -1707,7 +1696,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
}
/* wait for the GPU to finish processing the IB */
- r = fence_wait(f, false);
+ r = dma_fence_wait(f, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
goto fail;
@@ -1728,7 +1717,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
fail:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
return r;
}
@@ -2044,10 +2033,8 @@ static int gfx_v8_0_sw_init(void *handle)
ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
}
- r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
- AMDGPU_RING_TYPE_GFX);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_EOP);
if (r)
return r;
}
@@ -2071,10 +2058,8 @@ static int gfx_v8_0_sw_init(void *handle)
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_TYPE_COMPUTE);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+ irq_type);
if (r)
return r;
}
@@ -3678,6 +3663,21 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
num_rb_pipes);
}
+ /* cache the values for userspace */
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ adev->gfx.config.rb_config[i][j].rb_backend_disable =
+ RREG32(mmCC_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].raster_config =
+ RREG32(mmPA_SC_RASTER_CONFIG);
+ adev->gfx.config.rb_config[i][j].raster_config_1 =
+ RREG32(mmPA_SC_RASTER_CONFIG_1);
+ }
+ }
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -4330,7 +4330,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
u32 tmp;
u32 rb_bufsz;
- u64 rb_addr, rptr_addr;
+ u64 rb_addr, rptr_addr, wptr_gpu_addr;
int r;
/* Set the write pointer delay */
@@ -4361,6 +4361,9 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
+ WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
mdelay(1);
WREG32(mmCP_RB0_CNTL, tmp);
@@ -5437,9 +5440,41 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
+{
+ WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+ return RREG32(mmSQ_IND_DATA);
+}
+
+static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+{
+ /* type 0 wave data */
+ dst[(*no_fields)++] = 0;
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+}
+
static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v8_0_select_se_sh,
+ .read_wave_data = &gfx_v8_0_read_wave_data,
};
static int gfx_v8_0_early_init(void *handle)
@@ -6119,7 +6154,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask, reg_mem_engine;
- if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
case 1:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -6221,7 +6256,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
@@ -6239,11 +6274,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
-
- /* GFX8 emits 128 dw nop to prevent DE do vm_flush before CE finish CEIB */
- if (usepfp)
- amdgpu_ring_insert_nop(ring, 128);
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -6359,42 +6390,6 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v8_0_ring_emit_ib_gfx */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v8_0_ring_emit_gds_switch */
- 7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
- 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
- 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
- 256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
- 2 + /* gfx_v8_ring_emit_sb */
- 3; /* gfx_v8_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
- return
- 4; /* gfx_v8_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
- return
- 20 + /* gfx_v8_0_ring_emit_gds_switch */
- 7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
- 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v8_0_ring_emit_vm_flush */
- 7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
-}
-
static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
@@ -6540,7 +6535,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
.late_init = gfx_v8_0_late_init,
@@ -6561,10 +6556,22 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
+ .type = AMDGPU_RING_TYPE_GFX,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+ 2 + /* gfx_v8_ring_emit_sb */
+ 3, /* gfx_v8_ring_emit_cntxcntl */
+ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6578,15 +6585,23 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v8_ring_emit_sb,
.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
- .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
- .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v8_0_ring_emit_vm_flush */
+ 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6598,8 +6613,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
- .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6752,3 +6765,21 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}
+
+const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index ebed1f829297..788cc3ab584b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -24,6 +24,7 @@
#ifndef __GFX_V8_0_H__
#define __GFX_V8_0_H__
-extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v8_1_ip_block;
#endif
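
The fence -> dma_fence rename threaded through these hunks is mechanical; the wait/put contract is unchanged. A minimal sketch of the calling convention the IB tests above rely on (dma_fence_wait_timeout() returns 0 on timeout, a negative errno on error, and the remaining jiffies on success):

#include <linux/dma-fence.h>
#include <linux/errno.h>

static int example_wait_and_release(struct dma_fence *f, long timeout)
{
        long r = dma_fence_wait_timeout(f, false, timeout);

        dma_fence_put(f);       /* drop our reference either way */
        if (r == 0)
                return -ETIMEDOUT;
        return r < 0 ? r : 0;
}
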
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index b13c8aaec078..1940d36bc304 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -1030,7 +1030,7 @@ static int gmc_v6_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
.name = "gmc_v6_0",
.early_init = gmc_v6_0_early_init,
.late_init = gmc_v6_0_late_init,
@@ -1069,3 +1069,11 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}
+const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
index 42c4fc676cd4..ed2f64dec47a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
@@ -24,6 +24,6 @@
#ifndef __GMC_V6_0_H__
#define __GMC_V6_0_H__
-extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v6_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index aa0c4b964621..3a25f72980c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1235,7 +1235,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.name = "gmc_v7_0",
.early_init = gmc_v7_0_early_init,
.late_init = gmc_v7_0_late_init,
@@ -1273,3 +1273,21 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.num_types = 1;
adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
index 0b386b5d2f7a..ebce2966c1c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
@@ -24,6 +24,7 @@
#ifndef __GMC_V7_0_H__
#define __GMC_V7_0_H__
-extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v7_4_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index a16b2201d52c..f7372d32b8e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1437,7 +1437,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.name = "gmc_v8_0",
.early_init = gmc_v8_0_early_init,
.late_init = gmc_v8_0_late_init,
@@ -1478,3 +1478,30 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.num_types = 1;
adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
index fc5001a8119d..19b8a8aed204 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
@@ -24,6 +24,8 @@
#ifndef __GMC_V8_0_H__
#define __GMC_V8_0_H__
-extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_5_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 3b8906ce3511..ac21bb7bc0f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -392,7 +392,7 @@ static int iceland_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs iceland_ih_ip_funcs = {
+static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.name = "iceland_ih",
.early_init = iceland_ih_early_init,
.late_init = NULL,
@@ -421,3 +421,11 @@ static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &iceland_ih_funcs;
}
+const struct amdgpu_ip_block_version iceland_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &iceland_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
index 57558cddfbcb..3235f4277548 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
@@ -24,6 +24,6 @@
#ifndef __ICELAND_IH_H__
#define __ICELAND_IH_H__
-extern const struct amd_ip_funcs iceland_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version iceland_ih_ip_block;
#endif /* __ICELAND_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 71d2856222fa..61172d4a0657 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2796,7 +2796,7 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -3245,6 +3245,18 @@ static int kv_dpm_set_powergating_state(void *handle,
return 0;
}
+static int kv_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ if (equal == NULL)
+ return -EINVAL;
+
+ *equal = false;
+ return 0;
+}
+
const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init,
@@ -3275,6 +3287,8 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
.enable_bapm = &kv_dpm_enable_bapm,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
+ .check_state_equal = kv_check_state_equal,
};
static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -3293,3 +3307,12 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}
+
+const struct amdgpu_ip_block_version kv_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &kv_dpm_ip_funcs,
+};
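For reference, a hedged sketch of how a DPM core might consume a check_state_equal callback such as kv_check_state_equal, skipping the hardware reprogramming when the two states compare equal. The caller below is an assumption (the adev parameter is dropped for brevity), not amdgpu's actual code:

        #include <errno.h>
        #include <stdbool.h>
        #include <stdio.h>

        struct amdgpu_ps { int dummy; };

        typedef int (*check_state_equal_t)(struct amdgpu_ps *cur,
                                           struct amdgpu_ps *req, bool *equal);

        static int never_equal(struct amdgpu_ps *cur, struct amdgpu_ps *req,
                               bool *equal)
        {
                if (equal == NULL)
                        return -EINVAL;
                *equal = false;        /* KV always reprograms, as in the patch */
                return 0;
        }

        static int maybe_switch_state(check_state_equal_t cb,
                                      struct amdgpu_ps *cur, struct amdgpu_ps *req)
        {
                bool equal = false;

                if (cb && cb(cur, req, &equal) == 0 && equal)
                        return 0;        /* states match, nothing to do */
                printf("reprogramming power state\n");
                return 0;
        }

        int main(void)
        {
                struct amdgpu_ps cur = { 0 }, req = { 0 };

                return maybe_switch_state(never_equal, &cur, &req);
        }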
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 565dab3c7218..e81aa4682760 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -232,10 +232,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -668,7 +668,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -705,7 +705,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -725,7 +725,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -902,22 +902,6 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 6; /* sdma_v2_4_ring_emit_ib */
-}
-
-static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* sdma_v2_4_ring_emit_hdp_flush */
- 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
- 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
- 12 + /* sdma_v2_4_ring_emit_vm_flush */
- 10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int sdma_v2_4_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -965,11 +949,10 @@ static int sdma_v2_4_sw_init(void *handle)
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1204,7 +1187,7 @@ static int sdma_v2_4_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
+static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.name = "sdma_v2_4",
.early_init = sdma_v2_4_early_init,
.late_init = NULL,
@@ -1222,10 +1205,19 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
};
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.get_rptr = sdma_v2_4_ring_get_rptr,
.get_wptr = sdma_v2_4_ring_get_wptr,
.set_wptr = sdma_v2_4_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* sdma_v2_4_ring_emit_hdp_flush */
+ 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+ 12 + /* sdma_v2_4_ring_emit_vm_flush */
+ 10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
.emit_ib = sdma_v2_4_ring_emit_ib,
.emit_fence = sdma_v2_4_ring_emit_fence,
.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
@@ -1236,8 +1228,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
.pad_ib = sdma_v2_4_ring_pad_ib,
- .get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
- .get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
};
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1350,3 +1340,12 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &sdma_v2_4_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
index 07349f5ee10f..28b433729216 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
@@ -24,6 +24,6 @@
#ifndef __SDMA_V2_4_H__
#define __SDMA_V2_4_H__
-extern const struct amd_ip_funcs sdma_v2_4_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v2_4_ip_block;
#endif
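The fence to dma_fence rename repeated through these hunks keeps the wait/put discipline intact: wait with a timeout, treat zero as a timeout, and drop the reference on every exit path. A toy, self-contained model of that pattern (this dma_fence is a stand-in, not the kernel's):

        #include <stdio.h>

        struct dma_fence { int refcount; int signaled; };

        /* returns remaining timeout on success, 0 on timeout (toy semantics) */
        static long dma_fence_wait_timeout(struct dma_fence *f, int intr, long timeout)
        {
                (void)intr;
                return f && f->signaled ? timeout : 0;
        }

        static void dma_fence_put(struct dma_fence *f)
        {
                if (f && --f->refcount == 0)
                        printf("fence released\n");
        }

        int main(void)
        {
                struct dma_fence fence = { .refcount = 1, .signaled = 1 };
                struct dma_fence *f = &fence;
                long r = dma_fence_wait_timeout(f, 0, 100);

                if (r == 0)
                        printf("IB test timed out\n");
                dma_fence_put(f);        /* always drop the reference, as at err1: */
                return 0;
        }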
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index a9d10941fb53..77f146587c60 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -392,10 +392,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
- amdgpu_ring_write(ring, ring->nop |
+ amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
- amdgpu_ring_write(ring, ring->nop);
+ amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
@@ -871,7 +871,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -908,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -927,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -1104,22 +1104,6 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 6; /* sdma_v3_0_ring_emit_ib */
-}
-
-static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 6 + /* sdma_v3_0_ring_emit_hdp_flush */
- 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
- 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
- 12 + /* sdma_v3_0_ring_emit_vm_flush */
- 10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int sdma_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1177,11 +1161,10 @@ static int sdma_v3_0_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -1544,7 +1527,7 @@ static int sdma_v3_0_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
+static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.name = "sdma_v3_0",
.early_init = sdma_v3_0_early_init,
.late_init = NULL,
@@ -1565,10 +1548,19 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.get_rptr = sdma_v3_0_ring_get_rptr,
.get_wptr = sdma_v3_0_ring_get_wptr,
.set_wptr = sdma_v3_0_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 6 + /* sdma_v3_0_ring_emit_hdp_flush */
+ 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+ 12 + /* sdma_v3_0_ring_emit_vm_flush */
+ 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
.emit_ib = sdma_v3_0_ring_emit_ib,
.emit_fence = sdma_v3_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
@@ -1579,8 +1571,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
.pad_ib = sdma_v3_0_ring_pad_ib,
- .get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1693,3 +1683,21 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
index 0cb9698a3054..7aa223d35f1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
@@ -24,6 +24,7 @@
#ifndef __SDMA_V3_0_H__
#define __SDMA_V3_0_H__
-extern const struct amd_ip_funcs sdma_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version sdma_v3_1_ip_block;
#endif
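Replacing the get_emit_ib_size/get_dma_frame_size callbacks with the emit_frame_size and emit_ib_size constants lets the ring core size a submission with plain arithmetic instead of indirect calls. A sketch of that arithmetic under assumed semantics (fixed per-frame overhead plus a per-IB cost):

        #include <stdio.h>

        struct ring_funcs {
                unsigned emit_frame_size;        /* per-frame overhead, in dwords */
                unsigned emit_ib_size;           /* dwords per indirect buffer */
        };

        static unsigned frame_dwords(const struct ring_funcs *f, unsigned num_ibs)
        {
                return f->emit_frame_size + f->emit_ib_size * num_ibs;
        }

        int main(void)
        {
                /* values taken from sdma_v3_0_ring_funcs above */
                const struct ring_funcs sdma = {
                        .emit_frame_size = 6 + 3 + 6 + 12 + 10 + 10 + 10,
                        .emit_ib_size = 7 + 6,
                };

                printf("dwords for 2 IBs: %u\n", frame_dwords(&sdma, 2));
                return 0;
        }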
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index dc9511c5ecb8..3ed8ad8725b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -39,6 +39,7 @@
#include "si_dma.h"
#include "dce_v6_0.h"
#include "si.h"
+#include "dce_virtual.h"
static const u32 tahiti_golden_registers[] =
{
@@ -905,7 +906,7 @@ static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
-u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+static u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
@@ -918,7 +919,7 @@ u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
return r;
}
-void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+static void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;
@@ -1811,7 +1812,7 @@ static int si_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_common_ip_funcs = {
+static const struct amd_ip_funcs si_common_ip_funcs = {
.name = "si_common",
.early_init = si_common_early_init,
.late_init = NULL,
@@ -1828,119 +1829,13 @@ const struct amd_ip_funcs si_common_ip_funcs = {
.set_powergating_state = si_common_set_powergating_state,
};
-static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+static const struct amdgpu_ip_block_version si_common_ip_block =
{
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dma_ip_funcs,
- },
-/* {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &si_null_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_null_ip_funcs,
- },
- */
-};
-
-
-static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
-{
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dma_ip_funcs,
- },
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_common_ip_funcs,
};
int si_set_ip_blocks(struct amdgpu_device *adev)
@@ -1949,13 +1844,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+ break;
case CHIP_OLAND:
- adev->ip_blocks = verde_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v6_4_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
- adev->ip_blocks = hainan_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+ amdgpu_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &si_dma_ip_block);
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h
index 959d7b63e0e5..589225080c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -24,8 +24,6 @@
#ifndef __SI_H__
#define __SI_H__
-extern const struct amd_ip_funcs si_common_ip_funcs;
-
void si_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int si_set_ip_blocks(struct amdgpu_device *adev);
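With the per-ASIC static arrays gone, blocks are appended in order through amdgpu_ip_block_add, which also makes the virtual-display conditional straightforward. A toy model of append-style registration (struct layout and the helper below are simplified assumptions):

        #include <stdio.h>

        #define MAX_IP_BLOCKS 16

        struct ip_block_version { const char *name; };

        struct device {
                const struct ip_block_version *blocks[MAX_IP_BLOCKS];
                int num_blocks;
                int enable_virtual_display;
        };

        static int ip_block_add(struct device *dev, const struct ip_block_version *v)
        {
                if (!v || dev->num_blocks >= MAX_IP_BLOCKS)
                        return -1;
                dev->blocks[dev->num_blocks++] = v;        /* order matters */
                return 0;
        }

        static const struct ip_block_version common = { "common" };
        static const struct ip_block_version dce_hw = { "dce" };
        static const struct ip_block_version dce_virtual = { "dce_virtual" };

        int main(void)
        {
                struct device dev = { .enable_virtual_display = 1 };

                ip_block_add(&dev, &common);
                /* the same conditional the patch adds for each SI variant */
                ip_block_add(&dev, dev.enable_virtual_display ? &dce_virtual : &dce_hw);

                for (int i = 0; i < dev.num_blocks; i++)
                        printf("%d: %s\n", i, dev.blocks[i]->name);
                return 0;
        }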
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index de358193a8f9..3dd552ae0b59 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -274,7 +274,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
- struct fence *f = NULL;
+ struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
@@ -305,7 +305,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = fence_wait_timeout(f, false, timeout);
+ r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
@@ -325,7 +325,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
err1:
amdgpu_ib_free(adev, &ib, NULL);
- fence_put(f);
+ dma_fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -495,22 +495,6 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
-static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 7 + 3; /* si_dma_ring_emit_ib */
-}
-
-static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 3 + /* si_dma_ring_emit_hdp_flush */
- 3 + /* si_dma_ring_emit_hdp_invalidate */
- 6 + /* si_dma_ring_emit_pipeline_sync */
- 12 + /* si_dma_ring_emit_vm_flush */
- 9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
-}
-
static int si_dma_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -547,11 +531,10 @@ static int si_dma_sw_init(void *handle)
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
- AMDGPU_RING_TYPE_SDMA);
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
}
@@ -762,7 +745,7 @@ static int si_dma_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_dma_ip_funcs = {
+static const struct amd_ip_funcs si_dma_ip_funcs = {
.name = "si_dma",
.early_init = si_dma_early_init,
.late_init = NULL,
@@ -780,10 +763,19 @@ const struct amd_ip_funcs si_dma_ip_funcs = {
};
static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
.get_rptr = si_dma_ring_get_rptr,
.get_wptr = si_dma_ring_get_wptr,
.set_wptr = si_dma_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 3 + /* si_dma_ring_emit_hdp_flush */
+ 3 + /* si_dma_ring_emit_hdp_invalidate */
+ 6 + /* si_dma_ring_emit_pipeline_sync */
+ 12 + /* si_dma_ring_emit_vm_flush */
+ 9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
.emit_ib = si_dma_ring_emit_ib,
.emit_fence = si_dma_ring_emit_fence,
.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
@@ -794,8 +786,6 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
.test_ib = si_dma_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = si_dma_ring_pad_ib,
- .get_emit_ib_size = si_dma_ring_get_emit_ib_size,
- .get_dma_frame_size = si_dma_ring_get_dma_frame_size,
};
static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
@@ -913,3 +903,12 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
+
+const struct amdgpu_ip_block_version si_dma_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dma_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
index 3a3e0c78a54b..5ac1b8452fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h
@@ -24,6 +24,6 @@
#ifndef __SI_DMA_H__
#define __SI_DMA_H__
-extern const struct amd_ip_funcs si_dma_ip_funcs;
+extern const struct amdgpu_ip_block_version si_dma_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index d6f85b1a0b93..f0f2f6c9718e 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3171,6 +3171,7 @@ static void ni_update_current_ps(struct amdgpu_device *adev,
eg_pi->current_rps = *rps;
ni_pi->current_ps = *new_ps;
eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+ adev->pm.dpm.current_ps = &eg_pi->current_rps;
}
static void ni_update_requested_ps(struct amdgpu_device *adev,
@@ -3183,6 +3184,7 @@ static void ni_update_requested_ps(struct amdgpu_device *adev,
eg_pi->requested_rps = *rps;
ni_pi->requested_ps = *new_ps;
eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+ adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
}
static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
@@ -7347,7 +7349,7 @@ static int si_parse_power_table(struct amdgpu_device *adev)
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
- for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk, mclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
@@ -7986,6 +7988,57 @@ static int si_dpm_early_init(void *handle)
return 0;
}
+static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
+ const struct rv7xx_pl *si_cpl2)
+{
+ return ((si_cpl1->mclk == si_cpl2->mclk) &&
+ (si_cpl1->sclk == si_cpl2->sclk) &&
+ (si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
+ (si_cpl1->vddc == si_cpl2->vddc) &&
+ (si_cpl1->vddci == si_cpl2->vddci));
+}
+
+static int si_check_state_equal(struct amdgpu_device *adev,
+ struct amdgpu_ps *cps,
+ struct amdgpu_ps *rps,
+ bool *equal)
+{
+ struct si_ps *si_cps;
+ struct si_ps *si_rps;
+ int i;
+
+ if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+ return -EINVAL;
+
+ si_cps = si_get_ps(cps);
+ si_rps = si_get_ps(rps);
+
+ if (si_cps == NULL) {
+ printk("si_cps is NULL\n");
+ *equal = false;
+ return 0;
+ }
+
+ if (si_cps->performance_level_count != si_rps->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < si_cps->performance_level_count; i++) {
+ if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
+ &(si_rps->performance_levels[i]))) {
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+ *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+ *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+ return 0;
+}
+
const struct amd_ip_funcs si_dpm_ip_funcs = {
.name = "si_dpm",
@@ -8020,6 +8073,8 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = {
.get_fan_control_mode = &si_dpm_get_fan_control_mode,
.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+ .check_state_equal = &si_check_state_equal,
+ .get_vce_clock_state = amdgpu_get_vce_clock_state,
};
static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -8039,3 +8094,11 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
}
+const struct amdgpu_ip_block_version si_dpm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 8fae3d4a2360..db0f36846661 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -268,7 +268,7 @@ static int si_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs si_ih_ip_funcs = {
+static const struct amd_ip_funcs si_ih_ip_funcs = {
.name = "si_ih",
.early_init = si_ih_early_init,
.late_init = NULL,
@@ -297,3 +297,11 @@ static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &si_ih_funcs;
}
+const struct amdgpu_ip_block_version si_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
index f3e3a954369c..42e64a53e24f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h
@@ -24,6 +24,6 @@
#ifndef __SI_IH_H__
#define __SI_IH_H__
-extern const struct amd_ip_funcs si_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version si_ih_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b4ea229bb449..52b71ee58793 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -455,7 +455,7 @@ static int tonga_ih_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs tonga_ih_ip_funcs = {
+static const struct amd_ip_funcs tonga_ih_ip_funcs = {
.name = "tonga_ih",
.early_init = tonga_ih_early_init,
.late_init = NULL,
@@ -487,3 +487,11 @@ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &tonga_ih_funcs;
}
+const struct amdgpu_ip_block_version tonga_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
index 7392d70fa4a7..499027eee5c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
@@ -24,6 +24,6 @@
#ifndef __TONGA_IH_H__
#define __TONGA_IH_H__
-extern const struct amd_ip_funcs tonga_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version tonga_ih_ip_block;
-#endif /* __CZ_IH_H__ */
+#endif /* __TONGA_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f6c941550b8f..8f9c7d55ddda 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -36,6 +36,9 @@
#include "bif/bif_4_1_d.h"
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
@@ -116,8 +119,7 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -526,20 +528,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 4; /* uvd_v4_2_ring_emit_ib */
-}
-
-static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v4_2_ring_emit_hdp_flush */
- 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
- 14; /* uvd_v4_2_ring_emit_fence x1 no user fence */
-}
-
/**
* uvd_v4_2_mc_resume - memory controller programming
*
@@ -698,18 +686,34 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+ if (enable)
+ tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+ else
+ tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+
+ WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
bool gate = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
- return 0;
-
if (state == AMD_CG_STATE_GATE)
gate = true;
+ uvd_v5_0_set_bypass_mode(adev, gate);
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+ return 0;
+
uvd_v4_2_enable_mgcg(adev, gate);
return 0;
@@ -738,7 +742,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
+static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.name = "uvd_v4_2",
.early_init = uvd_v4_2_early_init,
.late_init = NULL,
@@ -756,10 +760,18 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v4_2_ring_get_rptr,
.get_wptr = uvd_v4_2_ring_get_wptr,
.set_wptr = uvd_v4_2_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v4_2_ring_emit_hdp_flush */
+ 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+ 14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
.emit_ib = uvd_v4_2_ring_emit_ib,
.emit_fence = uvd_v4_2_ring_emit_fence,
.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
@@ -770,8 +782,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
};
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -789,3 +799,12 @@ static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
index 0a615dd50840..8a0444bb8b95 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
@@ -24,6 +24,6 @@
#ifndef __UVD_V4_2_H__
#define __UVD_V4_2_H__
-extern const struct amd_ip_funcs uvd_v4_2_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v4_2_ip_block;
#endif
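The set_bypass_mode helpers added in this patch are classic read-modify-write on an indirect SMC register: read, flip only the two bypass bits, write back. A self-contained model (register and masks are faked so the pattern runs anywhere):

        #include <stdint.h>
        #include <stdio.h>

        #define BYPASSDCLK_MASK (1u << 0)
        #define BYPASSVCLK_MASK (1u << 1)

        static uint32_t fake_smc_reg;        /* stands in for ixGCK_DFS_BYPASS_CNTL */

        static uint32_t rreg32_smc(void) { return fake_smc_reg; }
        static void wreg32_smc(uint32_t v) { fake_smc_reg = v; }

        static void set_bypass_mode(int enable)
        {
                uint32_t tmp = rreg32_smc();

                if (enable)
                        tmp |= BYPASSDCLK_MASK | BYPASSVCLK_MASK;
                else
                        tmp &= ~(BYPASSDCLK_MASK | BYPASSVCLK_MASK);

                wreg32_smc(tmp);        /* other bits in the register are preserved */
        }

        int main(void)
        {
                set_bypass_mode(1);
                printf("0x%x\n", fake_smc_reg);
                set_bypass_mode(0);
                printf("0x%x\n", fake_smc_reg);
                return 0;
        }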
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 400c16fe579e..95303e2d5f92 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -33,6 +33,8 @@
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -112,8 +114,7 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -577,20 +578,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 6; /* uvd_v5_0_ring_emit_ib */
-}
-
-static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v5_0_ring_emit_hdp_flush */
- 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
- 14; /* uvd_v5_0_ring_emit_fence x1 no user fence */
-}
-
static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -737,6 +724,20 @@ static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
}
#endif
+static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+ if (enable)
+ tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+ else
+ tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+
+ WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -744,6 +745,8 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
static int curstate = -1;
+ uvd_v5_0_set_bypass_mode(adev, enable);
+
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
@@ -789,7 +792,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.name = "uvd_v5_0",
.early_init = uvd_v5_0_early_init,
.late_init = NULL,
@@ -807,10 +810,18 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v5_0_ring_get_rptr,
.get_wptr = uvd_v5_0_ring_get_wptr,
.set_wptr = uvd_v5_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v5_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+ 14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
@@ -821,8 +832,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -840,3 +849,12 @@ static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v5_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
index e3b3c49fa5de..2eaaea793ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
@@ -24,6 +24,6 @@
#ifndef __UVD_V5_0_H__
#define __UVD_V5_0_H__
-extern const struct amd_ip_funcs uvd_v5_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v5_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index ab3df6d75656..a339b5ccb296 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -116,8 +116,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
- &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
return r;
}
@@ -725,31 +724,6 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE);
}
-static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 8; /* uvd_v6_0_ring_emit_ib */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
- 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
- 14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
- return
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
- 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
- 20 + /* uvd_v6_0_ring_emit_vm_flush */
- 14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
-}
-
static bool uvd_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -961,7 +935,7 @@ static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
}
#endif
-static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void uvd_v6_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
@@ -979,15 +953,14 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- if (adev->asic_type == CHIP_FIJI ||
- adev->asic_type == CHIP_POLARIS10)
- uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);
+ uvd_v6_0_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
- if (state == AMD_CG_STATE_GATE) {
+ if (enable) {
/* disable HW gating and enable Sw gating */
uvd_v6_0_set_sw_clock_gating(adev);
} else {
@@ -1027,7 +1000,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
}
}
-const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.name = "uvd_v6_0",
.early_init = uvd_v6_0_early_init,
.late_init = NULL,
@@ -1048,10 +1021,19 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
+ .emit_frame_size =
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
@@ -1062,15 +1044,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD,
+ .align_mask = 0xf,
+ .nop = PACKET0(mmUVD_NO_OP, 0),
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
- .parse_cs = NULL,
+ .emit_frame_size =
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 20 + /* uvd_v6_0_ring_emit_vm_flush */
+ 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
+ .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
@@ -1083,8 +1072,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
- .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
- .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1108,3 +1095,30 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->uvd.irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}
+
+const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
index 6b92a2352986..d3d48c6428cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
@@ -24,6 +24,8 @@
#ifndef __UVD_V6_0_H__
#define __UVD_V6_0_H__
-extern const struct amd_ip_funcs uvd_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_2_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_3_ip_block;
#endif
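uvd_v6_0 now carries two complete ring funcs tables, one for physical-mode rings and one for VM-enabled rings, differing mainly in emit_frame_size and the VM-flush callbacks, and selects one at init time. A tiny model of that selection (the sizes mirror the tables above, but the picker itself is an assumption):

        #include <stdio.h>

        struct ring_funcs { const char *name; unsigned emit_frame_size; };

        static const struct ring_funcs phys_funcs = { "phys", 2 + 2 + 10 + 14 };
        static const struct ring_funcs vm_funcs = { "vm", 2 + 2 + 10 + 20 + 14 + 14 };

        static const struct ring_funcs *pick(int vm_supported)
        {
                return vm_supported ? &vm_funcs : &phys_funcs;
        }

        int main(void)
        {
                const struct ring_funcs *f = pick(1);

                printf("%s: %u dwords/frame\n", f->name, f->emit_frame_size);
                return 0;
        }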
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 76e64ad04a53..38ed903dd6f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -224,8 +224,8 @@ static int vce_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->vce.irq, 0);
if (r)
return r;
}
@@ -592,7 +592,7 @@ static int vce_v2_0_set_powergating_state(void *handle,
return vce_v2_0_start(adev);
}
-const struct amd_ip_funcs vce_v2_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.name = "vce_v2_0",
.early_init = vce_v2_0_early_init,
.late_init = NULL,
@@ -610,10 +610,15 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v2_0_ring_get_rptr,
.get_wptr = vce_v2_0_ring_get_wptr,
.set_wptr = vce_v2_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -622,8 +627,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
- .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -644,3 +647,12 @@ static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
};
+
+const struct amdgpu_ip_block_version vce_v2_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
index 0d2ae8a01acd..4d15167654a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
@@ -24,6 +24,6 @@
#ifndef __VCE_V2_0_H__
#define __VCE_V2_0_H__
-extern const struct amd_ip_funcs vce_v2_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v2_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6feed726e299..39f03f137a56 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -395,8 +395,7 @@ static int vce_v3_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
return r;
}
@@ -814,28 +813,7 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, seq);
}
-static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
- return
- 5; /* vce_v3_0_ring_emit_ib */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
- return
- 4 + /* vce_v3_0_emit_pipeline_sync */
- 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
- return
- 6 + /* vce_v3_0_emit_vm_flush */
- 4 + /* vce_v3_0_emit_pipeline_sync */
- 6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
-}
-
-const struct amd_ip_funcs vce_v3_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.name = "vce_v3_0",
.early_init = vce_v3_0_early_init,
.late_init = NULL,
@@ -856,10 +834,17 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size =
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -868,15 +853,21 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
- .parse_cs = NULL,
+ .parse_cs = amdgpu_vce_ring_parse_cs_vm,
+ .emit_frame_size =
+ 6 + /* vce_v3_0_emit_vm_flush */
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = vce_v3_0_ring_emit_ib,
.emit_vm_flush = vce_v3_0_emit_vm_flush,
.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -887,8 +878,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
- .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
- .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -916,3 +905,30 @@ static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
};
+
+const struct amdgpu_ip_block_version vce_v3_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_1_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_4_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
index b45af65da81f..08b908c7de0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
@@ -24,6 +24,8 @@
#ifndef __VCE_V3_0_H__
#define __VCE_V3_0_H__
-extern const struct amd_ip_funcs vce_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_1_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_4_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 7c13090df7c0..52d0a83e6ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -121,8 +121,8 @@ static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, (reg));
- r = RREG32(mmSMC_IND_DATA_0);
+ WREG32(mmSMC_IND_INDEX_11, (reg));
+ r = RREG32(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
@@ -132,8 +132,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, (reg));
- WREG32(mmSMC_IND_DATA_0, (v));
+ WREG32(mmSMC_IND_INDEX_11, (reg));
+ WREG32(mmSMC_IND_DATA_11, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
@@ -437,12 +437,12 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
/* take the smc lock since we are using the smc index */
spin_lock_irqsave(&adev->smc_idx_lock, flags);
/* set rom index to 0 */
- WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
- WREG32(mmSMC_IND_DATA_0, 0);
+ WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
+ WREG32(mmSMC_IND_DATA_11, 0);
/* set index to data for continuous read */
- WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
+ WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
for (i = 0; i < length_dw; i++)
- dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
+ dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return true;
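The hunks above move the indirect SMC accessors from INDEX/DATA pair 0 to pair 11, but the access pattern itself is unchanged: write the offset to the index register, then read or write the data register, all under the smc_idx lock. A toy model of that pattern (plain variables stand in for the MMIO registers):

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t smc_index;
        static uint32_t smc_space[64];        /* fake SMC address space */

        static uint32_t smc_data_read(void) { return smc_space[smc_index]; }
        static void smc_data_write(uint32_t v) { smc_space[smc_index] = v; }

        static uint32_t smc_rreg(uint32_t reg)
        {
                smc_index = reg;          /* WREG32(mmSMC_IND_INDEX_11, reg) */
                return smc_data_read();   /* RREG32(mmSMC_IND_DATA_11) */
        }

        static void smc_wreg(uint32_t reg, uint32_t v)
        {
                smc_index = reg;
                smc_data_write(v);
        }

        int main(void)
        {
                smc_wreg(7, 0xdead);
                printf("0x%x\n", smc_rreg(7));
                return 0;
        }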
@@ -556,21 +556,100 @@ static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] =
{mmPA_SC_RASTER_CONFIG_1, false, true},
};
-static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 reg_offset)
-{
- uint32_t val;
+static uint32_t vi_get_register_value(struct amdgpu_device *adev,
+ bool indexed, u32 se_num,
+ u32 sh_num, u32 reg_offset)
+{
+ if (indexed) {
+ uint32_t val;
+ unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+ unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+ switch (reg_offset) {
+ case mmCC_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+ case mmGC_USER_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+ case mmPA_SC_RASTER_CONFIG:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+ case mmPA_SC_RASTER_CONFIG_1:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+ }
- mutex_lock(&adev->grbm_idx_mutex);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
- val = RREG32(reg_offset);
+ val = RREG32(reg_offset);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return val;
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+ } else {
+ unsigned idx;
+
+ switch (reg_offset) {
+ case mmGB_ADDR_CONFIG:
+ return adev->gfx.config.gb_addr_config;
+ case mmMC_ARB_RAMCFG:
+ return adev->gfx.config.mc_arb_ramcfg;
+ case mmGB_TILE_MODE0:
+ case mmGB_TILE_MODE1:
+ case mmGB_TILE_MODE2:
+ case mmGB_TILE_MODE3:
+ case mmGB_TILE_MODE4:
+ case mmGB_TILE_MODE5:
+ case mmGB_TILE_MODE6:
+ case mmGB_TILE_MODE7:
+ case mmGB_TILE_MODE8:
+ case mmGB_TILE_MODE9:
+ case mmGB_TILE_MODE10:
+ case mmGB_TILE_MODE11:
+ case mmGB_TILE_MODE12:
+ case mmGB_TILE_MODE13:
+ case mmGB_TILE_MODE14:
+ case mmGB_TILE_MODE15:
+ case mmGB_TILE_MODE16:
+ case mmGB_TILE_MODE17:
+ case mmGB_TILE_MODE18:
+ case mmGB_TILE_MODE19:
+ case mmGB_TILE_MODE20:
+ case mmGB_TILE_MODE21:
+ case mmGB_TILE_MODE22:
+ case mmGB_TILE_MODE23:
+ case mmGB_TILE_MODE24:
+ case mmGB_TILE_MODE25:
+ case mmGB_TILE_MODE26:
+ case mmGB_TILE_MODE27:
+ case mmGB_TILE_MODE28:
+ case mmGB_TILE_MODE29:
+ case mmGB_TILE_MODE30:
+ case mmGB_TILE_MODE31:
+ idx = (reg_offset - mmGB_TILE_MODE0);
+ return adev->gfx.config.tile_mode_array[idx];
+ case mmGB_MACROTILE_MODE0:
+ case mmGB_MACROTILE_MODE1:
+ case mmGB_MACROTILE_MODE2:
+ case mmGB_MACROTILE_MODE3:
+ case mmGB_MACROTILE_MODE4:
+ case mmGB_MACROTILE_MODE5:
+ case mmGB_MACROTILE_MODE6:
+ case mmGB_MACROTILE_MODE7:
+ case mmGB_MACROTILE_MODE8:
+ case mmGB_MACROTILE_MODE9:
+ case mmGB_MACROTILE_MODE10:
+ case mmGB_MACROTILE_MODE11:
+ case mmGB_MACROTILE_MODE12:
+ case mmGB_MACROTILE_MODE13:
+ case mmGB_MACROTILE_MODE14:
+ case mmGB_MACROTILE_MODE15:
+ idx = (reg_offset - mmGB_MACROTILE_MODE0);
+ return adev->gfx.config.macrotile_mode_array[idx];
+ default:
+ return RREG32(reg_offset);
+ }
+ }
}
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -605,10 +684,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
if (reg_offset != asic_register_entry->reg_offset)
continue;
if (!asic_register_entry->untouched)
- *value = asic_register_entry->grbm_indexed ?
- vi_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = vi_get_register_value(adev,
+ asic_register_entry->grbm_indexed,
+ se_num, sh_num, reg_offset);
return 0;
}
}
@@ -618,10 +696,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
continue;
if (!vi_allowed_read_registers[i].untouched)
- *value = vi_allowed_read_registers[i].grbm_indexed ?
- vi_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = vi_get_register_value(adev,
+ vi_allowed_read_registers[i].grbm_indexed,
+ se_num, sh_num, reg_offset);
return 0;
}
return -EINVAL;
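vi_get_register_value turns reads of contiguous register blocks (tile modes, macrotile modes) into lookups in cached arrays via offset subtraction, falling back to a real MMIO read in the default case. A minimal restatement with invented offsets:

        #include <stdio.h>

        #define GB_TILE_MODE0   0x2644
        #define GB_TILE_MODE31  (GB_TILE_MODE0 + 31)

        static unsigned tile_mode_array[32] = { [0] = 0x11, [31] = 0x99 };

        static int cached_read(unsigned reg, unsigned *value)
        {
                if (reg >= GB_TILE_MODE0 && reg <= GB_TILE_MODE31) {
                        *value = tile_mode_array[reg - GB_TILE_MODE0];
                        return 0;
                }
                return -1;        /* caller falls back to a real MMIO read */
        }

        int main(void)
        {
                unsigned v;

                if (cached_read(GB_TILE_MODE31, &v) == 0)
                        printf("0x%x\n", v);
                return 0;
        }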
@@ -652,18 +729,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
return -EINVAL;
}
-static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
/**
* vi_asic_reset - soft reset GPU
*
@@ -677,11 +742,11 @@ static int vi_asic_reset(struct amdgpu_device *adev)
{
int r;
- vi_set_bios_scratch_engine_hung(adev, true);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
r = vi_gpu_pci_config_reset(adev);
- vi_set_bios_scratch_engine_hung(adev, false);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
}
@@ -781,734 +846,6 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
-/* topaz has no DCE, UVD, VCE */
-static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 4,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &iceland_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &sdma_v2_4_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 7,
- .minor = 4,
- .rev = 0,
- .funcs = &gmc_v7_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &iceland_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 2,
- .minor = 4,
- .rev = 0,
- .funcs = &sdma_v2_4_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v10_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 5,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v5_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 5,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v5_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_v10_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 5,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 1,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 10,
- .minor = 1,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 3,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 4,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 1,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &tonga_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 2,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 1,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 3,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 4,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &cz_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-#if defined(CONFIG_DRM_AMD_ACP)
- {
- .type = AMD_IP_BLOCK_TYPE_ACP,
- .major = 2,
- .minor = 2,
- .rev = 0,
- .funcs = &acp_ip_funcs,
- },
-#endif
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
-{
- /* ORDER MATTERS! */
- {
- .type = AMD_IP_BLOCK_TYPE_COMMON,
- .major = 2,
- .minor = 0,
- .rev = 0,
- .funcs = &vi_common_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_IH,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &cz_ih_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs
- },
- {
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_virtual_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_GFX,
- .major = 8,
- .minor = 0,
- .rev = 0,
- .funcs = &gfx_v8_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_SDMA,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &sdma_v3_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_UVD,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &uvd_v6_0_ip_funcs,
- },
- {
- .type = AMD_IP_BLOCK_TYPE_VCE,
- .major = 3,
- .minor = 0,
- .rev = 0,
- .funcs = &vce_v3_0_ip_funcs,
- },
-#if defined(CONFIG_DRM_AMD_ACP)
- {
- .type = AMD_IP_BLOCK_TYPE_ACP,
- .major = 2,
- .minor = 2,
- .rev = 0,
- .funcs = &acp_ip_funcs,
- },
-#endif
-};
-
-int vi_set_ip_blocks(struct amdgpu_device *adev)
-{
- if (adev->enable_virtual_display) {
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
- break;
-
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks_vd;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- } else {
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
- break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
#define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT 9
#define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00
@@ -1593,7 +930,7 @@ static int vi_common_early_init(void *handle)
break;
case CHIP_TONGA:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
- adev->pg_flags = 0;
+ adev->pg_flags = AMD_PG_SUPPORT_UVD;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_POLARIS11:
@@ -1908,7 +1245,7 @@ static int vi_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs vi_common_ip_funcs = {
+static const struct amd_ip_funcs vi_common_ip_funcs = {
.name = "vi_common",
.early_init = vi_common_early_init,
.late_init = NULL,
@@ -1925,3 +1262,110 @@ const struct amd_ip_funcs vi_common_ip_funcs = {
.set_powergating_state = vi_common_set_powergating_state,
};
+static const struct amdgpu_ip_block_version vi_common_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+};
+
+int vi_set_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ /* topaz has no DCE, UVD, VCE */
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
+ amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
+ break;
+ case CHIP_FIJI:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ break;
+ case CHIP_TONGA:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+ break;
+ case CHIP_CARRIZO:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+ amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+ break;
+ case CHIP_STONEY:
+ amdgpu_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ if (adev->enable_virtual_display)
+ amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ else
+ amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
+ amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
+ amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+ amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ return 0;
+}
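The per-ASIC static arrays removed above are replaced by incremental registration, and the duplicated *_vd variants collapse into a single virtual-display if/else per ASIC, which is the bulk of the diffstat savings. A plausible sketch of the helper this rewrite relies on (the real implementation lives in amdgpu_device.c and is not part of this hunk; the .version field name is an assumption):

    /* Appends one IP block descriptor to the device's list, replacing
     * the old "point adev->ip_blocks at a static array" scheme. */
    int amdgpu_ip_block_add(struct amdgpu_device *adev,
                            const struct amdgpu_ip_block_version *ip_block_version)
    {
            if (!ip_block_version)
                    return -EINVAL;

            adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

            return 0;
    }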
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 502094042462..575d7aed5d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -24,8 +24,6 @@
#ifndef __VI_H__
#define __VI_H__
-extern const struct amd_ip_funcs vi_common_ip_funcs;
-
void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index bec8125bceb0..d1986276dbbd 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -84,6 +84,29 @@ enum amd_powergating_state {
AMD_PG_STATE_UNGATE,
};
+struct amd_vce_state {
+ /* vce clocks */
+ u32 evclk;
+ u32 ecclk;
+ /* gpu clocks */
+ u32 sclk;
+ u32 mclk;
+ u8 clk_idx;
+ u8 pstate;
+};
+
+
+#define AMD_MAX_VCE_LEVELS 6
+
+enum amd_vce_level {
+ AMD_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
+ AMD_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
+ AMD_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
+ AMD_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+ AMD_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
+ AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
/* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
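The VCE state table type now lives in this shared header so both amdgpu's DPM code and powerplay use one definition; the powerplay-private pp_vce_state and PP_VCE_LEVEL copies are deleted later in this patch. An illustrative lookup, assuming a populated hwmgr table as declared further down in hwmgr.h:

    /* Hypothetical caller: fetch the AC "all cases" entry. */
    struct amd_vce_state *vce = &hwmgr->vce_states[AMD_VCE_LEVEL_AC_ALL];

    pr_debug("VCE evclk %u, ecclk %u, sclk %u, mclk %u\n",
             vce->evclk, vce->ecclk, vce->sclk, vce->mclk);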
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
index 3014d4a58c43..a9ef1562f43b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
@@ -176,6 +176,8 @@
#define mmSMU1_SMU_SMC_IND_DATA 0x83
#define mmSMU2_SMU_SMC_IND_DATA 0x85
#define mmSMU3_SMU_SMC_IND_DATA 0x87
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define ixRCU_UC_EVENTS 0xc0000004
#define ixRCU_MISC_CTRL 0xc0000010
#define ixCC_RCU_FUSES 0xc00c0000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index 933917479985..22dd4c2b7290 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -87,6 +87,8 @@
#define mmSMC_IND_DATA_6 0x8d
#define mmSMC_IND_INDEX_7 0x8e
#define mmSMC_IND_DATA_7 0x8f
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define mmSMC_IND_ACCESS_CNTL 0x92
#define mmSMC_MESSAGE_0 0x94
#define mmSMC_RESP_0 0x95
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index 44b1855cb8df..eca2b851f25f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -90,6 +90,8 @@
#define mmSMC_IND_DATA_6 0x8d
#define mmSMC_IND_INDEX_7 0x8e
#define mmSMC_IND_DATA_7 0x8f
+#define mmSMC_IND_INDEX_11 0x1AC
+#define mmSMC_IND_DATA_11 0x1AD
#define mmSMC_IND_ACCESS_CNTL 0x92
#define mmSMC_MESSAGE_0 0x94
#define mmSMC_RESP_0 0x95
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index df7c18b6a02a..e4a1697ec1d3 100755
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -106,6 +106,7 @@ enum cgs_ucode_id {
CGS_UCODE_ID_CP_MEC_JT2,
CGS_UCODE_ID_GMCON_RENG,
CGS_UCODE_ID_RLC_G,
+ CGS_UCODE_ID_STORAGE,
CGS_UCODE_ID_MAXIMUM,
};
@@ -619,6 +620,8 @@ typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device,
typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info);
+typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
+
struct cgs_ops {
/* memory management calls (similar to KFD interface) */
cgs_gpu_mem_info_t gpu_mem_info;
@@ -670,6 +673,7 @@ struct cgs_ops {
cgs_call_acpi_method call_acpi_method;
/* get system info */
cgs_query_system_info query_system_info;
+ cgs_is_virtualization_enabled_t is_virtualization_enabled;
};
struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -773,4 +777,6 @@ struct cgs_device
CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
resource_base)
+#define cgs_is_virtualization_enabled(cgs_device) \
+ CGS_CALL(is_virtualization_enabled, cgs_device)
#endif /* _CGS_COMMON_H */
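The new wrapper follows the dispatch pattern already used throughout this header: the macro resolves to whichever function pointer the OS layer installed in cgs_ops, so powerplay/SMU code can test for virtualization without reaching into amdgpu internals. Roughly (the expansion shown paraphrases the CGS_CALL definition rather than copying it):

    /* CGS_CALL(func, dev, ...) dispatches through the ops table,
     * approximately:
     *   ((struct cgs_device *)dev)->ops->func(dev, ...)
     * so the new query is a single indirect call: */
    if (cgs_is_virtualization_enabled(smumgr->device))
            pr_info("device is running virtualized\n");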
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7174f7a68266..0b1f2205c2f1 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -436,7 +436,8 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
}
}
-int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output)
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
+ void *input, void *output)
{
int ret = 0;
struct pp_instance *pp_handle;
@@ -475,7 +476,7 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
return ret;
}
-enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
struct pp_hwmgr *hwmgr;
struct pp_power_state *state;
@@ -820,6 +821,21 @@ static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
}
+static struct amd_vce_state*
+pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (handle) {
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ if (hwmgr && idx < hwmgr->num_vce_state_tables)
+ return &hwmgr->vce_states[idx];
+ }
+
+ return NULL;
+}
+
const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
@@ -846,6 +862,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_mclk_od = pp_dpm_get_mclk_od,
.set_mclk_od = pp_dpm_set_mclk_od,
.read_sensor = pp_dpm_read_sensor,
+ .get_vce_clock_state = pp_dpm_get_vce_clock_state,
};
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
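A caller can walk the exported table through the new hook; the NULL return past num_vce_state_tables doubles as the terminator. Hypothetical usage, with handle an opaque pp_instance pointer as elsewhere in this file:

    struct amd_vce_state *state;
    unsigned i;

    for (i = 0; i < AMD_MAX_VCE_LEVELS; i++) {
            state = pp_dpm_funcs.get_vce_clock_state(handle, i);
            if (!state) /* past num_vce_state_tables, or bad handle */
                    break;
            pr_info("VCE state %u: evclk %u ecclk %u\n",
                    i, state->evclk, state->ecclk);
    }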
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 960424913496..4b14f259a147 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -66,7 +66,7 @@ static const struct cz_power_state *cast_const_PhwCzPowerState(
return (struct cz_power_state *)hw_ps;
}
-uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
+static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
@@ -1017,7 +1017,7 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
+static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
void *output, void *storage, int result)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1225,7 +1225,7 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1239,7 +1239,7 @@ int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
@@ -1277,7 +1277,7 @@ int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1533,7 +1533,7 @@ static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
return result;
}
-int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
+static int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
{
return sizeof(struct cz_power_state);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index 1944d289f846..f5e8fda964f7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -25,6 +25,7 @@
#include "linux/delay.h"
#include "hwmgr.h"
#include "amd_acpi.h"
+#include "pp_acpi.h"
bool acpi_atcs_functions_supported(void *device, uint32_t index)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 4477c55a58e3..c45bd2560468 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -131,7 +131,7 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
/**
* Private Function to get the PowerPlay Table Address.
*/
-const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
+static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
{
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
@@ -1049,7 +1049,7 @@ static int check_powerplay_tables(
return 0;
}
-int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
{
int result = 0;
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
@@ -1100,7 +1100,7 @@ int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
return result;
}
-int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1214,7 +1214,7 @@ static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
}
static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
- struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+ struct amd_vce_state *vce_state, void **clock_info, uint32_t *flag)
{
const ATOM_Tonga_VCE_State_Record *vce_state_record;
ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
@@ -1318,7 +1318,7 @@ int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
- if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+ if ((i != 0) && (i <= AMD_MAX_VCE_LEVELS)) {
for (j = 0; j < i; j++)
ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index ccf7ebeaf892..a4e9cf429e62 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1507,7 +1507,7 @@ static int init_phase_shedding_table(struct pp_hwmgr *hwmgr,
return 0;
}
-int get_number_of_vce_state_table_entries(
+static int get_number_of_vce_state_table_entries(
struct pp_hwmgr *hwmgr)
{
const ATOM_PPLIB_POWERPLAYTABLE *table =
@@ -1521,9 +1521,9 @@ int get_number_of_vce_state_table_entries(
return 0;
}
-int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
+static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
unsigned long i,
- struct pp_vce_state *vce_state,
+ struct amd_vce_state *vce_state,
void **clock_info,
unsigned long *flag)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 6eb6db199250..cf2ee93d8475 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -75,7 +75,7 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -91,7 +91,7 @@ int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -99,7 +99,7 @@ int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -107,7 +107,7 @@ int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -116,7 +116,7 @@ int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -149,15 +149,21 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
if (bgate) {
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
+ AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
smu7_update_uvd_dpm(hwmgr, true);
smu7_powerdown_uvd(hwmgr);
} else {
smu7_powerup_uvd(hwmgr);
- smu7_update_uvd_dpm(hwmgr, false);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
+ AMD_CG_STATE_GATE);
+ smu7_update_uvd_dpm(hwmgr, false);
}
return 0;
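The reordering is the substantive fix here: clock gating is released before the block is power-gated, and on the way back up the block is un-power-gated before clock gating is re-enabled and DPM is updated. Condensed, the new sequences read:

    /* gate UVD:   CG ungate -> PG gate    -> update DPM -> SMU powerdown
     * ungate UVD: SMU powerup -> PG ungate -> CG gate   -> update DPM
     *
     * Note the ungate path passes AMD_CG_STATE_UNGATE to the
     * powergating call; the CG and PG ungate enumerators share the
     * same value, so this presumably behaves as AMD_PG_STATE_UNGATE. */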
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 75854021f403..9e49f2777143 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -89,7 +89,7 @@ enum DPM_EVENT_SRC {
static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
-struct smu7_power_state *cast_phw_smu7_power_state(
+static struct smu7_power_state *cast_phw_smu7_power_state(
struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -99,7 +99,7 @@ struct smu7_power_state *cast_phw_smu7_power_state(
return (struct smu7_power_state *)hw_ps;
}
-const struct smu7_power_state *cast_const_phw_smu7_power_state(
+static const struct smu7_power_state *cast_const_phw_smu7_power_state(
const struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -115,7 +115,7 @@ const struct smu7_power_state *cast_const_phw_smu7_power_state(
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
-int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
+static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
@@ -124,7 +124,7 @@ int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
return 0;
}
-uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
uint32_t speedCntl = 0;
@@ -135,7 +135,7 @@ uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}
-int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
uint32_t link_width;
@@ -155,7 +155,7 @@ int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
* @param pHwMgr the address of the powerplay hardware manager.
* @return always PP_Result_OK
*/
-int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
@@ -802,7 +802,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1153,7 +1153,7 @@ static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
-int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
+static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->pcie_performance_request = true;
@@ -1161,7 +1161,7 @@ int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result = 0;
int result = 0;
@@ -1352,6 +1352,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct cgs_system_info sys_info = {0};
+ int result;
data->dll_default_on = false;
data->mclk_dpm0_activity_target = 0xa;
@@ -1439,6 +1441,18 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->pcie_lane_performance.min = 16;
data->pcie_lane_power_saving.max = 0;
data->pcie_lane_power_saving.min = 16;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (!result) {
+ if (sys_info.value & AMD_PG_SUPPORT_UVD)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ if (sys_info.value & AMD_PG_SUPPORT_VCE)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+ }
}
/**
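Together with the Tonga pg_flags change earlier in this patch (AMD_PG_SUPPORT_UVD), the query added above is what turns the device PG mask into powerplay capabilities at hwmgr init. The same pattern works for any CGS system-info id; an illustrative variant, error handling omitted:

    struct cgs_system_info sys_info = {0};

    sys_info.size = sizeof(struct cgs_system_info);
    sys_info.info_id = CGS_SYSTEM_INFO_CG_FLAGS; /* another defined id */
    if (!cgs_query_system_info(hwmgr->device, &sys_info))
            pr_debug("CG flags: 0x%llx\n",
                     (unsigned long long)sys_info.value);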
@@ -1864,7 +1878,7 @@ static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2256,7 +2270,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data;
int result;
@@ -3675,14 +3689,16 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}
-int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+static int
+smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
}
-int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+static int
+smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
uint32_t num_active_displays = 0;
struct cgs_display_info info = {0};
@@ -3704,7 +3720,7 @@ int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
* @param hwmgr the address of the powerplay hardware manager.
* @return always OK
*/
-int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t num_active_displays = 0;
@@ -3754,7 +3770,7 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
return smu7_program_display_gap(hwmgr);
}
@@ -3778,13 +3794,14 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}
-int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+static int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
const void *thermal_interrupt_info)
{
return 0;
}
-bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+static bool
+smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
bool is_update_required = false;
@@ -3813,7 +3830,9 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
(pl1->pcie_lane == pl2->pcie_lane));
}
-int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
+ const struct pp_hw_power_state *pstate1,
+ const struct pp_hw_power_state *pstate2, bool *equal)
{
const struct smu7_power_state *psa;
const struct smu7_power_state *psb;
@@ -3846,7 +3865,7 @@ int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_sta
return 0;
}
-int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -3975,7 +3994,7 @@ static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
+static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 3fb5e57a378b..eb3e83d7af31 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -359,6 +359,7 @@ struct amd_powerplay_funcs {
int (*get_mclk_od)(void *handle);
int (*set_mclk_od)(void *handle, uint32_t value);
int (*read_sensor)(void *handle, int idx, int32_t *value);
+ struct amd_vce_state* (*get_vce_clock_state)(void *handle, unsigned idx);
};
struct amd_powerplay {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 4f0fedd1e9d3..e38b999e3235 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -367,7 +367,7 @@ struct pp_table_func {
int (*pptable_get_vce_state_table_entry)(
struct pp_hwmgr *hwmgr,
unsigned long i,
- struct pp_vce_state *vce_state,
+ struct amd_vce_state *vce_state,
void **clock_info,
unsigned long *flag);
};
@@ -586,18 +586,6 @@ struct phm_microcode_version_info {
uint32_t NB;
};
-#define PP_MAX_VCE_LEVELS 6
-
-enum PP_VCE_LEVEL {
- PP_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
- PP_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
- PP_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
- PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
- PP_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
- PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-
enum PP_TABLE_VERSION {
PP_TABLE_V0 = 0,
PP_TABLE_V1,
@@ -620,7 +608,7 @@ struct pp_hwmgr {
void *hardcode_pp_table;
bool need_pp_table_upload;
- struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+ struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
uint32_t num_vce_state_tables;
enum amd_dpm_forced_level dpm_level;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index 9ceaed9ac52a..827860fffe78 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -156,15 +156,6 @@ struct pp_power_state {
struct pp_hw_power_state hardware;
};
-
-/*Structure to hold a VCE state entry*/
-struct pp_vce_state {
- uint32_t evclk;
- uint32_t ecclk;
- uint32_t sclk;
- uint32_t mclk;
-};
-
enum PP_MMProfilingState {
PP_MMProfilingState_NA = 0,
PP_MMProfilingState_Started,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
index 3df5de2cdab0..8fe8ba9434ff 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
@@ -21,9 +21,6 @@
*
*/
-extern bool acpi_atcs_functions_supported(void *device,
- uint32_t index);
-extern int acpi_pcie_perf_request(void *device,
- uint8_t perf_req,
- bool advertise);
-extern bool acpi_atcs_notify_pcie_device_ready(void *device);
+bool acpi_atcs_functions_supported(void *device, uint32_t index);
+int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise);
+bool acpi_atcs_notify_pcie_device_ready(void *device);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
index 76310ac7ef0d..34523fe6ed6f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -2049,7 +2049,7 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
-int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2125,7 +2125,7 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2150,7 +2150,7 @@ uint32_t fiji_get_mac_definition(uint32_t value)
return SMU73_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 02fe1df855a9..b86e48fb40d1 100755
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -159,7 +159,7 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
+static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
{
int i, result = -1;
uint32_t reg, data;
@@ -224,7 +224,7 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
+static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
{
int result = 0;
uint32_t table_start;
@@ -260,7 +260,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
return result;
}
-int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
int32_t vr_config;
uint32_t table_start;
@@ -299,7 +299,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
}
/* Work in Progress */
-int fiji_restore_vft_table(struct pp_smumgr *smumgr)
+static int fiji_restore_vft_table(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
@@ -311,7 +311,7 @@ int fiji_restore_vft_table(struct pp_smumgr *smumgr)
}
/* Work in Progress */
-int fiji_save_vft_table(struct pp_smumgr *smumgr)
+static int fiji_save_vft_table(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
@@ -322,7 +322,7 @@ int fiji_save_vft_table(struct pp_smumgr *smumgr)
return -EINVAL;
}
-int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index 8c889caba420..b579f0c175e6 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2140,7 +2140,7 @@ uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2163,7 +2163,7 @@ uint32_t iceland_get_mac_definition(uint32_t value)
return SMU71_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index 4ccc0b72324d..006b22071685 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -2174,7 +2174,7 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x \n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2201,7 +2201,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
}
- printk("cant't get the mac of %x \n", value);
+ printk(KERN_WARNING "can't get the mac of %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5c3598ab7dae..f38a68747df0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -118,7 +118,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
}
-int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
uint32_t vr_config;
uint32_t dpm_table_start;
@@ -172,7 +172,8 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
-int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
+static int
+polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 6af744f42ec9..6df0d6edfdd1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -278,6 +278,9 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
case UCODE_ID_RLC_G:
result = CGS_UCODE_ID_RLC_G;
break;
+ case UCODE_ID_MEC_STORAGE:
+ result = CGS_UCODE_ID_STORAGE;
+ break;
default:
break;
}
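The conversion keeps the SMU-side firmware enumeration decoupled from CGS. A hedged sketch of how the new id flows into a firmware lookup; cgs_get_firmware_info and struct cgs_firmware_info are existing CGS interfaces, but the surrounding code is illustrative only:

    struct cgs_firmware_info info = {0};
    enum cgs_ucode_id id = smu7_convert_fw_type_to_cgs(UCODE_ID_MEC_STORAGE);

    /* id maps to CGS_UCODE_ID_STORAGE after this patch; previously the
     * MEC storage type fell through the default branch unmapped. */
    cgs_get_firmware_info(smumgr->device, id, &info);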
@@ -452,6 +455,10 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
+ if (cgs_is_virtualization_enabled(smumgr->device))
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 76352f2423ae..919be435b49c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -28,8 +28,6 @@
#include <pp_endian.h>
#define SMC_RAM_END 0x40000
-#define mmSMC_IND_INDEX_11 0x01AC
-#define mmSMC_IND_DATA_11 0x01AD
struct smu7_buffer_entry {
uint32_t data_size;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
index de2a24d85f48..d08f6f19b454 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -2651,7 +2651,7 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
}
}
- printk("cant't get the offset of type %x member %x\n", type, member);
+ printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
return 0;
}
@@ -2675,7 +2675,7 @@ uint32_t tonga_get_mac_definition(uint32_t value)
case SMU_MAX_LEVELS_MVDD:
return SMU72_MAX_LEVELS_MVDD;
}
- printk("cant't get the mac value %x\n", value);
+ printk(KERN_WARNING "can't get the mac value %x\n", value);
return 0;
}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index b961a1c6caf3..dbd4fd3a810b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job,
TP_STRUCT__entry(
__field(struct amd_sched_entity *, entity)
__field(struct amd_sched_job *, sched_job)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(const char *, name)
__field(u32, job_count)
__field(int, hw_job_count)
@@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job,
TP_PROTO(struct amd_sched_fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
),
TP_fast_assign(
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 910b8d5b21c5..09b2cf6ccfa4 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,7 +32,7 @@
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -141,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return r;
atomic_set(&entity->fence_seq, 0);
- entity->fence_context = fence_context_alloc(2);
+ entity->fence_context = dma_fence_context_alloc(2);
return 0;
}
@@ -221,32 +221,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
kfifo_free(&entity->job_queue);
}
-static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_entity *entity =
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
- fence_put(f);
+ dma_fence_put(f);
amd_sched_wakeup(entity->sched);
}
-static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_entity *entity =
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
- fence_put(f);
+ dma_fence_put(f);
}
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
- struct fence * fence = entity->dependency;
+ struct dma_fence * fence = entity->dependency;
struct amd_sched_fence *s_fence;
if (fence->context == entity->fence_context) {
/* We can ignore fences from ourself */
- fence_put(entity->dependency);
+ dma_fence_put(entity->dependency);
return false;
}
@@ -257,23 +257,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
* Fence is from the same scheduler, only need to wait for
* it to be scheduled
*/
- fence = fence_get(&s_fence->scheduled);
- fence_put(entity->dependency);
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(entity->dependency);
entity->dependency = fence;
- if (!fence_add_callback(fence, &entity->cb,
- amd_sched_entity_clear_dep))
+ if (!dma_fence_add_callback(fence, &entity->cb,
+ amd_sched_entity_clear_dep))
return true;
/* Ignore it when it is already scheduled */
- fence_put(fence);
+ dma_fence_put(fence);
return false;
}
- if (!fence_add_callback(entity->dependency, &entity->cb,
- amd_sched_entity_wakeup))
+ if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+ amd_sched_entity_wakeup))
return true;
- fence_put(entity->dependency);
+ dma_fence_put(entity->dependency);
return false;
}
@@ -354,7 +354,8 @@ static void amd_sched_job_finish(struct work_struct *work)
sched->ops->free_job(s_job);
}
-static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+static void amd_sched_job_finish_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
finish_cb);
@@ -388,8 +389,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
- if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
- fence_put(s_job->s_fence->parent);
+ if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+ dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = NULL;
}
}
@@ -410,21 +411,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct amd_sched_fence *s_fence = s_job->s_fence;
- struct fence *fence;
+ struct dma_fence *fence;
spin_unlock(&sched->job_list_lock);
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
if (fence) {
- s_fence->parent = fence_get(fence);
- r = fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
- fence_put(fence);
+ dma_fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
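The recovery path above and the main loop later in this file share one callback idiom that the rename leaves structurally intact; annotated, it reads:

    /* Hold a long-lived reference in the scheduler fence, register the
     * completion callback, and treat -ENOENT (fence already signaled)
     * by running the handler inline; the reference run_job returned is
     * dropped either way. */
    s_fence->parent = dma_fence_get(fence);
    r = dma_fence_add_callback(fence, &s_fence->cb,
                               amd_sched_process_job);
    if (r == -ENOENT)
            amd_sched_process_job(fence, &s_fence->cb);
    else if (r)
            DRM_ERROR("fence add callback failed (%d)\n", r);
    dma_fence_put(fence);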
@@ -446,8 +447,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
struct amd_sched_entity *entity = sched_job->s_entity;
trace_amd_sched_job(sched_job);
- fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
- amd_sched_job_finish_cb);
+ dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+ amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
@@ -511,7 +512,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
return entity;
}
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
@@ -521,7 +522,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
amd_sched_fence_finished(s_fence);
trace_amd_sched_process_job(s_fence);
- fence_put(&s_fence->finished);
+ dma_fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
@@ -547,7 +548,7 @@ static int amd_sched_main(void *param)
struct amd_sched_entity *entity = NULL;
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
- struct fence *fence;
+ struct dma_fence *fence;
wait_event_interruptible(sched->wake_up_worker,
(!amd_sched_blocked(sched) &&
@@ -569,15 +570,15 @@ static int amd_sched_main(void *param)
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
- s_fence->parent = fence_get(fence);
- r = fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
- fence_put(fence);
+ dma_fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7cbbbfb502ef..876aa43b57df 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -25,7 +25,7 @@
#define _GPU_SCHEDULER_H_
#include <linux/kfifo.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
struct amd_gpu_scheduler;
struct amd_sched_rq;
@@ -50,8 +50,8 @@ struct amd_sched_entity {
atomic_t fence_seq;
uint64_t fence_context;
- struct fence *dependency;
- struct fence_cb cb;
+ struct dma_fence *dependency;
+ struct dma_fence_cb cb;
};
/**
@@ -66,10 +66,10 @@ struct amd_sched_rq {
};
struct amd_sched_fence {
- struct fence scheduled;
- struct fence finished;
- struct fence_cb cb;
- struct fence *parent;
+ struct dma_fence scheduled;
+ struct dma_fence finished;
+ struct dma_fence_cb cb;
+ struct dma_fence *parent;
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
@@ -79,15 +79,15 @@ struct amd_sched_job {
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
- struct fence_cb finish_cb;
+ struct dma_fence_cb finish_cb;
struct work_struct finish_work;
struct list_head node;
struct delayed_work work_tdr;
};
-extern const struct fence_ops amd_sched_fence_ops_scheduled;
-extern const struct fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
+extern const struct dma_fence_ops amd_sched_fence_ops_finished;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
{
if (f->ops == &amd_sched_fence_ops_scheduled)
return container_of(f, struct amd_sched_fence, scheduled);
@@ -103,8 +103,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
* these functions should be implemented in driver side
*/
struct amd_sched_backend_ops {
- struct fence *(*dependency)(struct amd_sched_job *sched_job);
- struct fence *(*run_job)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
void (*timedout_job)(struct amd_sched_job *sched_job);
void (*free_job)(struct amd_sched_job *sched_job);
};
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 3653b5a40494..91530e25aaff 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -42,46 +42,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);
- fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
- &fence->lock, entity->fence_context, seq);
- fence_init(&fence->finished, &amd_sched_fence_ops_finished,
- &fence->lock, entity->fence_context + 1, seq);
+ dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
return fence;
}
void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->scheduled);
+ int ret = dma_fence_signal(&fence->scheduled);
if (!ret)
- FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->scheduled,
+ "signaled from irq context\n");
else
- FENCE_TRACE(&fence->scheduled, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->scheduled,
+ "was already signaled\n");
}
void amd_sched_fence_finished(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->finished);
+ int ret = dma_fence_signal(&fence->finished);
if (!ret)
- FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->finished,
+ "signaled from irq context\n");
else
- FENCE_TRACE(&fence->finished, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->finished,
+ "was already signaled\n");
}
-static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
{
return "amd_sched";
}
-static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
return (const char *)fence->sched->name;
}
-static bool amd_sched_fence_enable_signaling(struct fence *f)
+static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
{
return true;
}
@@ -95,10 +99,10 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
*/
static void amd_sched_fence_free(struct rcu_head *rcu)
{
- struct fence *f = container_of(rcu, struct fence, rcu);
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- fence_put(fence->parent);
+ dma_fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
}
@@ -110,7 +114,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release_scheduled(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
@@ -124,27 +128,27 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
*
* Drop the extra reference from the scheduled fence to the base fence.
*/
-static void amd_sched_fence_release_finished(struct fence *f)
+static void amd_sched_fence_release_finished(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- fence_put(&fence->scheduled);
+ dma_fence_put(&fence->scheduled);
}
-const struct fence_ops amd_sched_fence_ops_scheduled = {
+const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amd_sched_fence_release_scheduled,
};
-const struct fence_ops amd_sched_fence_ops_finished = {
+const struct dma_fence_ops amd_sched_fence_ops_finished = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amd_sched_fence_release_finished,
};
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 48019ae22ddb..bbaa55add2d2 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -223,14 +223,12 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
{
struct hdlcd_drm_private *hdlcd;
struct drm_gem_cma_object *gem;
- unsigned int depth, bpp;
u32 src_w, src_h, dest_w, dest_h;
dma_addr_t scanout_start;
if (!plane->state->fb)
return;
- drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
src_w = plane->state->src_w >> 16;
src_h = plane->state->src_h >> 16;
dest_w = plane->state->crtc_w;
@@ -238,7 +236,8 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
scanout_start = gem->paddr + plane->state->fb->offsets[0] +
plane->state->crtc_y * plane->state->fb->pitches[0] +
- plane->state->crtc_x * bpp / 8;
+ plane->state->crtc_x *
+ drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
hdlcd = plane->dev->dev_private;
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index fb6a418ce6be..6477d1a65266 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -453,7 +453,8 @@ static int hdlcd_probe(struct platform_device *pdev)
return -EAGAIN;
}
- component_match_add(&pdev->dev, &match, compare_dev, port);
+ drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
+ of_node_put(port);
return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops,
match);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 9280358b8f15..9f4739452a25 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -493,7 +493,9 @@ static int malidp_platform_probe(struct platform_device *pdev)
return -EAGAIN;
}
- component_match_add(&pdev->dev, &match, malidp_compare_dev, port);
+ drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
+ port);
+ of_node_put(port);
return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
match);
}
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index a6132f1d58c1..be815d0cc772 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -198,9 +198,6 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
{
- unsigned int depth;
- int bpp;
-
/* RGB888 or BGR888 can't be rotated */
if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
return -EINVAL;
@@ -210,9 +207,7 @@ static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
* worth of pixel data. Required size is then:
* size = rotated_width * (bpp / 8) * 8;
*/
- drm_fb_get_bpp_depth(fmt, &depth, &bpp);
-
- return w * bpp;
+ return w * drm_format_plane_cpp(fmt, 0) * 8;
}
static int malidp550_query_hw(struct malidp_hw_device *hwdev)
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 82c193e5e0d6..abaca03b9d36 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -254,21 +254,18 @@ int malidp_de_planes_init(struct drm_device *drm)
if (ret < 0)
goto cleanup;
- if (!drm->mode_config.rotation_property) {
+ /* SMART layer can't be rotated */
+ if (id != DE_SMART) {
unsigned long flags = DRM_ROTATE_0 |
DRM_ROTATE_90 |
DRM_ROTATE_180 |
DRM_ROTATE_270 |
DRM_REFLECT_X |
DRM_REFLECT_Y;
- drm->mode_config.rotation_property =
- drm_mode_create_rotation_property(drm, flags);
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_ROTATE_0,
+ flags);
}
- /* SMART layer can't be rotated */
- if (drm->mode_config.rotation_property && (id != DE_SMART))
- drm_object_attach_property(&plane->base.base,
- drm->mode_config.rotation_property,
- DRM_ROTATE_0);
drm_plane_helper_add(&plane->base,
&malidp_de_plane_helper_funcs);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 1e0e68f608e4..94e46da9a758 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -254,7 +254,7 @@ static void armada_add_endpoints(struct device *dev,
continue;
}
- component_match_add(dev, match, compare_of, remote);
+ drm_of_component_match_add(dev, match, compare_of, remote);
of_node_put(remote);
}
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 0743e65cb240..2a1368fac1d1 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver ast_bo_driver = {
.ttm_tt_populate = ast_ttm_tt_populate,
.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
.init_mem_type = ast_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = ast_bo_evict_flags,
.move = NULL,
.verify_access = ast_bo_verify_access,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 5f484310bee9..9f6222895212 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -464,7 +464,7 @@ atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
drm_atomic_helper_cleanup_planes(dev, old_state);
- drm_atomic_state_free(old_state);
+ drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&dc->commit.wait.lock);
@@ -521,6 +521,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (async)
queue_work(dc->wq, &commit->work);
else
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 9d4c030672f0..246ed1e33d8a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -393,7 +393,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
- (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)))
+ drm_rotation_90_or_270(state->base.rotation))
cfg |= ATMEL_HLCDC_YUV422ROT;
atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -628,7 +628,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
/*
* Swap width and size in case of 90 or 270 degrees rotation
*/
- if (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
+ if (drm_rotation_90_or_270(state->base.rotation)) {
tmp = state->crtc_w;
state->crtc_w = state->crtc_h;
state->crtc_h = tmp;
@@ -883,9 +883,9 @@ static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p,
return 0;
}
-static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
- const struct atmel_hlcdc_layer_desc *desc,
- struct atmel_hlcdc_plane_properties *props)
+static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
+ const struct atmel_hlcdc_layer_desc *desc,
+ struct atmel_hlcdc_plane_properties *props)
{
struct regmap *regmap = plane->layer.hlcdc->regmap;
@@ -902,10 +902,18 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_GA_MASK);
}
- if (desc->layout.xstride && desc->layout.pstride)
- drm_object_attach_property(&plane->base.base,
- plane->base.dev->mode_config.rotation_property,
- DRM_ROTATE_0);
+ if (desc->layout.xstride && desc->layout.pstride) {
+ int ret;
+
+ ret = drm_plane_create_rotation_property(&plane->base,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_90 |
+ DRM_ROTATE_180 |
+ DRM_ROTATE_270);
+ if (ret)
+ return ret;
+ }
if (desc->layout.csc) {
/*
@@ -925,6 +933,8 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_CSC_CFG(&plane->layer, 2),
0x40040890);
}
+
+ return 0;
}
static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
@@ -1036,7 +1046,9 @@ atmel_hlcdc_plane_create(struct drm_device *dev,
&atmel_hlcdc_layer_plane_helper_funcs);
/* Set default property values*/
- atmel_hlcdc_plane_init_properties(plane, desc, props);
+ ret = atmel_hlcdc_plane_init_properties(plane, desc, props);
+ if (ret)
+ return ERR_PTR(ret);
return plane;
}
@@ -1054,15 +1066,6 @@ atmel_hlcdc_plane_create_properties(struct drm_device *dev)
if (!props->alpha)
return ERR_PTR(-ENOMEM);
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 |
- DRM_ROTATE_90 |
- DRM_ROTATE_180 |
- DRM_ROTATE_270);
- if (!dev->mode_config.rotation_property)
- return ERR_PTR(-ENOMEM);
-
return props;
}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 269cfca9ca06..099a3c688c26 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -199,6 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = {
.ttm_tt_populate = ttm_pool_populate,
.ttm_tt_unpopulate = ttm_pool_unpopulate,
.init_mem_type = bochs_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = bochs_bo_evict_flags,
.move = NULL,
.verify_access = bochs_bo_verify_access,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 10e12e74fc9f..bd6acc829f97 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -57,6 +57,13 @@ config DRM_PARADE_PS8622
---help---
Parade eDP-LVDS bridge chip driver.
+config DRM_SIL_SII8620
+ tristate "Silicon Image SII8620 HDMI/MHL bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Silicon Image SII8620 HDMI/MHL bridge chip driver.
+
config DRM_SII902X
tristate "Silicon Image sii902x RGB/HDMI bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index cdf3a3cf765d..97ed1a5fea9a 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
new file mode 100644
index 000000000000..b2c267df7ee7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -0,0 +1,1564 @@
+/*
+ * Silicon Image SiI8620 HDMI/MHL bridge driver
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/bridge/mhl.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "sil-sii8620.h"
+
+#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
+
+enum sii8620_mode {
+ CM_DISCONNECTED,
+ CM_DISCOVERY,
+ CM_MHL1,
+ CM_MHL3,
+ CM_ECBUS_S
+};
+
+enum sii8620_sink_type {
+ SINK_NONE,
+ SINK_HDMI,
+ SINK_DVI
+};
+
+enum sii8620_mt_state {
+ MT_STATE_READY,
+ MT_STATE_BUSY,
+ MT_STATE_DONE
+};
+
+struct sii8620 {
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct clk *clk_xtal;
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_int;
+ struct regulator_bulk_data supplies[2];
+ struct mutex lock; /* context lock, protects fields below */
+ int error;
+ enum sii8620_mode mode;
+ enum sii8620_sink_type sink_type;
+ u8 cbus_status;
+ u8 stat[MHL_DST_SIZE];
+ u8 xstat[MHL_XDS_SIZE];
+ u8 devcap[MHL_DCAP_SIZE];
+ u8 xdevcap[MHL_XDC_SIZE];
+ u8 avif[19];
+ struct edid *edid;
+ unsigned int gen2_write_burst:1;
+ enum sii8620_mt_state mt_state;
+ struct list_head mt_queue;
+};
+
+struct sii8620_mt_msg;
+
+typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg);
+
+struct sii8620_mt_msg {
+ struct list_head node;
+ u8 reg[4];
+ u8 ret;
+ sii8620_mt_msg_cb send;
+ sii8620_mt_msg_cb recv;
+};
+
+static const u8 sii8620_i2c_page[] = {
+ 0x39, /* Main System */
+ 0x3d, /* TDM and HSIC */
+ 0x49, /* TMDS Receiver, MHL EDID */
+ 0x4d, /* eMSC, HDCP, HSIC */
+ 0x5d, /* MHL Spec */
+ 0x64, /* MHL CBUS */
+ 0x59, /* Hardware TPI (Transmitter Programming Interface) */
+ 0x61, /* eCBUS-S, eCBUS-D */
+};
+
+static void sii8620_fetch_edid(struct sii8620 *ctx);
+static void sii8620_set_upstream_edid(struct sii8620 *ctx);
+static void sii8620_enable_hpd(struct sii8620 *ctx);
+static void sii8620_mhl_disconnected(struct sii8620 *ctx);
+
+static int sii8620_clear_error(struct sii8620 *ctx)
+{
+ int ret = ctx->error;
+
+ ctx->error = 0;
+ return ret;
+}
+
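+/*
+ * Register addresses are 16 bits wide: the high byte picks the I2C
+ * client address from sii8620_i2c_page[], the low byte is the offset
+ * within that page. I2C errors are latched in ctx->error, so whole
+ * access sequences can run before a single error check.
+ */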
+static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)
+{
+ struct device *dev = ctx->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 data = addr;
+ struct i2c_msg msg[] = {
+ {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags,
+ .len = 1,
+ .buf = &data
+ },
+ {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags | I2C_M_RD,
+ .len = len,
+ .buf = buf
+ },
+ };
+ int ret;
+
+ if (ctx->error)
+ return;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ dev_dbg(dev, "read at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+ if (ret != 2) {
+ dev_err(dev, "Read at %#06x of %d bytes failed with code %d.\n",
+ addr, len, ret);
+ ctx->error = ret < 0 ? ret : -EIO;
+ }
+}
+
+static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
+{
+ u8 ret = 0;
+
+ sii8620_read_buf(ctx, addr, &ret, 1);
+ return ret;
+}
+
+static void sii8620_write_buf(struct sii8620 *ctx, u16 addr, const u8 *buf,
+ int len)
+{
+ struct device *dev = ctx->dev;
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 data[2];
+ struct i2c_msg msg = {
+ .addr = sii8620_i2c_page[addr >> 8],
+ .flags = client->flags,
+ .len = len + 1,
+ };
+ int ret;
+
+ if (ctx->error)
+ return;
+
+ if (len > 1) {
+ msg.buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!msg.buf) {
+ ctx->error = -ENOMEM;
+ return;
+ }
+ memcpy(msg.buf + 1, buf, len);
+ } else {
+ msg.buf = data;
+ msg.buf[1] = *buf;
+ }
+
+ msg.buf[0] = addr;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ dev_dbg(dev, "write at %04x: %*ph, %d\n", addr, len, buf, ret);
+
+ if (ret != 1) {
+ dev_err(dev, "Write at %#06x of %*ph failed with code %d.\n",
+ addr, len, buf, ret);
+ ctx->error = ret < 0 ? ret : -EIO;
+ }
+
+ if (len > 1)
+ kfree(msg.buf);
+}
+
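+/*
+ * Pack the variadic byte arguments into a stack array and hand it to
+ * sii8620_write_buf(), so register sequences read as plain lists of
+ * (register, value) pairs at the call sites.
+ */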
+#define sii8620_write(ctx, addr, arr...) \
+({\
+ u8 d[] = { arr }; \
+ sii8620_write_buf(ctx, addr, d, ARRAY_SIZE(d)); \
+})
+
+static void __sii8620_write_seq(struct sii8620 *ctx, const u16 *seq, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i += 2)
+ sii8620_write(ctx, seq[i], seq[i + 1]);
+}
+
+#define sii8620_write_seq(ctx, seq...) \
+({\
+ const u16 d[] = { seq }; \
+ __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+#define sii8620_write_seq_static(ctx, seq...) \
+({\
+ static const u16 d[] = { seq }; \
+ __sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
+})
+
+static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val)
+{
+ val = (val & mask) | (sii8620_readb(ctx, addr) & ~mask);
+ sii8620_write(ctx, addr, val);
+}
+
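+/*
+ * MSC transaction (MT) queue: requests are queued on ctx->mt_queue
+ * and issued one at a time. The IRQ thread moves mt_state to
+ * MT_STATE_DONE when the peer responds; sii8620_mt_work() then runs
+ * the recv callback, frees the finished message and sends the next.
+ */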
+static void sii8620_mt_cleanup(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg, *n;
+
+ list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) {
+ list_del(&msg->node);
+ kfree(msg);
+ }
+ ctx->mt_state = MT_STATE_READY;
+}
+
+static void sii8620_mt_work(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg;
+
+ if (ctx->error)
+ return;
+ if (ctx->mt_state == MT_STATE_BUSY || list_empty(&ctx->mt_queue))
+ return;
+
+ if (ctx->mt_state == MT_STATE_DONE) {
+ ctx->mt_state = MT_STATE_READY;
+ msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg,
+ node);
+ if (msg->recv)
+ msg->recv(ctx, msg);
+ list_del(&msg->node);
+ kfree(msg);
+ }
+
+ if (ctx->mt_state != MT_STATE_READY || list_empty(&ctx->mt_queue))
+ return;
+
+ ctx->mt_state = MT_STATE_BUSY;
+ msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+ if (msg->send)
+ msg->send(ctx, msg);
+}
+
+static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ switch (msg->reg[0]) {
+ case MHL_WRITE_STAT:
+ case MHL_SET_INT:
+ sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg + 1, 2);
+ sii8620_write(ctx, REG_MSC_COMMAND_START,
+ BIT_MSC_COMMAND_START_WRITE_STAT);
+ break;
+ case MHL_MSC_MSG:
+ sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg, 3);
+ sii8620_write(ctx, REG_MSC_COMMAND_START,
+ BIT_MSC_COMMAND_START_MSC_MSG);
+ break;
+ default:
+ dev_err(ctx->dev, "%s: command %#x not supported\n", __func__,
+ msg->reg[0]);
+ }
+}
+
+static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+
+ if (!msg)
+ ctx->error = -ENOMEM;
+ else
+ list_add_tail(&msg->node, &ctx->mt_queue);
+
+ return msg;
+}
+
+static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2)
+{
+ struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+ if (!msg)
+ return;
+
+ msg->reg[0] = cmd;
+ msg->reg[1] = arg1;
+ msg->reg[2] = arg2;
+ msg->send = sii8620_mt_msc_cmd_send;
+}
+
+static void sii8620_mt_write_stat(struct sii8620 *ctx, u8 reg, u8 val)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_WRITE_STAT, reg, val);
+}
+
+static inline void sii8620_mt_set_int(struct sii8620 *ctx, u8 irq, u8 mask)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_SET_INT, irq, mask);
+}
+
+static void sii8620_mt_msc_msg(struct sii8620 *ctx, u8 cmd, u8 data)
+{
+ sii8620_mt_msc_cmd(ctx, MHL_MSC_MSG, cmd, data);
+}
+
+static void sii8620_mt_rap(struct sii8620 *ctx, u8 code)
+{
+ sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code);
+}
+
+static void sii8620_mt_read_devcap_send(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN;
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE,
+ REG_EDID_CTRL, ctrl,
+ REG_TPI_CBUS_START, BIT_TPI_CBUS_START_GET_DEVCAP_START
+ );
+}
+
+/*
+ * Copy src to dst and leave the changed bits in src. The XOR pair
+ * below swaps in place: afterwards *dst holds the new value and *src
+ * holds old ^ new, i.e. a mask of the bits that changed.
+ */
+static void sii8620_update_array(u8 *dst, u8 *src, int count)
+{
+ while (--count >= 0) {
+ *src ^= *dst;
+ *dst++ ^= *src++;
+ }
+}
+
+static void sii8620_mr_devcap(struct sii8620 *ctx)
+{
+ static const char * const sink_str[] = {
+ [SINK_NONE] = "NONE",
+ [SINK_HDMI] = "HDMI",
+ [SINK_DVI] = "DVI"
+ };
+
+ u8 dcap[MHL_DCAP_SIZE];
+ char sink_name[20];
+ struct device *dev = ctx->dev;
+
+ sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
+ if (ctx->error < 0)
+ return;
+
+ dev_info(dev, "dcap: %*ph\n", MHL_DCAP_SIZE, dcap);
+ dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
+ dcap[MHL_DCAP_MHL_VERSION] / 16,
+ dcap[MHL_DCAP_MHL_VERSION] % 16, dcap[MHL_DCAP_ADOPTER_ID_H],
+ dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H],
+ dcap[MHL_DCAP_DEVICE_ID_L]);
+ sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+
+ if (!(dcap[MHL_DCAP_CAT] & MHL_DCAP_CAT_SINK))
+ return;
+
+ sii8620_fetch_edid(ctx);
+ if (!ctx->edid) {
+ dev_err(ctx->dev, "Cannot fetch EDID\n");
+ sii8620_mhl_disconnected(ctx);
+ return;
+ }
+
+ if (drm_detect_hdmi_monitor(ctx->edid))
+ ctx->sink_type = SINK_HDMI;
+ else
+ ctx->sink_type = SINK_DVI;
+
+ drm_edid_get_monitor_name(ctx->edid, sink_name, ARRAY_SIZE(sink_name));
+
+ dev_info(dev, "detected sink(type: %s): %s\n",
+ sink_str[ctx->sink_type], sink_name);
+ sii8620_set_upstream_edid(ctx);
+ sii8620_enable_hpd(ctx);
+}
+
+static void sii8620_mr_xdevcap(struct sii8620 *ctx)
+{
+ sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap,
+ MHL_XDC_SIZE);
+
+ sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE),
+ MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT);
+ sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP);
+}
+
+static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx,
+ struct sii8620_mt_msg *msg)
+{
+ u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN;
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE
+ | BIT_INTR9_EDID_ERROR,
+ REG_EDID_CTRL, ctrl,
+ REG_EDID_FIFO_ADDR, 0
+ );
+
+ if (msg->reg[0] == MHL_READ_XDEVCAP)
+ sii8620_mr_xdevcap(ctx);
+ else
+ sii8620_mr_devcap(ctx);
+}
+
+static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
+{
+ struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);
+
+ if (!msg)
+ return;
+
+ msg->reg[0] = xdevcap ? MHL_READ_XDEVCAP : MHL_READ_DEVCAP;
+ msg->send = sii8620_mt_read_devcap_send;
+ msg->recv = sii8620_mt_read_devcap_recv;
+}
+
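+/*
+ * Fetch the sink's EDID through the chip's DDC master, FETCH_SIZE
+ * bytes per transfer. Once the first block is in, the extension
+ * count is known and the buffer is grown to fit all blocks. Losing
+ * the CBUS connection mid-read aborts and leaves ctx->edid NULL.
+ */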
+static void sii8620_fetch_edid(struct sii8620 *ctx)
+{
+ u8 lm_ddc, ddc_cmd, int3, cbus;
+ int fetched, i;
+ int edid_len = EDID_LENGTH;
+ u8 *edid;
+
+ sii8620_readb(ctx, REG_CBUS_STATUS);
+ lm_ddc = sii8620_readb(ctx, REG_LM_DDC);
+ ddc_cmd = sii8620_readb(ctx, REG_DDC_CMD);
+
+ sii8620_write_seq(ctx,
+ REG_INTR9_MASK, 0,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+ REG_HDCP2X_POLL_CS, 0x71,
+ REG_HDCP2X_CTRL_0, BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX,
+ REG_LM_DDC, lm_ddc | BIT_LM_DDC_SW_TPI_EN_DISABLED,
+ );
+
+ for (i = 0; i < 256; ++i) {
+ u8 ddc_stat = sii8620_readb(ctx, REG_DDC_STATUS);
+
+ if (!(ddc_stat & BIT_DDC_STATUS_DDC_I2C_IN_PROG))
+ break;
+ sii8620_write(ctx, REG_DDC_STATUS,
+ BIT_DDC_STATUS_DDC_FIFO_EMPTY);
+ }
+
+ sii8620_write(ctx, REG_DDC_ADDR, 0x50 << 1);
+
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!edid) {
+ ctx->error = -ENOMEM;
+ return;
+ }
+
+#define FETCH_SIZE 16
+ for (fetched = 0; fetched < edid_len; fetched += FETCH_SIZE) {
+ sii8620_readb(ctx, REG_DDC_STATUS);
+ sii8620_write_seq(ctx,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_ABORT,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO,
+ REG_DDC_STATUS, BIT_DDC_STATUS_DDC_FIFO_EMPTY
+ );
+ sii8620_write_seq(ctx,
+ REG_DDC_SEGM, fetched >> 8,
+ REG_DDC_OFFSET, fetched & 0xff,
+ REG_DDC_DIN_CNT1, FETCH_SIZE,
+ REG_DDC_DIN_CNT2, 0,
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
+ );
+
+ do {
+ int3 = sii8620_readb(ctx, REG_INTR3);
+ cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+ if (int3 & BIT_DDC_CMD_DONE)
+ break;
+
+ if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+ kfree(edid);
+ edid = NULL;
+ goto end;
+ }
+ } while (1);
+
+ sii8620_readb(ctx, REG_DDC_STATUS);
+ while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
+ usleep_range(10, 20);
+
+ sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
+ if (fetched + FETCH_SIZE == EDID_LENGTH) {
+ u8 ext = ((struct edid *)edid)->extensions;
+
+ if (ext) {
+ u8 *new_edid;
+
+ edid_len += ext * EDID_LENGTH;
+ new_edid = krealloc(edid, edid_len, GFP_KERNEL);
+ if (!new_edid) {
+ kfree(edid);
+ ctx->error = -ENOMEM;
+ return;
+ }
+ edid = new_edid;
+ }
+ }
+
+ if (fetched + FETCH_SIZE == edid_len)
+ sii8620_write(ctx, REG_INTR3, int3);
+ }
+
+ sii8620_write(ctx, REG_LM_DDC, lm_ddc);
+
+end:
+ kfree(ctx->edid);
+ ctx->edid = (struct edid *)edid;
+}
+
+static void sii8620_set_upstream_edid(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_DPD, BIT_DPD_PDNRX12 | BIT_DPD_PDIDCK_N
+ | BIT_DPD_PD_MHL_CLK_N, 0xff);
+
+ sii8620_write_seq_static(ctx,
+ REG_RX_HDMI_CTRL3, 0x00,
+ REG_PKT_FILTER_0, 0xFF,
+ REG_PKT_FILTER_1, 0xFF,
+ REG_ALICE0_BW_I2C, 0x06
+ );
+
+ sii8620_setbits(ctx, REG_RX_HDMI_CLR_BUFFER,
+ BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN, 0xff);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN,
+ REG_EDID_FIFO_ADDR, 0,
+ );
+
+ sii8620_write_buf(ctx, REG_EDID_FIFO_WR_DATA, (u8 *)ctx->edid,
+ (ctx->edid->extensions + 1) * EDID_LENGTH);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID
+ | BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO
+ | BIT_EDID_CTRL_EDID_MODE_EN,
+ REG_INTR5_MASK, BIT_INTR_SCDT_CHANGE,
+ REG_INTR9_MASK, 0
+ );
+}
+
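+/*
+ * Program the dividers for the crystal clock: pick the first table
+ * entry at or above the measured rate (falling back to the highest)
+ * and warn if the board's clock is not an exact match.
+ */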
+static void sii8620_xtal_set_rate(struct sii8620 *ctx)
+{
+ static const struct {
+ unsigned int rate;
+ u8 div;
+ u8 tp1;
+ } rates[] = {
+ { 19200, 0x04, 0x53 },
+ { 20000, 0x04, 0x62 },
+ { 24000, 0x05, 0x75 },
+ { 30000, 0x06, 0x92 },
+ { 38400, 0x0c, 0xbc },
+ };
+ unsigned long rate = clk_get_rate(ctx->clk_xtal) / 1000;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rates) - 1; ++i)
+ if (rate <= rates[i].rate)
+ break;
+
+ if (rate != rates[i].rate)
+ dev_err(ctx->dev, "xtal clock rate(%lukHz) not supported, setting MHL for %ukHz.\n",
+ rate, rates[i].rate);
+
+ sii8620_write(ctx, REG_DIV_CTL_MAIN, rates[i].div);
+ sii8620_write(ctx, REG_HDCP2X_TP1, rates[i].tp1);
+}
+
+static int sii8620_hw_on(struct sii8620 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret)
+ return ret;
+ usleep_range(10000, 20000);
+ return clk_prepare_enable(ctx->clk_xtal);
+}
+
+static int sii8620_hw_off(struct sii8620 *ctx)
+{
+ clk_disable_unprepare(ctx->clk_xtal);
+ gpiod_set_value(ctx->gpio_reset, 1);
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static void sii8620_hw_reset(struct sii8620 *ctx)
+{
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ usleep_range(5000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ msleep(300);
+}
+
+static void sii8620_cbus_reset(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
+ | BIT_PWD_SRST_CBUS_RST_SW_EN,
+ REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN
+ );
+}
+
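+/*
+ * Zone control is automatic except in MHL1 mode, where the TX zone
+ * is forced via REG_TX_ZONE_CTL1 and the crystal clock select bit is
+ * left clear.
+ */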
+static void sii8620_set_auto_zone(struct sii8620 *ctx)
+{
+ if (ctx->mode != CM_MHL1) {
+ sii8620_write_seq_static(ctx,
+ REG_TX_ZONE_CTL1, 0x0,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+ );
+ } else {
+ sii8620_write_seq_static(ctx,
+ REG_TX_ZONE_CTL1, VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE
+ );
+ }
+}
+
+static void sii8620_stop_video(struct sii8620 *ctx)
+{
+ u8 uninitialized_var(val);
+
+ sii8620_write_seq_static(ctx,
+ REG_TPI_INTR_EN, 0,
+ REG_HDCP2X_INTR0_MASK, 0,
+ REG_TPI_COPP_DATA2, 0,
+ REG_TPI_INTR_ST0, ~0,
+ );
+
+ switch (ctx->sink_type) {
+ case SINK_DVI:
+ val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+ | BIT_TPI_SC_TPI_AV_MUTE;
+ break;
+ case SINK_HDMI:
+ val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN
+ | BIT_TPI_SC_TPI_AV_MUTE
+ | BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI;
+ break;
+ default:
+ return;
+ }
+
+ sii8620_write(ctx, REG_TPI_SC, val);
+}
+
+static void sii8620_start_hdmi(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL
+ | BIT_RX_HDMI_CTRL2_USE_AV_MUTE,
+ REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE
+ | BIT_VID_OVRRD_M1080P_OVRRD,
+ REG_VID_MODE, 0,
+ REG_MHL_TOP_CTL, 0x1,
+ REG_MHLTX_CTL6, 0xa0,
+ REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
+ REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
+ );
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL |
+ MHL_DST_LM_PATH_ENABLED);
+
+ sii8620_set_auto_zone(ctx);
+
+ sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
+
+ sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif,
+ ARRAY_SIZE(ctx->avif));
+
+ sii8620_write(ctx, REG_PKT_FILTER_0, 0xa1, 0x2);
+}
+
+static void sii8620_start_video(struct sii8620 *ctx)
+{
+ if (ctx->mode < CM_MHL3)
+ sii8620_stop_video(ctx);
+
+ switch (ctx->sink_type) {
+ case SINK_HDMI:
+ sii8620_start_hdmi(ctx);
+ break;
+ case SINK_DVI:
+ default:
+ break;
+ }
+}
+
+static void sii8620_disable_hpd(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0);
+ sii8620_write_seq_static(ctx,
+ REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN,
+ REG_INTR8_MASK, 0
+ );
+}
+
+static void sii8620_enable_hpd(struct sii8620 *ctx)
+{
+ sii8620_setbits(ctx, REG_TMDS_CSTAT_P3,
+ BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS
+ | BIT_TMDS_CSTAT_P3_CLR_AVI, ~0);
+ sii8620_write_seq_static(ctx,
+ REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN
+ | BIT_HPD_CTRL_HPD_HIGH,
+ );
+}
+
+static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
+{
+ if (ctx->gen2_write_burst)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MDT_RCV_TIMEOUT, 100,
+ REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN
+ );
+ ctx->gen2_write_burst = 1;
+}
+
+static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
+{
+ if (!ctx->gen2_write_burst)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MDT_XMIT_CTRL, 0,
+ REG_MDT_RCV_CTRL, 0
+ );
+ ctx->gen2_write_burst = 0;
+}
+
+static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT
+ | BIT_MDT_RCV_SM_ABORT_PKT_RCVD | BIT_MDT_RCV_SM_ERROR
+ | BIT_MDT_XMIT_TIMEOUT | BIT_MDT_XMIT_SM_ABORT_PKT_RCVD
+ | BIT_MDT_XMIT_SM_ERROR,
+ REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY
+ | BIT_MDT_IDLE_AFTER_HAWB_DISABLE
+ | BIT_MDT_RFIFO_DATA_RDY
+ );
+ sii8620_enable_gen2_write_burst(ctx);
+}
+
+static void sii8620_mhl_discover(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_DISC_PULSE_PROCEED,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K),
+ REG_CBUS_DISC_INTR0_MASK, BIT_MHL3_EST_INT
+ | BIT_MHL_EST_INT
+ | BIT_NOT_MHL_EST_INT
+ | BIT_CBUS_MHL3_DISCON_INT
+ | BIT_CBUS_MHL12_DISCON_INT
+ | BIT_RGND_READY_INT,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+ REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+ | BIT_MHL_DP_CTL0_TX_OE_OVR,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_MHL_DP_CTL1, 0xA2,
+ REG_MHL_DP_CTL2, 0x03,
+ REG_MHL_DP_CTL3, 0x35,
+ REG_MHL_DP_CTL5, 0x02,
+ REG_MHL_DP_CTL6, 0x02,
+ REG_MHL_DP_CTL7, 0x03,
+ REG_COC_CTLC, 0xFF,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+ | BIT_DPD_OSC_EN | BIT_DPD_PWRON_HSIC,
+ REG_COC_INTR_MASK, BIT_COC_PLL_LOCK_STATUS_CHANGE
+ | BIT_COC_CALIBRATION_DONE,
+ REG_CBUS_INT_1_MASK, BIT_CBUS_MSC_ABORT_RCVD
+ | BIT_CBUS_CMD_ABORT,
+ REG_CBUS_INT_0_MASK, BIT_CBUS_MSC_MT_DONE
+ | BIT_CBUS_HPD_CHG
+ | BIT_CBUS_MSC_MR_WRITE_STAT
+ | BIT_CBUS_MSC_MR_MSC_MSG
+ | BIT_CBUS_MSC_MR_WRITE_BURST
+ | BIT_CBUS_MSC_MR_SET_INT
+ | BIT_CBUS_MSC_MT_DONE_NACK
+ );
+}
+
+static void sii8620_peer_specific_init(struct sii8620 *ctx)
+{
+ if (ctx->mode == CM_MHL3)
+ sii8620_write_seq_static(ctx,
+ REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD,
+ REG_EMSCINTRMASK1,
+ BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR
+ );
+ else
+ sii8620_write_seq_static(ctx,
+ REG_HDCP2X_INTR0_MASK, 0x00,
+ REG_EMSCINTRMASK1, 0x00,
+ REG_HDCP2X_INTR0, 0xFF,
+ REG_INTR1, 0xFF,
+ REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD
+ | BIT_SYS_CTRL1_TX_CTRL_HDMI
+ );
+}
+
+#define SII8620_MHL_VERSION 0x32
+#define SII8620_SCRATCHPAD_SIZE 16
+#define SII8620_INT_STAT_SIZE 0x33
+
+static void sii8620_set_dev_cap(struct sii8620 *ctx)
+{
+ static const u8 devcap[MHL_DCAP_SIZE] = {
+ [MHL_DCAP_MHL_VERSION] = SII8620_MHL_VERSION,
+ [MHL_DCAP_CAT] = MHL_DCAP_CAT_SOURCE | MHL_DCAP_CAT_POWER,
+ [MHL_DCAP_ADOPTER_ID_H] = 0x01,
+ [MHL_DCAP_ADOPTER_ID_L] = 0x41,
+ [MHL_DCAP_VID_LINK_MODE] = MHL_DCAP_VID_LINK_RGB444
+ | MHL_DCAP_VID_LINK_PPIXEL
+ | MHL_DCAP_VID_LINK_16BPP,
+ [MHL_DCAP_AUD_LINK_MODE] = MHL_DCAP_AUD_LINK_2CH,
+ [MHL_DCAP_VIDEO_TYPE] = MHL_DCAP_VT_GRAPHICS,
+ [MHL_DCAP_LOG_DEV_MAP] = MHL_DCAP_LD_GUI,
+ [MHL_DCAP_BANDWIDTH] = 0x0f,
+ [MHL_DCAP_FEATURE_FLAG] = MHL_DCAP_FEATURE_RCP_SUPPORT
+ | MHL_DCAP_FEATURE_RAP_SUPPORT
+ | MHL_DCAP_FEATURE_SP_SUPPORT,
+ [MHL_DCAP_SCRATCHPAD_SIZE] = SII8620_SCRATCHPAD_SIZE,
+ [MHL_DCAP_INT_STAT_SIZE] = SII8620_INT_STAT_SIZE,
+ };
+ static const u8 xdcap[MHL_XDC_SIZE] = {
+ [MHL_XDC_ECBUS_SPEEDS] = MHL_XDC_ECBUS_S_075
+ | MHL_XDC_ECBUS_S_8BIT,
+ [MHL_XDC_TMDS_SPEEDS] = MHL_XDC_TMDS_150
+ | MHL_XDC_TMDS_300 | MHL_XDC_TMDS_600,
+ [MHL_XDC_ECBUS_ROLES] = MHL_XDC_DEV_HOST,
+ [MHL_XDC_LOG_DEV_MAPX] = MHL_XDC_LD_PHONE,
+ };
+
+ sii8620_write_buf(ctx, REG_MHL_DEVCAP_0, devcap, ARRAY_SIZE(devcap));
+ sii8620_write_buf(ctx, REG_MHL_EXTDEVCAP_0, xdcap, ARRAY_SIZE(xdcap));
+}
+
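+/*
+ * Link bring-up once MHL is established: program discovery and TMDS
+ * defaults, publish the local device capabilities and report
+ * DCAP_RDY so the peer starts reading them.
+ */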
+static void sii8620_mhl_init(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_CBUS_MSC_COMPAT_CTRL,
+ BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN,
+ );
+
+ sii8620_peer_specific_init(ctx);
+
+ sii8620_disable_hpd(ctx);
+
+ sii8620_write_seq_static(ctx,
+ REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_TMDS0_CCTRL1, 0x90,
+ REG_TMDS_CLK_EN, 0x01,
+ REG_TMDS_CH_EN, 0x11,
+ REG_BGR_BIAS, 0x87,
+ REG_ALICE0_ZONE_CTRL, 0xE8,
+ REG_ALICE0_MODE_CTRL, 0x04,
+ );
+ sii8620_setbits(ctx, REG_LM_DDC, BIT_LM_DDC_SW_TPI_EN_DISABLED, 0);
+ sii8620_write_seq_static(ctx,
+ REG_TPI_HW_OPT3, 0x76,
+ REG_TMDS_CCTRL, BIT_TMDS_CCTRL_TMDS_OE,
+ REG_TPI_DTD_B2, 79,
+ );
+ sii8620_set_dev_cap(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_MDT_XMIT_TIMEOUT, 100,
+ REG_MDT_XMIT_CTRL, 0x03,
+ REG_MDT_XFIFO_STAT, 0x00,
+ REG_MDT_RCV_TIMEOUT, 100,
+ REG_CBUS_LINK_CTRL_8, 0x1D,
+ );
+
+ sii8620_start_gen2_write_burst(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_BIST_CTRL, 0x00,
+ REG_COC_CTL1, 0x10,
+ REG_COC_CTL2, 0x18,
+ REG_COC_CTLF, 0x07,
+ REG_COC_CTL11, 0xF8,
+ REG_COC_CTL17, 0x61,
+ REG_COC_CTL18, 0x46,
+ REG_COC_CTL19, 0x15,
+ REG_COC_CTL1A, 0x01,
+ REG_MHL_COC_CTL3, BIT_MHL_COC_CTL3_COC_AECHO_EN,
+ REG_MHL_COC_CTL4, 0x2D,
+ REG_MHL_COC_CTL5, 0xF9,
+ REG_MSC_HEARTBEAT_CTRL, 0x27,
+ );
+ sii8620_disable_gen2_write_burst(ctx);
+
+ /* currently MHL3 is not supported, so we force version to 0 */
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), 0);
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY),
+ MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP
+ | MHL_DST_CONN_POW_STAT);
+ sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG);
+}
+
+static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
+{
+ if (ctx->mode == mode)
+ return;
+
+ ctx->mode = mode;
+
+ switch (mode) {
+ case CM_MHL1:
+ sii8620_write_seq_static(ctx,
+ REG_CBUS_MSC_COMPAT_CTRL, 0x02,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL1_2_VALUE,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12
+ | BIT_DPD_OSC_EN,
+ REG_COC_INTR_MASK, 0
+ );
+ break;
+ case CM_MHL3:
+ sii8620_write_seq_static(ctx,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_COC_CTL0, 0x40,
+ REG_MHL_COC_CTL1, 0x07
+ );
+ break;
+ case CM_DISCONNECTED:
+ break;
+ default:
+ dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode);
+ break;
+ }
+
+ sii8620_set_auto_zone(ctx);
+
+ if (mode != CM_MHL1)
+ return;
+
+ sii8620_write_seq_static(ctx,
+ REG_MHL_DP_CTL0, 0xBC,
+ REG_MHL_DP_CTL1, 0xBB,
+ REG_MHL_DP_CTL3, 0x48,
+ REG_MHL_DP_CTL5, 0x39,
+ REG_MHL_DP_CTL2, 0x2A,
+ REG_MHL_DP_CTL6, 0x2A,
+ REG_MHL_DP_CTL7, 0x08
+ );
+}
+
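+/*
+ * Return to the disconnected state: stop video, reset CBUS, mask and
+ * acknowledge every interrupt source except RGND detection, and drop
+ * all cached sink state including the EDID.
+ */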
+static void sii8620_disconnect(struct sii8620 *ctx)
+{
+ sii8620_disable_gen2_write_burst(ctx);
+ sii8620_stop_video(ctx);
+ msleep(50);
+ sii8620_cbus_reset(ctx);
+ sii8620_set_mode(ctx, CM_DISCONNECTED);
+ sii8620_write_seq_static(ctx,
+ REG_COC_CTL0, 0x40,
+ REG_CBUS3_CNVT, 0x84,
+ REG_COC_CTL14, 0x00,
+ REG_COC_CTL0, 0x40,
+ REG_HRXCTRL3, 0x07,
+ REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X
+ | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL
+ | BIT_MHL_PLL_CTL0_ZONE_MASK_OE,
+ REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE
+ | BIT_MHL_DP_CTL0_TX_OE_OVR,
+ REG_MHL_DP_CTL1, 0xBB,
+ REG_MHL_DP_CTL3, 0x48,
+ REG_MHL_DP_CTL5, 0x3F,
+ REG_MHL_DP_CTL2, 0x2F,
+ REG_MHL_DP_CTL6, 0x2A,
+ REG_MHL_DP_CTL7, 0x03
+ );
+ sii8620_disable_hpd(ctx);
+ sii8620_write_seq_static(ctx,
+ REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
+ REG_MHL_COC_CTL1, 0x07,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_DISC_CTRL8, 0x00,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_INT_CTRL, 0x00,
+ REG_MSC_HEARTBEAT_CTRL, 0x27,
+ REG_DISC_CTRL1, 0x25,
+ REG_CBUS_DISC_INTR0, (u8)~BIT_RGND_READY_INT,
+ REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT,
+ REG_MDT_INT_1, 0xff,
+ REG_MDT_INT_1_MASK, 0x00,
+ REG_MDT_INT_0, 0xff,
+ REG_MDT_INT_0_MASK, 0x00,
+ REG_COC_INTR, 0xff,
+ REG_COC_INTR_MASK, 0x00,
+ REG_TRXINTH, 0xff,
+ REG_TRXINTMH, 0x00,
+ REG_CBUS_INT_0, 0xff,
+ REG_CBUS_INT_0_MASK, 0x00,
+ REG_CBUS_INT_1, 0xff,
+ REG_CBUS_INT_1_MASK, 0x00,
+ REG_EMSCINTR, 0xff,
+ REG_EMSCINTRMASK, 0x00,
+ REG_EMSCINTR1, 0xff,
+ REG_EMSCINTRMASK1, 0x00,
+ REG_INTR8, 0xff,
+ REG_INTR8_MASK, 0x00,
+ REG_TPI_INTR_ST0, 0xff,
+ REG_TPI_INTR_EN, 0x00,
+ REG_HDCP2X_INTR0, 0xff,
+ REG_HDCP2X_INTR0_MASK, 0x00,
+ REG_INTR9, 0xff,
+ REG_INTR9_MASK, 0x00,
+ REG_INTR3, 0xff,
+ REG_INTR3_MASK, 0x00,
+ REG_INTR5, 0xff,
+ REG_INTR5_MASK, 0x00,
+ REG_INTR2, 0xff,
+ REG_INTR2_MASK, 0x00,
+ );
+ memset(ctx->stat, 0, sizeof(ctx->stat));
+ memset(ctx->xstat, 0, sizeof(ctx->xstat));
+ memset(ctx->devcap, 0, sizeof(ctx->devcap));
+ memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+ ctx->cbus_status = 0;
+ ctx->sink_type = SINK_NONE;
+ kfree(ctx->edid);
+ ctx->edid = NULL;
+ sii8620_mt_cleanup(ctx);
+}
+
+static void sii8620_mhl_disconnected(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K),
+ REG_CBUS_MSC_COMPAT_CTRL,
+ BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN
+ );
+ sii8620_disconnect(ctx);
+}
+
+static void sii8620_irq_disc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_DISC_INTR0);
+
+ if (stat & VAL_CBUS_MHL_DISCON)
+ sii8620_mhl_disconnected(ctx);
+
+ if (stat & BIT_RGND_READY_INT) {
+ u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2);
+
+ if ((stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K) {
+ sii8620_mhl_discover(ctx);
+ } else {
+ sii8620_write_seq_static(ctx,
+ REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT
+ | BIT_DISC_CTRL9_NOMHL_EST
+ | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS,
+ REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT
+ | BIT_CBUS_MHL3_DISCON_INT
+ | BIT_CBUS_MHL12_DISCON_INT
+ | BIT_NOT_MHL_EST_INT
+ );
+ }
+ }
+ if (stat & BIT_MHL_EST_INT)
+ sii8620_mhl_init(ctx);
+
+ sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat);
+}
+
+static void sii8620_irq_g2wb(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_MDT_INT_0);
+
+ if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE)
+ dev_dbg(ctx->dev, "HAWB idle\n");
+
+ sii8620_write(ctx, REG_MDT_INT_0, stat);
+}
+
+static void sii8620_status_changed_dcap(struct sii8620 *ctx)
+{
+ if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) {
+ sii8620_set_mode(ctx, CM_MHL1);
+ sii8620_peer_specific_init(ctx);
+ sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE
+ | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR);
+ }
+}
+
+static void sii8620_status_changed_path(struct sii8620 *ctx)
+{
+ if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL
+ | MHL_DST_LM_PATH_ENABLED);
+ sii8620_mt_read_devcap(ctx, false);
+ } else {
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ MHL_DST_LM_CLK_MODE_NORMAL);
+ }
+}
+
+static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
+{
+ u8 st[MHL_DST_SIZE], xst[MHL_XDS_SIZE];
+
+ sii8620_read_buf(ctx, REG_MHL_STAT_0, st, MHL_DST_SIZE);
+ sii8620_read_buf(ctx, REG_MHL_EXTSTAT_0, xst, MHL_XDS_SIZE);
+
+ sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
+ sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
+
+ if (st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+ sii8620_status_changed_dcap(ctx);
+
+ if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+ sii8620_status_changed_path(ctx);
+}
+
+static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
+{
+ u8 ints[MHL_INT_SIZE];
+
+ sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+ sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE);
+}
+
+static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
+{
+ struct device *dev = ctx->dev;
+
+ if (list_empty(&ctx->mt_queue)) {
+ dev_err(dev, "unexpected MSC MT response\n");
+ return NULL;
+ }
+
+ return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
+}
+
+static void sii8620_msc_mt_done(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+
+ if (!msg)
+ return;
+
+ msg->ret = sii8620_readb(ctx, REG_MSC_MT_RCVD_DATA0);
+ ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx)
+{
+ struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+ u8 buf[2];
+
+ if (!msg)
+ return;
+
+ sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2);
+
+ switch (buf[0]) {
+ case MHL_MSC_MSG_RAPK:
+ msg->ret = buf[1];
+ ctx->mt_state = MT_STATE_DONE;
+ break;
+ default:
+ dev_err(ctx->dev, "%s message type %d,%d not supported",
+ __func__, buf[0], buf[1]);
+ }
+}
+
+static void sii8620_irq_msc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_INT_0);
+
+ if (stat & ~BIT_CBUS_HPD_CHG)
+ sii8620_write(ctx, REG_CBUS_INT_0, stat & ~BIT_CBUS_HPD_CHG);
+
+ if (stat & BIT_CBUS_HPD_CHG) {
+ u8 cbus_stat = sii8620_readb(ctx, REG_CBUS_STATUS);
+
+ if ((cbus_stat ^ ctx->cbus_status) & BIT_CBUS_STATUS_CBUS_HPD) {
+ sii8620_write(ctx, REG_CBUS_INT_0, BIT_CBUS_HPD_CHG);
+ } else {
+ stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+ cbus_stat ^= BIT_CBUS_STATUS_CBUS_HPD;
+ }
+ ctx->cbus_status = cbus_stat;
+ }
+
+ if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
+ sii8620_msc_mr_write_stat(ctx);
+
+ if (stat & BIT_CBUS_MSC_MR_SET_INT)
+ sii8620_msc_mr_set_int(ctx);
+
+ if (stat & BIT_CBUS_MSC_MT_DONE)
+ sii8620_msc_mt_done(ctx);
+
+ if (stat & BIT_CBUS_MSC_MR_MSC_MSG)
+ sii8620_msc_mr_msc_msg(ctx);
+}
+
+static void sii8620_irq_coc(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_COC_INTR);
+
+ sii8620_write(ctx, REG_COC_INTR, stat);
+}
+
+static void sii8620_irq_merr(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_CBUS_INT_1);
+
+ sii8620_write(ctx, REG_CBUS_INT_1, stat);
+}
+
+static void sii8620_irq_edid(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR9);
+
+ sii8620_write(ctx, REG_INTR9, stat);
+
+ if (stat & BIT_INTR9_DEVCAP_DONE)
+ ctx->mt_state = MT_STATE_DONE;
+}
+
+static void sii8620_scdt_high(struct sii8620 *ctx)
+{
+ sii8620_write_seq_static(ctx,
+ REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
+ REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
+ );
+}
+
+static void sii8620_scdt_low(struct sii8620 *ctx)
+{
+ sii8620_write(ctx, REG_TMDS_CSTAT_P3,
+ BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS |
+ BIT_TMDS_CSTAT_P3_CLR_AVI);
+
+ sii8620_stop_video(ctx);
+
+ sii8620_write(ctx, REG_INTR8_MASK, 0);
+}
+
+static void sii8620_irq_scdt(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR5);
+
+ if (stat & BIT_INTR_SCDT_CHANGE) {
+ u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
+
+ if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+ sii8620_scdt_high(ctx);
+ else
+ sii8620_scdt_low(ctx);
+ }
+
+ sii8620_write(ctx, REG_INTR5, stat);
+}
+
+static void sii8620_new_vsi(struct sii8620 *ctx)
+{
+ u8 vsif[11];
+
+ sii8620_write(ctx, REG_RX_HDMI_CTRL2,
+ VAL_RX_HDMI_CTRL2_DEFVAL |
+ BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
+ sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
+ ARRAY_SIZE(vsif));
+}
+
+static void sii8620_new_avi(struct sii8620 *ctx)
+{
+ sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
+ sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
+ ARRAY_SIZE(ctx->avif));
+}
+
+static void sii8620_irq_infr(struct sii8620 *ctx)
+{
+ u8 stat = sii8620_readb(ctx, REG_INTR8)
+ & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
+
+ sii8620_write(ctx, REG_INTR8, stat);
+
+ if (stat & BIT_CEA_NEW_VSI)
+ sii8620_new_vsi(ctx);
+
+ if (stat & BIT_CEA_NEW_AVI)
+ sii8620_new_avi(ctx);
+
+ if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
+ sii8620_start_video(ctx);
+}
+
+/* endian-agnostic, non-volatile version of test_bit() */
+static bool sii8620_test_bit(unsigned int nr, const u8 *addr)
+{
+ return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE));
+}
+
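+/*
+ * Threaded interrupt handler: read the whole fast interrupt status
+ * block once, dispatch to each handler whose bit is set, then
+ * advance the MT queue. Any I2C error collected on the way tears the
+ * MHL link down.
+ */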
+static irqreturn_t sii8620_irq_thread(int irq, void *data)
+{
+ static const struct {
+ int bit;
+ void (*handler)(struct sii8620 *ctx);
+ } irq_vec[] = {
+ { BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc },
+ { BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb },
+ { BIT_FAST_INTR_STAT_COC, sii8620_irq_coc },
+ { BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc },
+ { BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr },
+ { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
+ { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
+ { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
+ };
+ struct sii8620 *ctx = data;
+ u8 stats[LEN_FAST_INTR_STAT];
+ int i, ret;
+
+ mutex_lock(&ctx->lock);
+
+ sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats));
+ for (i = 0; i < ARRAY_SIZE(irq_vec); ++i)
+ if (sii8620_test_bit(irq_vec[i].bit, stats))
+ irq_vec[i].handler(ctx);
+
+ sii8620_mt_work(ctx);
+
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret);
+ sii8620_mhl_disconnected(ctx);
+ }
+ mutex_unlock(&ctx->lock);
+
+ return IRQ_HANDLED;
+}
+
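+/*
+ * Power-up path: enable supplies and the xtal clock, reset the chip,
+ * verify it responds on I2C, program discovery defaults and only
+ * then unmask the interrupt line.
+ */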
+static void sii8620_cable_in(struct sii8620 *ctx)
+{
+ struct device *dev = ctx->dev;
+ u8 ver[5];
+ int ret;
+
+ ret = sii8620_hw_on(ctx);
+ if (ret) {
+ dev_err(dev, "Error powering on, %d.\n", ret);
+ return;
+ }
+ sii8620_hw_reset(ctx);
+
+ sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+ return;
+ }
+
+ dev_info(dev, "ChipID %02x%02x:%02x%02x rev %02x.\n", ver[1], ver[0],
+ ver[3], ver[2], ver[4]);
+
+ sii8620_write(ctx, REG_DPD,
+ BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN);
+
+ sii8620_xtal_set_rate(ctx);
+ sii8620_disconnect(ctx);
+
+ sii8620_write_seq_static(ctx,
+ REG_MHL_CBUS_CTL0, VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG
+ | VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734,
+ REG_MHL_CBUS_CTL1, VAL_MHL_CBUS_CTL1_1115_OHM,
+ REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN,
+ );
+
+ ret = sii8620_clear_error(ctx);
+ if (ret) {
+ dev_err(dev, "Error accessing I2C bus, %d.\n", ret);
+ return;
+ }
+
+ enable_irq(to_i2c_client(ctx->dev)->irq);
+}
+
+static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sii8620, bridge);
+}
+
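+/*
+ * Mode filtering: interlaced modes are rejected; the pixel clock is
+ * capped at 74.25 MHz, or 300 MHz when the sink advertises
+ * packed-pixel support in its DEVCAP.
+ */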
+static bool sii8620_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ bool ret = false;
+ int max_clock = 74250;
+
+ mutex_lock(&ctx->lock);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ goto out;
+
+ if (ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL)
+ max_clock = 300000;
+
+ ret = mode->clock <= max_clock;
+
+out:
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+static const struct drm_bridge_funcs sii8620_bridge_funcs = {
+ .mode_fixup = sii8620_mode_fixup,
+};
+
+static int sii8620_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct sii8620 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+ mutex_init(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->mt_queue);
+
+ ctx->clk_xtal = devm_clk_get(dev, "xtal");
+ if (IS_ERR(ctx->clk_xtal)) {
+ dev_err(dev, "failed to get xtal clock from DT\n");
+ return PTR_ERR(ctx->clk_xtal);
+ }
+
+ if (!client->irq) {
+ dev_err(dev, "no irq provided\n");
+ return -EINVAL;
+ }
+ irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ sii8620_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "sii8620", ctx);
+ if (ret)
+ return ret;
+
+ ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->gpio_reset)) {
+ dev_err(dev, "failed to get reset gpio from DT\n");
+ return PTR_ERR(ctx->gpio_reset);
+ }
+
+ ctx->supplies[0].supply = "cvcc10";
+ ctx->supplies[1].supply = "iovcc18";
+ ret = devm_regulator_bulk_get(dev, 2, ctx->supplies);
+ if (ret)
+ return ret;
+
+ i2c_set_clientdata(client, ctx);
+
+ ctx->bridge.funcs = &sii8620_bridge_funcs;
+ ctx->bridge.of_node = dev->of_node;
+ drm_bridge_add(&ctx->bridge);
+
+ sii8620_cable_in(ctx);
+
+ return 0;
+}
+
+static int sii8620_remove(struct i2c_client *client)
+{
+ struct sii8620 *ctx = i2c_get_clientdata(client);
+
+ disable_irq(to_i2c_client(ctx->dev)->irq);
+ drm_bridge_remove(&ctx->bridge);
+ sii8620_hw_off(ctx);
+
+ return 0;
+}
+
+static const struct of_device_id sii8620_dt_match[] = {
+ { .compatible = "sil,sii8620" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sii8620_dt_match);
+
+static const struct i2c_device_id sii8620_id[] = {
+ { "sii8620", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, sii8620_id);
+static struct i2c_driver sii8620_driver = {
+ .driver = {
+ .name = "sii8620",
+ .of_match_table = of_match_ptr(sii8620_dt_match),
+ },
+ .probe = sii8620_probe,
+ .remove = sii8620_remove,
+ .id_table = sii8620_id,
+};
+
+module_i2c_driver(sii8620_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.h b/drivers/gpu/drm/bridge/sil-sii8620.h
new file mode 100644
index 000000000000..6ff616a4f6ce
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sil-sii8620.h
@@ -0,0 +1,1517 @@
+/*
+ * Registers of Silicon Image SiI8620 Mobile HD Transmitter
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on MHL driver for Android devices.
+ * Copyright (C) 2013-2014 Silicon Image, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SIL_SII8620_H__
+#define __SIL_SII8620_H__
+
+/* Vendor ID Low byte, default value: 0x01 */
+#define REG_VND_IDL 0x0000
+
+/* Vendor ID High byte, default value: 0x00 */
+#define REG_VND_IDH 0x0001
+
+/* Device ID Low byte, default value: 0x60 */
+#define REG_DEV_IDL 0x0002
+
+/* Device ID High byte, default value: 0x86 */
+#define REG_DEV_IDH 0x0003
+
+/* Device Revision, default value: 0x10 */
+#define REG_DEV_REV 0x0004
+
+/* OTP DBYTE510, default value: 0x00 */
+#define REG_OTP_DBYTE510 0x0006
+
+/* System Control #1, default value: 0x00 */
+#define REG_SYS_CTRL1 0x0008
+#define BIT_SYS_CTRL1_OTPVMUTEOVR_SET BIT(7)
+#define BIT_SYS_CTRL1_VSYNCPIN BIT(6)
+#define BIT_SYS_CTRL1_OTPADROPOVR_SET BIT(5)
+#define BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD BIT(4)
+#define BIT_SYS_CTRL1_OTP2XVOVR_EN BIT(3)
+#define BIT_SYS_CTRL1_OTP2XAOVR_EN BIT(2)
+#define BIT_SYS_CTRL1_TX_CTRL_HDMI BIT(1)
+#define BIT_SYS_CTRL1_OTPAMUTEOVR_SET BIT(0)
+
+/* System Control DPD, default value: 0x90 */
+#define REG_DPD 0x000b
+#define BIT_DPD_PWRON_PLL BIT(7)
+#define BIT_DPD_PDNTX12 BIT(6)
+#define BIT_DPD_PDNRX12 BIT(5)
+#define BIT_DPD_OSC_EN BIT(4)
+#define BIT_DPD_PWRON_HSIC BIT(3)
+#define BIT_DPD_PDIDCK_N BIT(2)
+#define BIT_DPD_PD_MHL_CLK_N BIT(1)
+
+/* Dual link Control, default value: 0x00 */
+#define REG_DCTL 0x000d
+#define BIT_DCTL_TDM_LCLK_PHASE BIT(7)
+#define BIT_DCTL_HSIC_CLK_PHASE BIT(6)
+#define BIT_DCTL_CTS_TCK_PHASE BIT(5)
+#define BIT_DCTL_EXT_DDC_SEL BIT(4)
+#define BIT_DCTL_TRANSCODE BIT(3)
+#define BIT_DCTL_HSIC_RX_STROBE_PHASE BIT(2)
+#define BIT_DCTL_HSIC_TX_BIST_START_SEL BIT(1)
+#define BIT_DCTL_TCLKNX_PHASE BIT(0)
+
+/* PWD Software Reset, default value: 0x20 */
+#define REG_PWD_SRST 0x000e
+#define BIT_PWD_SRST_COC_DOC_RST BIT(7)
+#define BIT_PWD_SRST_CBUS_RST_SW BIT(6)
+#define BIT_PWD_SRST_CBUS_RST_SW_EN BIT(5)
+#define BIT_PWD_SRST_MHLFIFO_RST BIT(4)
+#define BIT_PWD_SRST_CBUS_RST BIT(3)
+#define BIT_PWD_SRST_SW_RST_AUTO BIT(2)
+#define BIT_PWD_SRST_HDCP2X_SW_RST BIT(1)
+#define BIT_PWD_SRST_SW_RST BIT(0)
+
+/* AKSV_1, default value: 0x00 */
+#define REG_AKSV_1 0x001d
+
+/* Video H Resolution #1, default value: 0x00 */
+#define REG_H_RESL 0x003a
+
+/* Video Mode, default value: 0x00 */
+#define REG_VID_MODE 0x004a
+#define BIT_VID_MODE_M1080P BIT(6)
+
+/* Video Input Mode, default value: 0xc0 */
+#define REG_VID_OVRRD 0x0051
+#define BIT_VID_OVRRD_PP_AUTO_DISABLE BIT(7)
+#define BIT_VID_OVRRD_M1080P_OVRRD BIT(6)
+#define BIT_VID_OVRRD_MINIVSYNC_ON BIT(5)
+#define BIT_VID_OVRRD_3DCONV_EN_FRAME_PACK BIT(4)
+#define BIT_VID_OVRRD_ENABLE_AUTO_PATH_EN BIT(3)
+#define BIT_VID_OVRRD_ENRGB2YCBCR_OVRRD BIT(2)
+#define BIT_VID_OVRRD_ENDOWNSAMPLE_OVRRD BIT(0)
+
+/* I2C Address reassignment, default value: 0x00 */
+#define REG_PAGE_MHLSPEC_ADDR 0x0057
+#define REG_PAGE7_ADDR 0x0058
+#define REG_PAGE8_ADDR 0x005c
+
+/* Fast Interrupt Status, default value: 0x00 */
+#define REG_FAST_INTR_STAT 0x005f
+#define LEN_FAST_INTR_STAT 7
+#define BIT_FAST_INTR_STAT_TIMR 8
+#define BIT_FAST_INTR_STAT_INT2 9
+#define BIT_FAST_INTR_STAT_DDC 10
+#define BIT_FAST_INTR_STAT_SCDT 11
+#define BIT_FAST_INTR_STAT_INFR 13
+#define BIT_FAST_INTR_STAT_EDID 14
+#define BIT_FAST_INTR_STAT_HDCP 15
+#define BIT_FAST_INTR_STAT_MSC 16
+#define BIT_FAST_INTR_STAT_MERR 17
+#define BIT_FAST_INTR_STAT_G2WB 18
+#define BIT_FAST_INTR_STAT_G2WB_ERR 19
+#define BIT_FAST_INTR_STAT_DISC 28
+#define BIT_FAST_INTR_STAT_BLOCK 30
+#define BIT_FAST_INTR_STAT_LTRN 31
+#define BIT_FAST_INTR_STAT_HDCP2 32
+#define BIT_FAST_INTR_STAT_TDM 42
+#define BIT_FAST_INTR_STAT_COC 51
+
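(A note on the block above: unlike most definitions in this header, the BIT_FAST_INTR_STAT_* values are absolute bit indices into the LEN_FAST_INTR_STAT-byte status block, not BIT() masks. Presumably they are decomposed into a byte offset plus a bit position when the status block is scanned; an illustrative helper, not part of the patch:

static bool sii8620_fast_intr_pending(const u8 *stat, unsigned int idx)
{
        /* byte idx/8 of the status block, bit idx%8 within that byte */
        return stat[idx / 8] & BIT(idx % 8);
}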
+/* GPIO Control, default value: 0x15 */
+#define REG_GPIO_CTRL1 0x006e
+#define BIT_CTRL1_GPIO_I_8 BIT(5)
+#define BIT_CTRL1_GPIO_OEN_8 BIT(4)
+#define BIT_CTRL1_GPIO_I_7 BIT(3)
+#define BIT_CTRL1_GPIO_OEN_7 BIT(2)
+#define BIT_CTRL1_GPIO_I_6 BIT(1)
+#define BIT_CTRL1_GPIO_OEN_6 BIT(0)
+
+/* Interrupt Control, default value: 0x06 */
+#define REG_INT_CTRL 0x006f
+#define BIT_INT_CTRL_SOFTWARE_WP BIT(7)
+#define BIT_INT_CTRL_INTR_OD BIT(2)
+#define BIT_INT_CTRL_INTR_POLARITY BIT(1)
+
+/* Interrupt State, default value: 0x00 */
+#define REG_INTR_STATE 0x0070
+#define BIT_INTR_STATE_INTR_STATE BIT(0)
+
+/* Interrupt Source #1, default value: 0x00 */
+#define REG_INTR1 0x0071
+
+/* Interrupt Source #2, default value: 0x00 */
+#define REG_INTR2 0x0072
+
+/* Interrupt Source #3, default value: 0x01 */
+#define REG_INTR3 0x0073
+#define BIT_DDC_CMD_DONE BIT(3)
+
+/* Interrupt Source #5, default value: 0x00 */
+#define REG_INTR5 0x0074
+
+/* Interrupt #1 Mask, default value: 0x00 */
+#define REG_INTR1_MASK 0x0075
+
+/* Interrupt #2 Mask, default value: 0x00 */
+#define REG_INTR2_MASK 0x0076
+
+/* Interrupt #3 Mask, default value: 0x00 */
+#define REG_INTR3_MASK 0x0077
+
+/* Interrupt #5 Mask, default value: 0x00 */
+#define REG_INTR5_MASK 0x0078
+#define BIT_INTR_SCDT_CHANGE BIT(0)
+
+/* Hot Plug Connection Control, default value: 0x45 */
+#define REG_HPD_CTRL 0x0079
+#define BIT_HPD_CTRL_HPD_DS_SIGNAL BIT(7)
+#define BIT_HPD_CTRL_HPD_OUT_OD_EN BIT(6)
+#define BIT_HPD_CTRL_HPD_HIGH BIT(5)
+#define BIT_HPD_CTRL_HPD_OUT_OVR_EN BIT(4)
+#define BIT_HPD_CTRL_GPIO_I_1 BIT(3)
+#define BIT_HPD_CTRL_GPIO_OEN_1 BIT(2)
+#define BIT_HPD_CTRL_GPIO_I_0 BIT(1)
+#define BIT_HPD_CTRL_GPIO_OEN_0 BIT(0)
+
+/* GPIO Control, default value: 0x55 */
+#define REG_GPIO_CTRL 0x007a
+#define BIT_CTRL_GPIO_I_5 BIT(7)
+#define BIT_CTRL_GPIO_OEN_5 BIT(6)
+#define BIT_CTRL_GPIO_I_4 BIT(5)
+#define BIT_CTRL_GPIO_OEN_4 BIT(4)
+#define BIT_CTRL_GPIO_I_3 BIT(3)
+#define BIT_CTRL_GPIO_OEN_3 BIT(2)
+#define BIT_CTRL_GPIO_I_2 BIT(1)
+#define BIT_CTRL_GPIO_OEN_2 BIT(0)
+
+/* Interrupt Source 7, default value: 0x00 */
+#define REG_INTR7 0x007b
+
+/* Interrupt Source 8, default value: 0x00 */
+#define REG_INTR8 0x007c
+
+/* Interrupt #7 Mask, default value: 0x00 */
+#define REG_INTR7_MASK 0x007d
+
+/* Interrupt #8 Mask, default value: 0x00 */
+#define REG_INTR8_MASK 0x007e
+#define BIT_CEA_NEW_VSI BIT(2)
+#define BIT_CEA_NEW_AVI BIT(1)
+
+/* TMDS Clock Control, default value: 0x10 */
+#define REG_TMDS_CCTRL 0x0080
+#define BIT_TMDS_CCTRL_TMDS_OE BIT(4)
+
+/* TMDS Control #4, default value: 0x02 */
+#define REG_TMDS_CTRL4 0x0085
+#define BIT_TMDS_CTRL4_SCDT_CKDT_SEL BIT(1)
+#define BIT_TMDS_CTRL4_TX_EN_BY_SCDT BIT(0)
+
+/* BIST CNTL, default value: 0x00 */
+#define REG_BIST_CTRL 0x00bb
+#define BIT_RXBIST_VGB_EN BIT(7)
+#define BIT_TXBIST_VGB_EN BIT(6)
+#define BIT_BIST_START_SEL BIT(5)
+#define BIT_BIST_START_BIT BIT(4)
+#define BIT_BIST_ALWAYS_ON BIT(3)
+#define BIT_BIST_TRANS BIT(2)
+#define BIT_BIST_RESET BIT(1)
+#define BIT_BIST_EN BIT(0)
+
+/* BIST TEST SEL, default value: 0x00 */
+#define REG_BIST_TEST_SEL 0x00bd
+#define MSK_BIST_TEST_SEL_BIST_PATT_SEL 0x0f
+
+/* BIST VIDEO_MODE, default value: 0x00 */
+#define REG_BIST_VIDEO_MODE 0x00be
+#define MSK_BIST_VIDEO_MODE_BIST_VIDEO_MODE_3_0 0x0f
+
+/* BIST DURATION0, default value: 0x00 */
+#define REG_BIST_DURATION_0 0x00bf
+
+/* BIST DURATION1, default value: 0x00 */
+#define REG_BIST_DURATION_1 0x00c0
+
+/* BIST DURATION2, default value: 0x00 */
+#define REG_BIST_DURATION_2 0x00c1
+
+/* BIST 8BIT_PATTERN, default value: 0x00 */
+#define REG_BIST_8BIT_PATTERN 0x00c2
+
+/* LM DDC, default value: 0x80 */
+#define REG_LM_DDC 0x00c7
+#define BIT_LM_DDC_SW_TPI_EN_DISABLED BIT(7)
+
+#define BIT_LM_DDC_VIDEO_MUTE_EN BIT(5)
+#define BIT_LM_DDC_DDC_TPI_SW BIT(2)
+#define BIT_LM_DDC_DDC_GRANT BIT(1)
+#define BIT_LM_DDC_DDC_GPU_REQUEST BIT(0)
+
+/* DDC I2C Manual, default value: 0x03 */
+#define REG_DDC_MANUAL 0x00ec
+#define BIT_DDC_MANUAL_MAN_DDC BIT(7)
+#define BIT_DDC_MANUAL_VP_SEL BIT(6)
+#define BIT_DDC_MANUAL_DSDA BIT(5)
+#define BIT_DDC_MANUAL_DSCL BIT(4)
+#define BIT_DDC_MANUAL_GCP_HW_CTL_EN BIT(3)
+#define BIT_DDC_MANUAL_DDCM_ABORT_WP BIT(2)
+#define BIT_DDC_MANUAL_IO_DSDA BIT(1)
+#define BIT_DDC_MANUAL_IO_DSCL BIT(0)
+
+/* DDC I2C Target Slave Address, default value: 0x00 */
+#define REG_DDC_ADDR 0x00ed
+#define MSK_DDC_ADDR_DDC_ADDR 0xfe
+
+/* DDC I2C Target Segment Address, default value: 0x00 */
+#define REG_DDC_SEGM 0x00ee
+
+/* DDC I2C Target Offset Address, default value: 0x00 */
+#define REG_DDC_OFFSET 0x00ef
+
+/* DDC I2C Data In count #1, default value: 0x00 */
+#define REG_DDC_DIN_CNT1 0x00f0
+
+/* DDC I2C Data In count #2, default value: 0x00 */
+#define REG_DDC_DIN_CNT2 0x00f1
+#define MSK_DDC_DIN_CNT2_DDC_DIN_CNT_9_8 0x03
+
+/* DDC I2C Status, default value: 0x04 */
+#define REG_DDC_STATUS 0x00f2
+#define BIT_DDC_STATUS_DDC_BUS_LOW BIT(6)
+#define BIT_DDC_STATUS_DDC_NO_ACK BIT(5)
+#define BIT_DDC_STATUS_DDC_I2C_IN_PROG BIT(4)
+#define BIT_DDC_STATUS_DDC_FIFO_FULL BIT(3)
+#define BIT_DDC_STATUS_DDC_FIFO_EMPTY BIT(2)
+#define BIT_DDC_STATUS_DDC_FIFO_READ_IN_USE BIT(1)
+#define BIT_DDC_STATUS_DDC_FIFO_WRITE_IN_USE BIT(0)
+
+/* DDC I2C Command, default value: 0x70 */
+#define REG_DDC_CMD 0x00f3
+#define BIT_DDC_CMD_HDCP_DDC_EN BIT(6)
+#define BIT_DDC_CMD_SDA_DEL_EN BIT(5)
+#define BIT_DDC_CMD_DDC_FLT_EN BIT(4)
+
+#define MSK_DDC_CMD_DDC_CMD 0x0f
+#define VAL_DDC_CMD_ENH_DDC_READ_NO_ACK 0x04
+#define VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO 0x09
+#define VAL_DDC_CMD_DDC_CMD_ABORT 0x0f
+
+/* DDC I2C FIFO Data In/Out, default value: 0x00 */
+#define REG_DDC_DATA 0x00f4
+
+/* DDC I2C Data Out Counter, default value: 0x00 */
+#define REG_DDC_DOUT_CNT 0x00f5
+#define BIT_DDC_DOUT_CNT_DDC_DELAY_CNT_8 BIT(7)
+#define MSK_DDC_DOUT_CNT_DDC_DATA_OUT_CNT 0x1f
+
+/* DDC I2C Delay Count, default value: 0x14 */
+#define REG_DDC_DELAY_CNT 0x00f6
+
+/* Test Control, default value: 0x80 */
+#define REG_TEST_TXCTRL 0x00f7
+#define BIT_TEST_TXCTRL_RCLK_REF_SEL BIT(7)
+#define BIT_TEST_TXCTRL_PCLK_REF_SEL BIT(6)
+#define MSK_TEST_TXCTRL_BYPASS_PLL_CLK 0x3c
+#define BIT_TEST_TXCTRL_HDMI_MODE BIT(1)
+#define BIT_TEST_TXCTRL_TST_PLLCK BIT(0)
+
+/* CBUS Address, default value: 0x00 */
+#define REG_PAGE_CBUS_ADDR 0x00f8
+
+/* I2C Device Address re-assignment */
+#define REG_PAGE1_ADDR 0x00fc
+#define REG_PAGE2_ADDR 0x00fd
+#define REG_PAGE3_ADDR 0x00fe
+#define REG_HW_TPI_ADDR 0x00ff
+
+/* USBT CTRL0, default value: 0x00 */
+#define REG_UTSRST 0x0100
+#define BIT_UTSRST_FC_SRST BIT(5)
+#define BIT_UTSRST_KEEPER_SRST BIT(4)
+#define BIT_UTSRST_HTX_SRST BIT(3)
+#define BIT_UTSRST_TRX_SRST BIT(2)
+#define BIT_UTSRST_TTX_SRST BIT(1)
+#define BIT_UTSRST_HRX_SRST BIT(0)
+
+/* HSIC RX Control3, default value: 0x07 */
+#define REG_HRXCTRL3 0x0104
+#define MSK_HRXCTRL3_HRX_AFFCTRL 0xf0
+#define BIT_HRXCTRL3_HRX_OUT_EN BIT(2)
+#define BIT_HRXCTRL3_STATUS_EN BIT(1)
+#define BIT_HRXCTRL3_HRX_STAY_RESET BIT(0)
+
+/* HSIC RX INT Registers */
+#define REG_HRXINTL 0x0111
+#define REG_HRXINTH 0x0112
+
+/* TDM TX NUMBITS, default value: 0x0c */
+#define REG_TTXNUMB 0x0116
+#define MSK_TTXNUMB_TTX_AFFCTRL_3_0 0xf0
+#define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT BIT(3)
+#define MSK_TTXNUMB_TTX_NUMBPS_2_0 0x07
+
+/* TDM TX NUMSPISYM, default value: 0x04 */
+#define REG_TTXSPINUMS 0x0117
+
+/* TDM TX NUMHSICSYM, default value: 0x14 */
+#define REG_TTXHSICNUMS 0x0118
+
+/* TDM TX NUMTOTSYM, default value: 0x18 */
+#define REG_TTXTOTNUMS 0x0119
+
+/* TDM TX INT Low, default value: 0x00 */
+#define REG_TTXINTL 0x0136
+#define BIT_TTXINTL_TTX_INTR7 BIT(7)
+#define BIT_TTXINTL_TTX_INTR6 BIT(6)
+#define BIT_TTXINTL_TTX_INTR5 BIT(5)
+#define BIT_TTXINTL_TTX_INTR4 BIT(4)
+#define BIT_TTXINTL_TTX_INTR3 BIT(3)
+#define BIT_TTXINTL_TTX_INTR2 BIT(2)
+#define BIT_TTXINTL_TTX_INTR1 BIT(1)
+#define BIT_TTXINTL_TTX_INTR0 BIT(0)
+
+/* TDM TX INT High, default value: 0x00 */
+#define REG_TTXINTH 0x0137
+#define BIT_TTXINTH_TTX_INTR15 BIT(7)
+#define BIT_TTXINTH_TTX_INTR14 BIT(6)
+#define BIT_TTXINTH_TTX_INTR13 BIT(5)
+#define BIT_TTXINTH_TTX_INTR12 BIT(4)
+#define BIT_TTXINTH_TTX_INTR11 BIT(3)
+#define BIT_TTXINTH_TTX_INTR10 BIT(2)
+#define BIT_TTXINTH_TTX_INTR9 BIT(1)
+#define BIT_TTXINTH_TTX_INTR8 BIT(0)
+
+/* TDM RX Control, default value: 0x1c */
+#define REG_TRXCTRL 0x013b
+#define BIT_TRXCTRL_TRX_CLR_WVALLOW BIT(4)
+#define BIT_TRXCTRL_TRX_FROM_SE_COC BIT(3)
+#define MSK_TRXCTRL_TRX_NUMBPS_2_0 0x07
+
+/* TDM RX NUMSPISYM, default value: 0x04 */
+#define REG_TRXSPINUMS 0x013c
+
+/* TDM RX NUMHSICSYM, default value: 0x14 */
+#define REG_TRXHSICNUMS 0x013d
+
+/* TDM RX NUMTOTSYM, default value: 0x18 */
+#define REG_TRXTOTNUMS 0x013e
+
+/* TDM RX Status 2nd, default value: 0x00 */
+#define REG_TRXSTA2 0x015c
+
+/* TDM RX INT Low, default value: 0x00 */
+#define REG_TRXINTL 0x0163
+
+/* TDM RX INT High, default value: 0x00 */
+#define REG_TRXINTH 0x0164
+
+/* TDM RX INTMASK High, default value: 0x00 */
+#define REG_TRXINTMH 0x0166
+
+/* HSIC TX CTRL, default value: 0x00 */
+#define REG_HTXCTRL 0x0169
+#define BIT_HTXCTRL_HTX_ALLSBE_SOP BIT(4)
+#define BIT_HTXCTRL_HTX_RGDINV_USB BIT(3)
+#define BIT_HTXCTRL_HTX_RSPTDM_BUSY BIT(2)
+#define BIT_HTXCTRL_HTX_DRVCONN1 BIT(1)
+#define BIT_HTXCTRL_HTX_DRVRST1 BIT(0)
+
+/* HSIC TX INT Low, default value: 0x00 */
+#define REG_HTXINTL 0x017d
+
+/* HSIC TX INT High, default value: 0x00 */
+#define REG_HTXINTH 0x017e
+
+/* HSIC Keeper, default value: 0x00 */
+#define REG_KEEPER 0x0181
+#define MSK_KEEPER_KEEPER_MODE_1_0 0x03
+
+/* HSIC Flow Control General, default value: 0x02 */
+#define REG_FCGC 0x0183
+#define BIT_FCGC_HSIC_FC_HOSTMODE BIT(1)
+#define BIT_FCGC_HSIC_FC_ENABLE BIT(0)
+
+/* HSIC Flow Control CTR13, default value: 0xfc */
+#define REG_FCCTR13 0x0191
+
+/* HSIC Flow Control CTR14, default value: 0xff */
+#define REG_FCCTR14 0x0192
+
+/* HSIC Flow Control CTR15, default value: 0xff */
+#define REG_FCCTR15 0x0193
+
+/* HSIC Flow Control CTR50, default value: 0x03 */
+#define REG_FCCTR50 0x01b6
+
+/* HSIC Flow Control INTR0, default value: 0x00 */
+#define REG_FCINTR0 0x01ec
+#define REG_FCINTR1 0x01ed
+#define REG_FCINTR2 0x01ee
+#define REG_FCINTR3 0x01ef
+#define REG_FCINTR4 0x01f0
+#define REG_FCINTR5 0x01f1
+#define REG_FCINTR6 0x01f2
+#define REG_FCINTR7 0x01f3
+
+/* TDM Low Latency, default value: 0x20 */
+#define REG_TDMLLCTL 0x01fc
+#define MSK_TDMLLCTL_TRX_LL_SEL_MANUAL 0xc0
+#define MSK_TDMLLCTL_TRX_LL_SEL_MODE 0x30
+#define MSK_TDMLLCTL_TTX_LL_SEL_MANUAL 0x0c
+#define BIT_TDMLLCTL_TTX_LL_TIE_LOW BIT(1)
+#define BIT_TDMLLCTL_TTX_LL_SEL_MODE BIT(0)
+
+/* TMDS 0 Clock Control, default value: 0x10 */
+#define REG_TMDS0_CCTRL1 0x0210
+#define MSK_TMDS0_CCTRL1_TEST_SEL 0xc0
+#define MSK_TMDS0_CCTRL1_CLK1X_CTL 0x30
+
+/* TMDS Clock Enable, default value: 0x00 */
+#define REG_TMDS_CLK_EN 0x0211
+#define BIT_TMDS_CLK_EN_CLK_EN BIT(0)
+
+/* TMDS Channel Enable, default value: 0x00 */
+#define REG_TMDS_CH_EN 0x0212
+#define BIT_TMDS_CH_EN_CH0_EN BIT(4)
+#define BIT_TMDS_CH_EN_CH12_EN BIT(0)
+
+/* BGR_BIAS, default value: 0x07 */
+#define REG_BGR_BIAS 0x0215
+#define BIT_BGR_BIAS_BGR_EN BIT(7)
+#define MSK_BGR_BIAS_BIAS_BGR_D 0x0f
+
+/* TMDS 0 Digital I2C BW, default value: 0x0a */
+#define REG_ALICE0_BW_I2C 0x0231
+
+/* TMDS 0 Digital Zone Control, default value: 0xe0 */
+#define REG_ALICE0_ZONE_CTRL 0x024c
+#define BIT_ALICE0_ZONE_CTRL_ICRST_N BIT(7)
+#define BIT_ALICE0_ZONE_CTRL_USE_INT_DIV20 BIT(6)
+#define MSK_ALICE0_ZONE_CTRL_SZONE_I2C 0x30
+#define MSK_ALICE0_ZONE_CTRL_ZONE_CTRL 0x0f
+
+/* TMDS 0 Digital PLL Mode Control, default value: 0x00 */
+#define REG_ALICE0_MODE_CTRL 0x024d
+#define MSK_ALICE0_MODE_CTRL_PLL_MODE_I2C 0x0c
+#define MSK_ALICE0_MODE_CTRL_DIV20_CTRL 0x03
+
+/* MHL Tx Control 6th, default value: 0xa0 */
+#define REG_MHLTX_CTL6 0x0285
+#define MSK_MHLTX_CTL6_EMI_SEL 0xe0
+#define MSK_MHLTX_CTL6_TX_CLK_SHAPE_9_8 0x03
+
+/* Packet Filter0, default value: 0x00 */
+#define REG_PKT_FILTER_0 0x0290
+#define BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT BIT(7)
+#define BIT_PKT_FILTER_0_DROP_CEA_CP_PKT BIT(6)
+#define BIT_PKT_FILTER_0_DROP_MPEG_PKT BIT(5)
+#define BIT_PKT_FILTER_0_DROP_SPIF_PKT BIT(4)
+#define BIT_PKT_FILTER_0_DROP_AIF_PKT BIT(3)
+#define BIT_PKT_FILTER_0_DROP_AVI_PKT BIT(2)
+#define BIT_PKT_FILTER_0_DROP_CTS_PKT BIT(1)
+#define BIT_PKT_FILTER_0_DROP_GCP_PKT BIT(0)
+
+/* Packet Filter1, default value: 0x00 */
+#define REG_PKT_FILTER_1 0x0291
+#define BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS BIT(7)
+#define BIT_PKT_FILTER_1_AVI_OVERRIDE_DIS BIT(6)
+#define BIT_PKT_FILTER_1_DROP_AUDIO_PKT BIT(3)
+#define BIT_PKT_FILTER_1_DROP_GEN2_PKT BIT(2)
+#define BIT_PKT_FILTER_1_DROP_GEN_PKT BIT(1)
+#define BIT_PKT_FILTER_1_DROP_VSIF_PKT BIT(0)
+
+/* TMDS Clock Status, default value: 0x10 */
+#define REG_TMDS_CSTAT_P3 0x02a0
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_CLR_MUTE BIT(7)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_SET_MUTE BIT(6)
+#define BIT_TMDS_CSTAT_P3_RX_HDMI_CP_NEW_CP BIT(5)
+#define BIT_TMDS_CSTAT_P3_CLR_AVI BIT(3)
+#define BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS BIT(2)
+#define BIT_TMDS_CSTAT_P3_SCDT BIT(1)
+#define BIT_TMDS_CSTAT_P3_CKDT BIT(0)
+
+/* RX_HDMI Control, default value: 0x10 */
+#define REG_RX_HDMI_CTRL0 0x02a1
+#define BIT_RX_HDMI_CTRL0_BYP_DVIFILT_SYNC BIT(5)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_EN_ITSELF_CLR BIT(4)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_SW_VALUE BIT(3)
+#define BIT_RX_HDMI_CTRL0_HDMI_MODE_OVERWRITE BIT(2)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE_EN BIT(1)
+#define BIT_RX_HDMI_CTRL0_RX_HDMI_HDMI_MODE BIT(0)
+
+/* RX_HDMI Control, default value: 0x38 */
+#define REG_RX_HDMI_CTRL2 0x02a3
+#define MSK_RX_HDMI_CTRL2_IDLE_CNT 0xf0
+#define VAL_RX_HDMI_CTRL2_IDLE_CNT(n) ((n) << 4)
+#define BIT_RX_HDMI_CTRL2_USE_AV_MUTE BIT(3)
+#define BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI BIT(0)
+
+/* RX_HDMI Control, default value: 0x0f */
+#define REG_RX_HDMI_CTRL3 0x02a4
+#define MSK_RX_HDMI_CTRL3_PP_MODE_CLK_EN 0x0f
+
+/* rx_hdmi Clear Buffer, default value: 0x00 */
+#define REG_RX_HDMI_CLR_BUFFER 0x02ac
+#define MSK_RX_HDMI_CLR_BUFFER_AIF4VSI_CMP 0xc0
+#define BIT_RX_HDMI_CLR_BUFFER_USE_AIF4VSI BIT(5)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_W_AVI BIT(4)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_IEEE_ID_CHK_EN BIT(3)
+#define BIT_RX_HDMI_CLR_BUFFER_SWAP_VSI_IEEE_ID BIT(2)
+#define BIT_RX_HDMI_CLR_BUFFER_AIF_CLR_EN BIT(1)
+#define BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN BIT(0)
+
+/* RX_HDMI VSI Header1, default value: 0x00 */
+#define REG_RX_HDMI_MON_PKT_HEADER1 0x02b8
+
+/* RX_HDMI VSI MHL Monitor, default value: 0x3c */
+#define REG_RX_HDMI_VSIF_MHL_MON 0x02d7
+
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_3D_FORMAT 0x3c
+#define MSK_RX_HDMI_VSIF_MHL_MON_RX_HDMI_MHL_VID_FORMAT 0x03
+
+/* Interrupt Source 9, default value: 0x00 */
+#define REG_INTR9 0x02e0
+#define BIT_INTR9_EDID_ERROR BIT(6)
+#define BIT_INTR9_EDID_DONE BIT(5)
+#define BIT_INTR9_DEVCAP_DONE BIT(4)
+
+/* Interrupt 9 Mask, default value: 0x00 */
+#define REG_INTR9_MASK 0x02e1
+
+/* TPI CBUS Start, default value: 0x00 */
+#define REG_TPI_CBUS_START 0x02e2
+#define BIT_TPI_CBUS_START_RCP_REQ_START BIT(7)
+#define BIT_TPI_CBUS_START_RCPK_REPLY_START BIT(6)
+#define BIT_TPI_CBUS_START_RCPE_REPLY_START BIT(5)
+#define BIT_TPI_CBUS_START_PUT_LINK_MODE_START BIT(4)
+#define BIT_TPI_CBUS_START_PUT_DCAPCHG_START BIT(3)
+#define BIT_TPI_CBUS_START_PUT_DCAPRDY_START BIT(2)
+#define BIT_TPI_CBUS_START_GET_EDID_START_0 BIT(1)
+#define BIT_TPI_CBUS_START_GET_DEVCAP_START BIT(0)
+
+/* EDID Control, default value: 0x10 */
+#define REG_EDID_CTRL 0x02e3
+#define BIT_EDID_CTRL_EDID_PRIME_VALID BIT(7)
+#define BIT_EDID_CTRL_XDEVCAP_EN BIT(6)
+#define BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP BIT(5)
+#define BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO BIT(4)
+#define BIT_EDID_CTRL_EDID_FIFO_ACCESS_ALWAYS_EN BIT(3)
+#define BIT_EDID_CTRL_EDID_FIFO_BLOCK_SEL BIT(2)
+#define BIT_EDID_CTRL_INVALID_BKSV BIT(1)
+#define BIT_EDID_CTRL_EDID_MODE_EN BIT(0)
+
+/* EDID FIFO Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR 0x02e9
+
+/* EDID FIFO Write Data, default value: 0x00 */
+#define REG_EDID_FIFO_WR_DATA 0x02ea
+
+/* EDID/DEVCAP FIFO Internal Addr, default value: 0x00 */
+#define REG_EDID_FIFO_ADDR_MON 0x02eb
+
+/* EDID FIFO Read Data, default value: 0x00 */
+#define REG_EDID_FIFO_RD_DATA 0x02ec
+
+/* EDID DDC Segment Pointer, default value: 0x00 */
+#define REG_EDID_START_EXT 0x02ed
+
+/* TX IP BIST CNTL and Status, default value: 0x00 */
+#define REG_TX_IP_BIST_CNTLSTA 0x02f2
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_QUARTER_CLK_SEL BIT(6)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_DONE BIT(5)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_ON BIT(4)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_RUN BIT(3)
+#define BIT_TX_IP_BIST_CNTLSTA_TXCLK_HALF_SEL BIT(2)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_EN BIT(1)
+#define BIT_TX_IP_BIST_CNTLSTA_TXBIST_SEL BIT(0)
+
+/* TX IP BIST INST LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_INST_LOW 0x02f3
+#define REG_TX_IP_BIST_INST_HIGH 0x02f4
+
+/* TX IP BIST PATTERN LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_PAT_LOW 0x02f5
+#define REG_TX_IP_BIST_PAT_HIGH 0x02f6
+
+/* TX IP BIST CONFIGURE LOW, default value: 0x00 */
+#define REG_TX_IP_BIST_CONF_LOW 0x02f7
+#define REG_TX_IP_BIST_CONF_HIGH 0x02f8
+
+/* E-MSC General Control, default value: 0x80 */
+#define REG_GENCTL 0x0300
+#define BIT_GENCTL_SPEC_TRANS_DIS BIT(7)
+#define BIT_GENCTL_DIS_XMIT_ERR_STATE BIT(6)
+#define BIT_GENCTL_SPI_MISO_EDGE BIT(5)
+#define BIT_GENCTL_SPI_MOSI_EDGE BIT(4)
+#define BIT_GENCTL_CLR_EMSC_RFIFO BIT(3)
+#define BIT_GENCTL_CLR_EMSC_XFIFO BIT(2)
+#define BIT_GENCTL_START_TRAIN_SEQ BIT(1)
+#define BIT_GENCTL_EMSC_EN BIT(0)
+
+/* E-MSC Comma ErrorCNT, default value: 0x03 */
+#define REG_COMMECNT 0x0305
+#define BIT_COMMECNT_I2C_TO_EMSC_EN BIT(7)
+#define MSK_COMMECNT_COMMA_CHAR_ERR_CNT 0x0f
+
+/* E-MSC RFIFO ByteCnt, default value: 0x00 */
+#define REG_EMSCRFIFOBCNTL 0x031a
+#define REG_EMSCRFIFOBCNTH 0x031b
+
+/* SPI Burst Cnt Status, default value: 0x00 */
+#define REG_SPIBURSTCNT 0x031e
+
+/* SPI Burst Status and SWRST, default value: 0x00 */
+#define REG_SPIBURSTSTAT 0x0322
+#define BIT_SPIBURSTSTAT_SPI_HDCPRST BIT(7)
+#define BIT_SPIBURSTSTAT_SPI_CBUSRST BIT(6)
+#define BIT_SPIBURSTSTAT_SPI_SRST BIT(5)
+#define BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE BIT(0)
+
+/* E-MSC 1st Interrupt, default value: 0x00 */
+#define REG_EMSCINTR 0x0323
+#define BIT_EMSCINTR_EMSC_XFIFO_EMPTY BIT(7)
+#define BIT_EMSCINTR_EMSC_XMIT_ACK_TOUT BIT(6)
+#define BIT_EMSCINTR_EMSC_RFIFO_READ_ERR BIT(5)
+#define BIT_EMSCINTR_EMSC_XFIFO_WRITE_ERR BIT(4)
+#define BIT_EMSCINTR_EMSC_COMMA_CHAR_ERR BIT(3)
+#define BIT_EMSCINTR_EMSC_XMIT_DONE BIT(2)
+#define BIT_EMSCINTR_EMSC_XMIT_GNT_TOUT BIT(1)
+#define BIT_EMSCINTR_SPI_DVLD BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK 0x0324
+
+/* I2C E-MSC XMIT FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_XMIT_WRITE_PORT 0x032a
+
+/* I2C E-MSC RCV FIFO Write Port, default value: 0x00 */
+#define REG_EMSC_RCV_READ_PORT 0x032b
+
+/* E-MSC 2nd Interrupt, default value: 0x00 */
+#define REG_EMSCINTR1 0x032c
+#define BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR BIT(0)
+
+/* E-MSC Interrupt Mask, default value: 0x00 */
+#define REG_EMSCINTRMASK1 0x032d
+#define BIT_EMSCINTRMASK1_EMSC_INTRMASK1_0 BIT(0)
+
+/* MHL Top Ctl, default value: 0x00 */
+#define REG_MHL_TOP_CTL 0x0330
+#define BIT_MHL_TOP_CTL_MHL3_DOC_SEL BIT(7)
+#define BIT_MHL_TOP_CTL_MHL_PP_SEL BIT(6)
+#define MSK_MHL_TOP_CTL_IF_TIMING_CTL 0x03
+
+/* MHL DataPath 1st Ctl, default value: 0xbc */
+#define REG_MHL_DP_CTL0 0x0331
+#define BIT_MHL_DP_CTL0_DP_OE BIT(7)
+#define BIT_MHL_DP_CTL0_TX_OE_OVR BIT(6)
+#define MSK_MHL_DP_CTL0_TX_OE 0x3f
+
+/* MHL DataPath 2nd Ctl, default value: 0xbb */
+#define REG_MHL_DP_CTL1 0x0332
+#define MSK_MHL_DP_CTL1_CK_SWING_CTL 0xf0
+#define MSK_MHL_DP_CTL1_DT_SWING_CTL 0x0f
+
+/* MHL DataPath 3rd Ctl, default value: 0x2f */
+#define REG_MHL_DP_CTL2 0x0333
+#define BIT_MHL_DP_CTL2_CLK_BYPASS_EN BIT(7)
+#define MSK_MHL_DP_CTL2_DAMP_TERM_SEL 0x30
+#define MSK_MHL_DP_CTL2_CK_TERM_SEL 0x0c
+#define MSK_MHL_DP_CTL2_DT_TERM_SEL 0x03
+
+/* MHL DataPath 4th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL3 0x0334
+#define MSK_MHL_DP_CTL3_DT_DRV_VNBC_CTL 0xf0
+#define MSK_MHL_DP_CTL3_DT_DRV_VNB_CTL 0x0f
+
+/* MHL DataPath 5th Ctl, default value: 0x48 */
+#define REG_MHL_DP_CTL4 0x0335
+#define MSK_MHL_DP_CTL4_CK_DRV_VNBC_CTL 0xf0
+#define MSK_MHL_DP_CTL4_CK_DRV_VNB_CTL 0x0f
+
+/* MHL DataPath 6th Ctl, default value: 0x3f */
+#define REG_MHL_DP_CTL5 0x0336
+#define BIT_MHL_DP_CTL5_RSEN_EN_OVR BIT(7)
+#define BIT_MHL_DP_CTL5_RSEN_EN BIT(6)
+#define MSK_MHL_DP_CTL5_DAMP_TERM_VGS_CTL 0x30
+#define MSK_MHL_DP_CTL5_CK_TERM_VGS_CTL 0x0c
+#define MSK_MHL_DP_CTL5_DT_TERM_VGS_CTL 0x03
+
+/* MHL PLL 1st Ctl, default value: 0x05 */
+#define REG_MHL_PLL_CTL0 0x0337
+#define BIT_MHL_PLL_CTL0_AUD_CLK_EN BIT(7)
+
+#define MSK_MHL_PLL_CTL0_AUD_CLK_RATIO 0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_10 0x70
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_6 0x60
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_4 0x50
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2 0x40
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_5 0x30
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_3 0x20
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_2_PRIME 0x10
+#define VAL_MHL_PLL_CTL0_AUD_CLK_RATIO_5_1 0x00
+
+#define MSK_MHL_PLL_CTL0_HDMI_CLK_RATIO 0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_4X 0x0c
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_2X 0x08
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X 0x04
+#define VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_HALF_X 0x00
+
+#define BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL BIT(1)
+#define BIT_MHL_PLL_CTL0_ZONE_MASK_OE BIT(0)
+
+/* MHL PLL 3rd Ctl, default value: 0x80 */
+#define REG_MHL_PLL_CTL2 0x0339
+#define BIT_MHL_PLL_CTL2_CLKDETECT_EN BIT(7)
+#define BIT_MHL_PLL_CTL2_MEAS_FVCO BIT(3)
+#define BIT_MHL_PLL_CTL2_PLL_FAST_LOCK BIT(2)
+#define MSK_MHL_PLL_CTL2_PLL_LF_SEL 0x03
+
+/* MHL CBUS 1st Ctl, default value: 0x12 */
+#define REG_MHL_CBUS_CTL0 0x0340
+#define BIT_MHL_CBUS_CTL0_CBUS_RGND_TEST_MODE BIT(7)
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RGND_VTH_CTL 0x30
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734 0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_747 0x10
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_740 0x20
+#define VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_754 0x30
+
+#define MSK_MHL_CBUS_CTL0_CBUS_RES_TEST_SEL 0x0c
+
+#define MSK_MHL_CBUS_CTL0_CBUS_DRV_SEL 0x03
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAKEST 0x00
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_WEAK 0x01
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG 0x02
+#define VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONGEST 0x03
+
+/* MHL CBUS 2nd Ctl, default value: 0x03 */
+#define REG_MHL_CBUS_CTL1 0x0341
+#define MSK_MHL_CBUS_CTL1_CBUS_RGND_RES_CTL 0x07
+#define VAL_MHL_CBUS_CTL1_0888_OHM 0x00
+#define VAL_MHL_CBUS_CTL1_1115_OHM 0x04
+#define VAL_MHL_CBUS_CTL1_1378_OHM 0x07
+
+/* MHL CoC 1st Ctl, default value: 0xc3 */
+#define REG_MHL_COC_CTL0 0x0342
+#define BIT_MHL_COC_CTL0_COC_BIAS_EN BIT(7)
+#define MSK_MHL_COC_CTL0_COC_BIAS_CTL 0x70
+#define MSK_MHL_COC_CTL0_COC_TERM_CTL 0x07
+
+/* MHL CoC 2nd Ctl, default value: 0x87 */
+#define REG_MHL_COC_CTL1 0x0343
+#define BIT_MHL_COC_CTL1_COC_EN BIT(7)
+#define MSK_MHL_COC_CTL1_COC_DRV_CTL 0x3f
+
+/* MHL CoC 4th Ctl, default value: 0x00 */
+#define REG_MHL_COC_CTL3 0x0345
+#define BIT_MHL_COC_CTL3_COC_AECHO_EN BIT(0)
+
+/* MHL CoC 5th Ctl, default value: 0x28 */
+#define REG_MHL_COC_CTL4 0x0346
+#define MSK_MHL_COC_CTL4_COC_IF_CTL 0xf0
+#define MSK_MHL_COC_CTL4_COC_SLEW_CTL 0x0f
+
+/* MHL CoC 6th Ctl, default value: 0x0d */
+#define REG_MHL_COC_CTL5 0x0347
+
+/* MHL DoC 1st Ctl, default value: 0x18 */
+#define REG_MHL_DOC_CTL0 0x0349
+#define BIT_MHL_DOC_CTL0_DOC_RXDATA_EN BIT(7)
+#define MSK_MHL_DOC_CTL0_DOC_DM_TERM 0x38
+#define MSK_MHL_DOC_CTL0_DOC_OPMODE 0x06
+#define BIT_MHL_DOC_CTL0_DOC_RXBIAS_EN BIT(0)
+
+/* MHL DataPath 7th Ctl, default value: 0x2a */
+#define REG_MHL_DP_CTL6 0x0350
+#define BIT_MHL_DP_CTL6_DP_TAP2_SGN BIT(5)
+#define BIT_MHL_DP_CTL6_DP_TAP2_EN BIT(4)
+#define BIT_MHL_DP_CTL6_DP_TAP1_SGN BIT(3)
+#define BIT_MHL_DP_CTL6_DP_TAP1_EN BIT(2)
+#define BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN BIT(1)
+#define BIT_MHL_DP_CTL6_DP_PRE_POST_SEL BIT(0)
+
+/* MHL DataPath 8th Ctl, default value: 0x06 */
+#define REG_MHL_DP_CTL7 0x0351
+#define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL 0xf0
+#define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL 0x0f
+
+/* Tx Zone Ctl1, default value: 0x00 */
+#define REG_TX_ZONE_CTL1 0x0361
+#define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE 0x08
+
+/* MHL3 Tx Zone Ctl, default value: 0x00 */
+#define REG_MHL3_TX_ZONE_CTL 0x0364
+#define BIT_MHL3_TX_ZONE_CTL_MHL2_INTPLT_ZONE_MANU_EN BIT(7)
+#define MSK_MHL3_TX_ZONE_CTL_MHL3_TX_ZONE 0x03
+
+#define MSK_TX_ZONE_CTL3_TX_ZONE 0x03
+#define VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS 0x00
+#define VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS 0x01
+#define VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS 0x02
+
+/* HDCP Polling Control and Status, default value: 0x70 */
+#define REG_HDCP2X_POLL_CS 0x0391
+
+#define BIT_HDCP2X_POLL_CS_HDCP2X_MSG_SZ_CLR_OPTION BIT(6)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_RPT_READY_CLR_OPTION BIT(5)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_REAUTH_REQ_CLR_OPTION BIT(4)
+#define MSK_HDCP2X_POLL_CS_ 0x0c
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_GNT BIT(1)
+#define BIT_HDCP2X_POLL_CS_HDCP2X_DIS_POLL_EN BIT(0)
+
+/* HDCP Interrupt 0, default value: 0x00 */
+#define REG_HDCP2X_INTR0 0x0398
+
+/* HDCP Interrupt 0 Mask, default value: 0x00 */
+#define REG_HDCP2X_INTR0_MASK 0x0399
+
+/* HDCP General Control 0, default value: 0x02 */
+#define REG_HDCP2X_CTRL_0 0x03a0
+#define BIT_HDCP2X_CTRL_0_HDCP2X_ENCRYPT_EN BIT(7)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_SEL BIT(6)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_POLINT_OVR BIT(5)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_PRECOMPUTE BIT(4)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDMIMODE BIT(3)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_REPEATER BIT(2)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX BIT(1)
+#define BIT_HDCP2X_CTRL_0_HDCP2X_EN BIT(0)
+
+/* HDCP General Control 1, default value: 0x08 */
+#define REG_HDCP2X_CTRL_1 0x03a1
+#define MSK_HDCP2X_CTRL_1_HDCP2X_REAUTH_MSK_3_0 0xf0
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_SW BIT(3)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_HPD_OVR BIT(2)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_CTL3MSK BIT(1)
+#define BIT_HDCP2X_CTRL_1_HDCP2X_REAUTH_SW BIT(0)
+
+/* HDCP Misc Control, default value: 0x00 */
+#define REG_HDCP2X_MISC_CTRL 0x03a5
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_XFER_START BIT(4)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR_START BIT(3)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_SMNG_WR BIT(2)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD_START BIT(1)
+#define BIT_HDCP2X_MISC_CTRL_HDCP2X_RPT_RCVID_RD BIT(0)
+
+/* HDCP RPT SMNG K, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_K 0x03a6
+
+/* HDCP RPT SMNG In, default value: 0x00 */
+#define REG_HDCP2X_RPT_SMNG_IN 0x03a7
+
+/* HDCP Auth Status, default value: 0x00 */
+#define REG_HDCP2X_AUTH_STAT 0x03aa
+
+/* HDCP RPT RCVID Out, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVID_OUT 0x03ac
+
+/* HDCP TP1, default value: 0x62 */
+#define REG_HDCP2X_TP1 0x03b4
+
+/* HDCP GP Out 0, default value: 0x00 */
+#define REG_HDCP2X_GP_OUT0 0x03c7
+
+/* HDCP Repeater RCVR ID 0, default value: 0x00 */
+#define REG_HDCP2X_RPT_RCVR_ID0 0x03d1
+
+/* HDCP DDCM Status, default value: 0x00 */
+#define REG_HDCP2X_DDCM_STS 0x03d8
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_ERR_STS_3_0 0xf0
+#define MSK_HDCP2X_DDCM_STS_HDCP2X_DDCM_CTL_CS_3_0 0x0f
+
+/* HDMI2MHL3 Control, default value: 0x0a */
+#define REG_M3_CTRL 0x03e0
+#define BIT_M3_CTRL_H2M_SWRST BIT(4)
+#define BIT_M3_CTRL_SW_MHL3_SEL BIT(3)
+#define BIT_M3_CTRL_M3AV_EN BIT(2)
+#define BIT_M3_CTRL_ENC_TMDS BIT(1)
+#define BIT_M3_CTRL_MHL3_MASTER_EN BIT(0)
+
+#define VAL_M3_CTRL_MHL1_2_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+ | BIT_M3_CTRL_ENC_TMDS)
+#define VAL_M3_CTRL_MHL3_VALUE (BIT_M3_CTRL_SW_MHL3_SEL \
+ | BIT_M3_CTRL_M3AV_EN \
+ | BIT_M3_CTRL_ENC_TMDS \
+ | BIT_M3_CTRL_MHL3_MASTER_EN)
+
+/* HDMI2MHL3 Port0 Control, default value: 0x04 */
+#define REG_M3_P0CTRL 0x03e1
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_ENC_EN BIT(4)
+#define BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN BIT(3)
+#define BIT_M3_P0CTRL_MHL3_P0_HDCP_EN BIT(2)
+#define BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED BIT(1)
+#define BIT_M3_P0CTRL_MHL3_P0_PORT_EN BIT(0)
+
+#define REG_M3_POSTM 0x03e2
+#define MSK_M3_POSTM_RRP_DECODE 0xf8
+#define MSK_M3_POSTM_MHL3_P0_STM_ID 0x07
+
+/* HDMI2MHL3 Scramble Control, default value: 0x41 */
+#define REG_M3_SCTRL 0x03e6
+#define MSK_M3_SCTRL_MHL3_SR_LENGTH 0xf0
+#define BIT_M3_SCTRL_MHL3_SCRAMBLER_EN BIT(0)
+
+/* HSIC Div Ctl, default value: 0x05 */
+#define REG_DIV_CTL_MAIN 0x03f2
+#define MSK_DIV_CTL_MAIN_PRE_DIV_CTL_MAIN 0x1c
+#define MSK_DIV_CTL_MAIN_FB_DIV_CTL_MAIN 0x03
+
+/* MHL Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_DEVCAP_0 0x0400
+
+/* MHL Interrupt 1st Byte, default value: 0x00 */
+#define REG_MHL_INT_0 0x0420
+
+/* Device Status 1st byte, default value: 0x00 */
+#define REG_MHL_STAT_0 0x0430
+
+/* CBUS Scratch Pad 1st Byte, default value: 0x00 */
+#define REG_MHL_SCRPAD_0 0x0440
+
+/* MHL Extended Capability 1st Byte, default value: 0x00 */
+#define REG_MHL_EXTDEVCAP_0 0x0480
+
+/* Device Extended Status 1st byte, default value: 0x00 */
+#define REG_MHL_EXTSTAT_0 0x0490
+
+/* TPI DTD Byte2, default value: 0x00 */
+#define REG_TPI_DTD_B2 0x0602
+
+#define VAL_TPI_QUAN_RANGE_LIMITED 0x01
+#define VAL_TPI_QUAN_RANGE_FULL 0x02
+#define VAL_TPI_FORMAT_RGB 0x00
+#define VAL_TPI_FORMAT_YCBCR444 0x01
+#define VAL_TPI_FORMAT_YCBCR422 0x02
+#define VAL_TPI_FORMAT_INTERNAL_RGB 0x03
+#define VAL_TPI_FORMAT(_fmt, _qr) \
+ (VAL_TPI_FORMAT_##_fmt | (VAL_TPI_QUAN_RANGE_##_qr << 2))
+
+/* Input Format, default value: 0x00 */
+#define REG_TPI_INPUT 0x0609
+#define BIT_TPI_INPUT_EXTENDEDBITMODE BIT(7)
+#define BIT_TPI_INPUT_ENDITHER BIT(6)
+#define MSK_TPI_INPUT_INPUT_QUAN_RANGE 0x0c
+#define MSK_TPI_INPUT_INPUT_FORMAT 0x03
+
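The prefixes used throughout this header follow one convention: REG_* is a 16-bit page/offset address, BIT_* a single-bit mask, MSK_* a multi-bit field mask, and VAL_* a value already shifted into its field. A minimal read-modify-write sketch of how they compose, assuming hypothetical byte-wide accessors sii8620_readb()/sii8620_writeb() (the driver's real accessors are not shown in this hunk):

/* Illustrative only: select RGB input with full quantization range. */
static void sii8620_set_input_format(struct sii8620 *ctx)
{
        u8 val = sii8620_readb(ctx, REG_TPI_INPUT);

        val &= ~(MSK_TPI_INPUT_INPUT_FORMAT | MSK_TPI_INPUT_INPUT_QUAN_RANGE);
        val |= VAL_TPI_FORMAT(RGB, FULL);       /* 0x00 | (0x02 << 2) == 0x08 */
        sii8620_writeb(ctx, REG_TPI_INPUT, val);
}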
+/* Output Format, default value: 0x00 */
+#define REG_TPI_OUTPUT 0x060a
+#define BIT_TPI_OUTPUT_CSCMODE709 BIT(4)
+#define MSK_TPI_OUTPUT_OUTPUT_QUAN_RANGE 0x0c
+#define MSK_TPI_OUTPUT_OUTPUT_FORMAT 0x03
+
+/* TPI AVI Check Sum, default value: 0x00 */
+#define REG_TPI_AVI_CHSUM 0x060c
+
+/* TPI System Control, default value: 0x00 */
+#define REG_TPI_SC 0x061a
+#define BIT_TPI_SC_TPI_UPDATE_FLG BIT(7)
+#define BIT_TPI_SC_TPI_REAUTH_CTL BIT(6)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_1 BIT(5)
+#define BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN BIT(4)
+#define BIT_TPI_SC_TPI_AV_MUTE BIT(3)
+#define BIT_TPI_SC_DDC_GPU_REQUEST BIT(2)
+#define BIT_TPI_SC_DDC_TPI_SW BIT(1)
+#define BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI BIT(0)
+
+/* TPI COPP Query Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA1 0x0629
+#define BIT_TPI_COPP_DATA1_COPP_GPROT BIT(7)
+#define BIT_TPI_COPP_DATA1_COPP_LPROT BIT(6)
+#define MSK_TPI_COPP_DATA1_COPP_LINK_STATUS 0x30
+#define VAL_TPI_COPP_LINK_STATUS_NORMAL 0x00
+#define VAL_TPI_COPP_LINK_STATUS_LINK_LOST 0x10
+#define VAL_TPI_COPP_LINK_STATUS_RENEGOTIATION_REQ 0x20
+#define VAL_TPI_COPP_LINK_STATUS_LINK_SUSPENDED 0x30
+#define BIT_TPI_COPP_DATA1_COPP_HDCP_REP BIT(3)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_0 BIT(2)
+#define BIT_TPI_COPP_DATA1_COPP_PROTYPE BIT(1)
+#define BIT_TPI_COPP_DATA1_COPP_CONNTYPE_1 BIT(0)
+
+/* TPI COPP Control Data, default value: 0x00 */
+#define REG_TPI_COPP_DATA2 0x062a
+#define BIT_TPI_COPP_DATA2_INTR_ENCRYPTION BIT(5)
+#define BIT_TPI_COPP_DATA2_KSV_FORWARD BIT(4)
+#define BIT_TPI_COPP_DATA2_INTERM_RI_CHECK_EN BIT(3)
+#define BIT_TPI_COPP_DATA2_DOUBLE_RI_CHECK BIT(2)
+#define BIT_TPI_COPP_DATA2_DDC_SHORT_RI_RD BIT(1)
+#define BIT_TPI_COPP_DATA2_COPP_PROTLEVEL BIT(0)
+
+/* TPI Interrupt Enable, default value: 0x00 */
+#define REG_TPI_INTR_EN 0x063c
+
+/* TPI Interrupt Status Low Byte, default value: 0x00 */
+#define REG_TPI_INTR_ST0 0x063d
+#define BIT_TPI_INTR_ST0_TPI_AUTH_CHNGE_STAT BIT(7)
+#define BIT_TPI_INTR_ST0_TPI_V_RDY_STAT BIT(6)
+#define BIT_TPI_INTR_ST0_TPI_COPP_CHNGE_STAT BIT(5)
+#define BIT_TPI_INTR_ST0_KSV_FIFO_FIRST_STAT BIT(3)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_DONE_STAT BIT(2)
+#define BIT_TPI_INTR_ST0_READ_BKSV_BCAPS_ERR_STAT BIT(1)
+#define BIT_TPI_INTR_ST0_READ_BKSV_ERR_STAT BIT(0)
+
+/* TPI DS BCAPS Status, default value: 0x00 */
+#define REG_TPI_DS_BCAPS 0x0644
+
+/* TPI BStatus1, default value: 0x00 */
+#define REG_TPI_BSTATUS1 0x0645
+#define BIT_TPI_BSTATUS1_DS_DEV_EXCEED BIT(7)
+#define MSK_TPI_BSTATUS1_DS_DEV_CNT 0x7f
+
+/* TPI BStatus2, default value: 0x10 */
+#define REG_TPI_BSTATUS2 0x0646
+#define MSK_TPI_BSTATUS2_DS_BSTATUS 0xe0
+#define BIT_TPI_BSTATUS2_DS_HDMI_MODE BIT(4)
+#define BIT_TPI_BSTATUS2_DS_CASC_EXCEED BIT(3)
+#define MSK_TPI_BSTATUS2_DS_DEPTH 0x07
+
+/* TPI HW Optimization Control #3, default value: 0x00 */
+#define REG_TPI_HW_OPT3 0x06bb
+#define BIT_TPI_HW_OPT3_DDC_DEBUG BIT(7)
+#define BIT_TPI_HW_OPT3_RI_CHECK_SKIP BIT(3)
+#define BIT_TPI_HW_OPT3_TPI_DDC_BURST_MODE BIT(2)
+#define MSK_TPI_HW_OPT3_TPI_DDC_REQ_LEVEL 0x03
+
+/* TPI Info Frame Select, default value: 0x00 */
+#define REG_TPI_INFO_FSEL 0x06bf
+#define BIT_TPI_INFO_FSEL_TPI_INFO_EN BIT(7)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT BIT(6)
+#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG BIT(5)
+#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL 0x07
+
+/* TPI Info Byte #0, default value: 0x00 */
+#define REG_TPI_INFO_B0 0x06c0
+
+/* CoC Status, default value: 0x00 */
+#define REG_COC_STAT_0 0x0700
+#define REG_COC_STAT_1 0x0701
+#define REG_COC_STAT_2 0x0702
+#define REG_COC_STAT_3 0x0703
+#define REG_COC_STAT_4 0x0704
+#define REG_COC_STAT_5 0x0705
+
+/* CoC 1st Ctl, default value: 0x40 */
+#define REG_COC_CTL0 0x0710
+
+/* CoC 2nd Ctl, default value: 0x0a */
+#define REG_COC_CTL1 0x0711
+#define MSK_COC_CTL1_COC_CTRL1_7_6 0xc0
+#define MSK_COC_CTL1_COC_CTRL1_5_0 0x3f
+
+/* CoC 3rd Ctl, default value: 0x14 */
+#define REG_COC_CTL2 0x0712
+#define MSK_COC_CTL2_COC_CTRL2_7_6 0xc0
+#define MSK_COC_CTL2_COC_CTRL2_5_0 0x3f
+
+/* CoC 4th Ctl, default value: 0x40 */
+#define REG_COC_CTL3 0x0713
+#define BIT_COC_CTL3_COC_CTRL3_7 BIT(7)
+#define MSK_COC_CTL3_COC_CTRL3_6_0 0x7f
+
+/* CoC 7th Ctl, default value: 0x00 */
+#define REG_COC_CTL6 0x0716
+#define BIT_COC_CTL6_COC_CTRL6_7 BIT(7)
+#define BIT_COC_CTL6_COC_CTRL6_6 BIT(6)
+#define MSK_COC_CTL6_COC_CTRL6_5_0 0x3f
+
+/* CoC 8th Ctl, default value: 0x06 */
+#define REG_COC_CTL7 0x0717
+#define BIT_COC_CTL7_COC_CTRL7_7 BIT(7)
+#define BIT_COC_CTL7_COC_CTRL7_6 BIT(6)
+#define BIT_COC_CTL7_COC_CTRL7_5 BIT(5)
+#define MSK_COC_CTL7_COC_CTRL7_4_3 0x18
+#define MSK_COC_CTL7_COC_CTRL7_2_0 0x07
+
+/* CoC 10th Ctl, default value: 0x00 */
+#define REG_COC_CTL9 0x0719
+
+/* CoC 11th Ctl, default value: 0x00 */
+#define REG_COC_CTLA 0x071a
+
+/* CoC 12th Ctl, default value: 0x00 */
+#define REG_COC_CTLB 0x071b
+
+/* CoC 13th Ctl, default value: 0x0f */
+#define REG_COC_CTLC 0x071c
+
+/* CoC 14th Ctl, default value: 0x0a */
+#define REG_COC_CTLD 0x071d
+#define BIT_COC_CTLD_COC_CTRLD_7 BIT(7)
+#define MSK_COC_CTLD_COC_CTRLD_6_0 0x7f
+
+/* CoC 15th Ctl, default value: 0x0a */
+#define REG_COC_CTLE 0x071e
+#define BIT_COC_CTLE_COC_CTRLE_7 BIT(7)
+#define MSK_COC_CTLE_COC_CTRLE_6_0 0x7f
+
+/* CoC 16th Ctl, default value: 0x00 */
+#define REG_COC_CTLF 0x071f
+#define MSK_COC_CTLF_COC_CTRLF_7_3 0xf8
+#define MSK_COC_CTLF_COC_CTRLF_2_0 0x07
+
+/* CoC 18th Ctl, default value: 0x32 */
+#define REG_COC_CTL11 0x0721
+#define MSK_COC_CTL11_COC_CTRL11_7_4 0xf0
+#define MSK_COC_CTL11_COC_CTRL11_3_0 0x0f
+
+/* CoC 21st Ctl, default value: 0x00 */
+#define REG_COC_CTL14 0x0724
+#define MSK_COC_CTL14_COC_CTRL14_7_4 0xf0
+#define MSK_COC_CTL14_COC_CTRL14_3_0 0x0f
+
+/* CoC 22nd Ctl, default value: 0x00 */
+#define REG_COC_CTL15 0x0725
+#define BIT_COC_CTL15_COC_CTRL15_7 BIT(7)
+#define MSK_COC_CTL15_COC_CTRL15_6_4 0x70
+#define MSK_COC_CTL15_COC_CTRL15_3_0 0x0f
+
+/* CoC Interrupt, default value: 0x00 */
+#define REG_COC_INTR 0x0726
+
+/* CoC Interrupt Mask, default value: 0x00 */
+#define REG_COC_INTR_MASK 0x0727
+#define BIT_COC_PLL_LOCK_STATUS_CHANGE BIT(0)
+#define BIT_COC_CALIBRATION_DONE BIT(1)
+
+/* CoC Misc Ctl, default value: 0x00 */
+#define REG_COC_MISC_CTL0 0x0728
+#define BIT_COC_MISC_CTL0_FSM_MON BIT(7)
+
+/* CoC 24th Ctl, default value: 0x00 */
+#define REG_COC_CTL17 0x072a
+#define MSK_COC_CTL17_COC_CTRL17_7_4 0xf0
+#define MSK_COC_CTL17_COC_CTRL17_3_0 0x0f
+
+/* CoC 25th Ctl, default value: 0x00 */
+#define REG_COC_CTL18 0x072b
+#define MSK_COC_CTL18_COC_CTRL18_7_4 0xf0
+#define MSK_COC_CTL18_COC_CTRL18_3_0 0x0f
+
+/* CoC 26th Ctl, default value: 0x00 */
+#define REG_COC_CTL19 0x072c
+#define MSK_COC_CTL19_COC_CTRL19_7_4 0xf0
+#define MSK_COC_CTL19_COC_CTRL19_3_0 0x0f
+
+/* CoC 27th Ctl, default value: 0x00 */
+#define REG_COC_CTL1A 0x072d
+#define MSK_COC_CTL1A_COC_CTRL1A_7_2 0xfc
+#define MSK_COC_CTL1A_COC_CTRL1A_1_0 0x03
+
+/* DoC 9th Status, default value: 0x00 */
+#define REG_DOC_STAT_8 0x0740
+
+/* DoC 10th Status, default value: 0x00 */
+#define REG_DOC_STAT_9 0x0741
+
+/* DoC 5th CFG, default value: 0x00 */
+#define REG_DOC_CFG4 0x074e
+#define MSK_DOC_CFG4_DBG_STATE_DOC_FSM 0x0f
+
+/* DoC 1st Ctl, default value: 0x40 */
+#define REG_DOC_CTL0 0x0751
+
+/* DoC 7th Ctl, default value: 0x00 */
+#define REG_DOC_CTL6 0x0757
+#define BIT_DOC_CTL6_DOC_CTRL6_7 BIT(7)
+#define BIT_DOC_CTL6_DOC_CTRL6_6 BIT(6)
+#define MSK_DOC_CTL6_DOC_CTRL6_5_4 0x30
+#define MSK_DOC_CTL6_DOC_CTRL6_3_0 0x0f
+
+/* DoC 8th Ctl, default value: 0x00 */
+#define REG_DOC_CTL7 0x0758
+#define BIT_DOC_CTL7_DOC_CTRL7_7 BIT(7)
+#define BIT_DOC_CTL7_DOC_CTRL7_6 BIT(6)
+#define BIT_DOC_CTL7_DOC_CTRL7_5 BIT(5)
+#define MSK_DOC_CTL7_DOC_CTRL7_4_3 0x18
+#define MSK_DOC_CTL7_DOC_CTRL7_2_0 0x07
+
+/* DoC 9th Ctl, default value: 0x00 */
+#define REG_DOC_CTL8 0x076c
+#define BIT_DOC_CTL8_DOC_CTRL8_7 BIT(7)
+#define MSK_DOC_CTL8_DOC_CTRL8_6_4 0x70
+#define MSK_DOC_CTL8_DOC_CTRL8_3_2 0x0c
+#define MSK_DOC_CTL8_DOC_CTRL8_1_0 0x03
+
+/* DoC 10th Ctl, default value: 0x00 */
+#define REG_DOC_CTL9 0x076d
+
+/* DoC 11th Ctl, default value: 0x00 */
+#define REG_DOC_CTLA 0x076e
+
+/* DoC 15th Ctl, default value: 0x00 */
+#define REG_DOC_CTLE 0x0772
+#define BIT_DOC_CTLE_DOC_CTRLE_7 BIT(7)
+#define BIT_DOC_CTLE_DOC_CTRLE_6 BIT(6)
+#define MSK_DOC_CTLE_DOC_CTRLE_5_4 0x30
+#define MSK_DOC_CTLE_DOC_CTRLE_3_0 0x0f
+
+/* Interrupt Mask 1st, default value: 0x00 */
+#define REG_MHL_INT_0_MASK 0x0580
+
+/* Interrupt Mask 2nd, default value: 0x00 */
+#define REG_MHL_INT_1_MASK 0x0581
+
+/* Interrupt Mask 3rd, default value: 0x00 */
+#define REG_MHL_INT_2_MASK 0x0582
+
+/* Interrupt Mask 4th, default value: 0x00 */
+#define REG_MHL_INT_3_MASK 0x0583
+
+/* MDT Receive Time Out, default value: 0x00 */
+#define REG_MDT_RCV_TIMEOUT 0x0584
+
+/* MDT Transmit Time Out, default value: 0x00 */
+#define REG_MDT_XMIT_TIMEOUT 0x0585
+
+/* MDT Receive Control, default value: 0x00 */
+#define REG_MDT_RCV_CTRL 0x0586
+#define BIT_MDT_RCV_CTRL_MDT_RCV_EN BIT(7)
+#define BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN BIT(6)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_OVER_WR_EN BIT(4)
+#define BIT_MDT_RCV_CTRL_MDT_XFIFO_OVER_WR_EN BIT(3)
+#define BIT_MDT_RCV_CTRL_MDT_DISABLE BIT(2)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_ALL BIT(1)
+#define BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR BIT(0)
+
+/* MDT Receive Read Port, default value: 0x00 */
+#define REG_MDT_RCV_READ_PORT 0x0587
+
+/* MDT Transmit Control, default value: 0x70 */
+#define REG_MDT_XMIT_CTRL 0x0588
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN BIT(7)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN BIT(6)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID BIT(4)
+#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3)
+#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT BIT(2)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL BIT(1)
+#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR BIT(0)
+
+/* MDT Transmit WRITE Port, default value: 0x00 */
+#define REG_MDT_XMIT_WRITE_PORT 0x0589
+
+/* MDT RFIFO Status, default value: 0x00 */
+#define REG_MDT_RFIFO_STAT 0x058a
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CNT 0xe0
+#define MSK_MDT_RFIFO_STAT_MDT_RFIFO_CUR_BYTE_CNT 0x1f
+
+/* MDT XFIFO Status, default value: 0x80 */
+#define REG_MDT_XFIFO_STAT 0x058b
+#define MSK_MDT_XFIFO_STAT_MDT_XFIFO_LEVEL_AVAIL 0xe0
+#define BIT_MDT_XFIFO_STAT_MDT_XMIT_PRE_HS_EN BIT(4)
+#define MSK_MDT_XFIFO_STAT_MDT_WRITE_BURST_LEN 0x0f
+
+/* MDT Interrupt 0, default value: 0x0c */
+#define REG_MDT_INT_0 0x058c
+#define BIT_MDT_RFIFO_DATA_RDY BIT(0)
+#define BIT_MDT_IDLE_AFTER_HAWB_DISABLE BIT(2)
+#define BIT_MDT_XFIFO_EMPTY BIT(3)
+
+/* MDT Interrupt 0 Mask, default value: 0x00 */
+#define REG_MDT_INT_0_MASK 0x058d
+
+/* MDT Interrupt 1, default value: 0x00 */
+#define REG_MDT_INT_1 0x058e
+#define BIT_MDT_RCV_TIMEOUT BIT(0)
+#define BIT_MDT_RCV_SM_ABORT_PKT_RCVD BIT(1)
+#define BIT_MDT_RCV_SM_ERROR BIT(2)
+#define BIT_MDT_XMIT_TIMEOUT BIT(5)
+#define BIT_MDT_XMIT_SM_ABORT_PKT_RCVD BIT(6)
+#define BIT_MDT_XMIT_SM_ERROR BIT(7)
+
+/* MDT Interrupt 1 Mask, default value: 0x00 */
+#define REG_MDT_INT_1_MASK 0x058f
+
+/* CBUS Vendor ID, default value: 0x01 */
+#define REG_CBUS_VENDOR_ID 0x0590
+
+/* CBUS Connection Status, default value: 0x00 */
+#define REG_CBUS_STATUS 0x0591
+#define BIT_CBUS_STATUS_MHL_CABLE_PRESENT BIT(4)
+#define BIT_CBUS_STATUS_MSC_HB_SUCCESS BIT(3)
+#define BIT_CBUS_STATUS_CBUS_HPD BIT(2)
+#define BIT_CBUS_STATUS_MHL_MODE BIT(1)
+#define BIT_CBUS_STATUS_CBUS_CONNECTED BIT(0)
+
+/* CBUS Interrupt 1st, default value: 0x00 */
+#define REG_CBUS_INT_0 0x0592
+#define BIT_CBUS_MSC_MT_DONE_NACK BIT(7)
+#define BIT_CBUS_MSC_MR_SET_INT BIT(6)
+#define BIT_CBUS_MSC_MR_WRITE_BURST BIT(5)
+#define BIT_CBUS_MSC_MR_MSC_MSG BIT(4)
+#define BIT_CBUS_MSC_MR_WRITE_STAT BIT(3)
+#define BIT_CBUS_HPD_CHG BIT(2)
+#define BIT_CBUS_MSC_MT_DONE BIT(1)
+#define BIT_CBUS_CNX_CHG BIT(0)
+
+/* CBUS Interrupt Mask 1st, default value: 0x00 */
+#define REG_CBUS_INT_0_MASK 0x0593
+
+/* CBUS Interrupt 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1 0x0594
+#define BIT_CBUS_CMD_ABORT BIT(6)
+#define BIT_CBUS_MSC_ABORT_RCVD BIT(3)
+#define BIT_CBUS_DDC_ABORT BIT(2)
+#define BIT_CBUS_CEC_ABORT BIT(1)
+
+/* CBUS Interrupt Mask 2nd, default value: 0x00 */
+#define REG_CBUS_INT_1_MASK 0x0595
+
+/* CBUS DDC Abort Interrupt, default value: 0x00 */
+#define REG_DDC_ABORT_INT 0x0598
+
+/* CBUS DDC Abort Interrupt Mask, default value: 0x00 */
+#define REG_DDC_ABORT_INT_MASK 0x0599
+
+/* CBUS MSC Requester Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT 0x059a
+
+/* CBUS MSC Requester Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MT_ABORT_INT_MASK 0x059b
+
+/* CBUS MSC Responder Abort Interrupt, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT 0x059c
+
+/* CBUS MSC Responder Abort Interrupt Mask, default value: 0x00 */
+#define REG_MSC_MR_ABORT_INT_MASK 0x059d
+
+/* CBUS RX DISCOVERY interrupt, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0 0x059e
+
+/* CBUS RX DISCOVERY Interrupt Mask, default value: 0x00 */
+#define REG_CBUS_RX_DISC_INT0_MASK 0x059f
+
+/* CBUS_Link_Layer Control #8, default value: 0x00 */
+#define REG_CBUS_LINK_CTRL_8 0x05a7
+
+/* MDT State Machine Status, default value: 0x00 */
+#define REG_MDT_SM_STAT 0x05b5
+#define MSK_MDT_SM_STAT_MDT_RCV_STATE 0xf0
+#define MSK_MDT_SM_STAT_MDT_XMIT_STATE 0x0f
+
+/* CBUS MSC command trigger, default value: 0x00 */
+#define REG_MSC_COMMAND_START 0x05b8
+#define BIT_MSC_COMMAND_START_DEBUG BIT(5)
+#define BIT_MSC_COMMAND_START_WRITE_BURST BIT(4)
+#define BIT_MSC_COMMAND_START_WRITE_STAT BIT(3)
+#define BIT_MSC_COMMAND_START_READ_DEVCAP BIT(2)
+#define BIT_MSC_COMMAND_START_MSC_MSG BIT(1)
+#define BIT_MSC_COMMAND_START_PEER BIT(0)
+
+/* CBUS MSC Command/Offset, default value: 0x00 */
+#define REG_MSC_CMD_OR_OFFSET 0x05b9
+
+/* CBUS MSC Transmit Data */
+#define REG_MSC_1ST_TRANSMIT_DATA 0x05ba
+#define REG_MSC_2ND_TRANSMIT_DATA 0x05bb
+
+/* CBUS MSC Requester Received Data */
+#define REG_MSC_MT_RCVD_DATA0 0x05bc
+#define REG_MSC_MT_RCVD_DATA1 0x05bd
+
+/* CBUS MSC Responder MSC_MSG Received Data */
+#define REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA 0x05bf
+#define REG_MSC_MR_MSC_MSG_RCVD_2ND_DATA 0x05c0
+
+/* CBUS MSC Heartbeat Control, default value: 0x27 */
+#define REG_MSC_HEARTBEAT_CTRL 0x05c4
+#define BIT_MSC_HEARTBEAT_CTRL_MSC_HB_EN BIT(7)
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_FAIL_LIMIT 0x70
+#define MSK_MSC_HEARTBEAT_CTRL_MSC_HB_PERIOD_MSB 0x0f
+
+/* CBUS MSC Compatibility Control, default value: 0x02 */
+#define REG_CBUS_MSC_COMPAT_CTRL 0x05c7
+#define BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN BIT(7)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_MSC_ON_CBUS BIT(6)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_DDC_ON_CBUS BIT(5)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_DDC_ERRORCODE BIT(3)
+#define BIT_CBUS_MSC_COMPAT_CTRL_DISABLE_GET_VS1_ERRORCODE BIT(2)
+
+/* CBUS3 Converter Control, default value: 0x24 */
+#define REG_CBUS3_CNVT 0x05dc
+#define MSK_CBUS3_CNVT_CBUS3_RETRYLMT 0xf0
+#define MSK_CBUS3_CNVT_CBUS3_PEERTOUT_SEL 0x0c
+#define BIT_CBUS3_CNVT_TEARCBUS_EN BIT(1)
+#define BIT_CBUS3_CNVT_CBUS3CNVT_EN BIT(0)
+
+/* Discovery Control1, default value: 0x24 */
+#define REG_DISC_CTRL1 0x05e0
+#define BIT_DISC_CTRL1_CBUS_INTR_EN BIT(7)
+#define BIT_DISC_CTRL1_HB_ONLY BIT(6)
+#define MSK_DISC_CTRL1_DISC_ATT 0x30
+#define MSK_DISC_CTRL1_DISC_CYC 0x0c
+#define BIT_DISC_CTRL1_DISC_EN BIT(0)
+
+#define VAL_PUP_OFF 0
+#define VAL_PUP_20K 1
+#define VAL_PUP_5K 2
+
+/* Discovery Control4, default value: 0x80 */
+#define REG_DISC_CTRL4 0x05e3
+#define MSK_DISC_CTRL4_CBUSDISC_PUP_SEL 0xc0
+#define MSK_DISC_CTRL4_CBUSIDLE_PUP_SEL 0x30
+#define VAL_DISC_CTRL4(pup_disc, pup_idle) (((pup_disc) << 6) | ((pup_idle) << 4))
+
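VAL_DISC_CTRL4() combines the two pull-up selections into one register value; for example (reusing the hypothetical sii8620_writeb() accessor from the earlier sketch), selecting a 5k pull-up during discovery and 20k when idle would be:

        /* (VAL_PUP_5K << 6) | (VAL_PUP_20K << 4) == 0x90 */
        sii8620_writeb(ctx, REG_DISC_CTRL4,
                       VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K));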
+/* Discovery Control5, default value: 0x03 */
+#define REG_DISC_CTRL5 0x05e4
+#define BIT_DISC_CTRL5_DSM_OVRIDE BIT(3)
+#define MSK_DISC_CTRL5_CBUSMHL_PUP_SEL 0x03
+
+/* Discovery Control8, default value: 0x81 */
+#define REG_DISC_CTRL8 0x05e7
+#define BIT_DISC_CTRL8_NOMHLINT_CLR_BYPASS BIT(7)
+#define BIT_DISC_CTRL8_DELAY_CBUS_INTR_EN BIT(0)
+
+/* Discovery Control9, default value: 0x54 */
+#define REG_DISC_CTRL9 0x05e8
+#define BIT_DISC_CTRL9_MHL3_RSEN_BYP BIT(7)
+#define BIT_DISC_CTRL9_MHL3DISC_EN BIT(6)
+#define BIT_DISC_CTRL9_WAKE_DRVFLT BIT(4)
+#define BIT_DISC_CTRL9_NOMHL_EST BIT(3)
+#define BIT_DISC_CTRL9_DISC_PULSE_PROCEED BIT(2)
+#define BIT_DISC_CTRL9_WAKE_PULSE_BYPASS BIT(1)
+#define BIT_DISC_CTRL9_VBUS_OUTPUT_CAPABILITY_SRC BIT(0)
+
+/* Discovery Status1, default value: 0x00 */
+#define REG_DISC_STAT1 0x05eb
+#define BIT_DISC_STAT1_PSM_OVRIDE BIT(5)
+#define MSK_DISC_STAT1_DISC_SM 0x0f
+
+/* Discovery Status2, default value: 0x00 */
+#define REG_DISC_STAT2 0x05ec
+#define BIT_DISC_STAT2_CBUS_OE_POL BIT(6)
+#define BIT_DISC_STAT2_CBUS_STATUS BIT(5)
+#define BIT_DISC_STAT2_RSEN BIT(4)
+
+#define MSK_DISC_STAT2_MHL_VRSN 0x0c
+#define VAL_DISC_STAT2_DEFAULT 0x00
+#define VAL_DISC_STAT2_MHL1_2 0x04
+#define VAL_DISC_STAT2_MHL3 0x08
+#define VAL_DISC_STAT2_RESERVED 0x0c
+
+#define MSK_DISC_STAT2_RGND 0x03
+#define VAL_RGND_OPEN 0x00
+#define VAL_RGND_2K 0x01
+#define VAL_RGND_1K 0x02
+#define VAL_RGND_SHORT 0x03
+
+/* Interrupt CBUS_reg1 INTR0, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0 0x05ed
+#define BIT_RGND_READY_INT BIT(6)
+#define BIT_CBUS_MHL12_DISCON_INT BIT(5)
+#define BIT_CBUS_MHL3_DISCON_INT BIT(4)
+#define BIT_NOT_MHL_EST_INT BIT(3)
+#define BIT_MHL_EST_INT BIT(2)
+#define BIT_MHL3_EST_INT BIT(1)
+#define VAL_CBUS_MHL_DISCON (BIT_CBUS_MHL12_DISCON_INT \
+ | BIT_CBUS_MHL3_DISCON_INT \
+ | BIT_NOT_MHL_EST_INT)
+
+/* Interrupt CBUS_reg1 INTR0 Mask, default value: 0x00 */
+#define REG_CBUS_DISC_INTR0_MASK 0x05ee
+
+#endif /* __SIL_SII8620_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index daecf1ad76a4..3a6309d7d8e4 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -138,12 +138,12 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
{
struct drm_device *dev = afbdev->helper.dev;
struct cirrus_device *cdev = dev->dev_private;
- u32 bpp, depth;
+ u32 bpp;
u32 size;
struct drm_gem_object *gobj;
-
int ret = 0;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
bpp, mode_cmd->pitches[0]))
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 76bcb43e7c06..2c3c0d4072ce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -52,10 +52,10 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
struct cirrus_device *cdev = dev->dev_private;
struct drm_gem_object *obj;
struct cirrus_framebuffer *cirrus_fb;
+ u32 bpp;
int ret;
- u32 bpp, depth;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
bpp, mode_cmd->pitches[0]))
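
Both cirrus hunks replace the removed drm_fb_get_bpp_depth() lookup with drm_format_plane_cpp(), which returns the bytes per pixel ("cpp") of one plane of a format; multiplying by 8 recovers the bits-per-pixel value the driver used before. A quick sanity check of the conversion:

        /* Sketch: XRGB8888 is a single-plane, 4-byte-per-pixel format. */
        u32 cpp = drm_format_plane_cpp(DRM_FORMAT_XRGB8888, 0);        /* 4 */
        u32 bpp = cpp * 8;                                              /* 32 */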
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 5e7e63ce7bce..d6da848f7c6f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
.ttm_tt_populate = cirrus_ttm_tt_populate,
.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
.init_mem_type = cirrus_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = cirrus_bo_evict_flags,
.move = NULL,
.verify_access = cirrus_bo_verify_access,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index e6862a744210..1d6ab371cd52 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -74,6 +74,8 @@ EXPORT_SYMBOL(drm_atomic_state_default_release);
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
+ kref_init(&state->ref);
+
/* TODO legacy paths should maybe do a better job about
* setting this appropriately?
*/
@@ -215,22 +217,16 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
EXPORT_SYMBOL(drm_atomic_state_clear);
/**
- * drm_atomic_state_free - free all memory for an atomic state
- * @state: atomic state to deallocate
+ * __drm_atomic_state_free - free all memory for an atomic state
+ * @ref: kref of the atomic state to deallocate
*
* This frees all memory associated with an atomic state, including all the
* per-object state for planes, crtcs and connectors.
*/
-void drm_atomic_state_free(struct drm_atomic_state *state)
+void __drm_atomic_state_free(struct kref *ref)
{
- struct drm_device *dev;
- struct drm_mode_config *config;
-
- if (!state)
- return;
-
- dev = state->dev;
- config = &dev->mode_config;
+ struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
+ struct drm_mode_config *config = &state->dev->mode_config;
drm_atomic_state_clear(state);
@@ -243,7 +239,7 @@ void drm_atomic_state_free(struct drm_atomic_state *state)
kfree(state);
}
}
-EXPORT_SYMBOL(drm_atomic_state_free);
+EXPORT_SYMBOL(__drm_atomic_state_free);
/**
* drm_atomic_get_crtc_state - get crtc state
@@ -712,7 +708,9 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
state->src_w = val;
} else if (property == config->prop_src_h) {
state->src_h = val;
- } else if (property == config->rotation_property) {
+ } else if (property == plane->rotation_property) {
+ if (!is_power_of_2(val & DRM_ROTATE_MASK))
+ return -EINVAL;
state->rotation = val;
} else if (property == plane->zpos_property) {
state->zpos = val;
@@ -770,7 +768,7 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->src_w;
} else if (property == config->prop_src_h) {
*val = state->src_h;
- } else if (property == config->rotation_property) {
+ } else if (property == plane->rotation_property) {
*val = state->rotation;
} else if (property == plane->zpos_property) {
*val = state->zpos;
@@ -1468,7 +1466,7 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
static struct drm_pending_vblank_event *create_vblank_event(
struct drm_device *dev, struct drm_file *file_priv,
- struct fence *fence, uint64_t user_data)
+ struct dma_fence *fence, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
int ret;
@@ -1745,7 +1743,7 @@ retry:
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
/*
* Unlike commit, check_only does not clean up state.
- * Below we call drm_atomic_state_free for it.
+ * Below we call drm_atomic_state_put for it.
*/
ret = drm_atomic_check_only(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
@@ -1778,8 +1776,7 @@ out:
goto retry;
}
- if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
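
With drm_atomic_state now reference counted (kref_init() in drm_atomic_state_init(), freeing moved into __drm_atomic_state_free()), callers no longer need the old "commit takes ownership on success" rule: they simply drop their own reference unconditionally. The resulting caller pattern looks roughly like:

        struct drm_atomic_state *state = drm_atomic_state_alloc(dev);  /* ref == 1 */

        /* ... build up plane/crtc/connector state ... */

        ret = drm_atomic_commit(state); /* commit grabs its own reference as needed */
        drm_atomic_state_put(state);    /* always drop ours, success or failure */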
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 21f992605541..50077961228a 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -30,7 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "drm_crtc_internal.h"
@@ -458,10 +458,11 @@ mode_fixup(struct drm_atomic_state *state)
* removed from the crtc.
* crtc_state->active_changed is set when crtc_state->active changes,
* which is used for dpms.
+ * See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
*
- * Drivers which update ->mode_changed (e.g. in their ->atomic_check hooks if a
+ * Drivers which set ->mode_changed (e.g. in their ->atomic_check hooks if a
* plane update can't be done without a full modeset) _must_ call this function
* afterwards after that change. It is permitted to call this function multiple
* times for the same update, e.g. when the ->atomic_check functions depend upon
@@ -510,9 +511,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
for_each_connector_in_state(state, connector, connector_state, i) {
/*
- * This only sets crtc->mode_changed for routing changes,
- * drivers must set crtc->mode_changed themselves when connector
- * properties need to be updated.
+ * This only sets crtc->connectors_changed for routing changes,
+ * drivers must set crtc->connectors_changed themselves when
+ * connector properties need to be updated.
*/
ret = update_connector_routing(state, connector,
connector_state);
@@ -1012,7 +1013,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
* drm_atomic_helper_swap_state() so it uses the current plane state (and
* just uses the atomic state to find the changed planes)
*
- * Returns zero if success or < 0 if fence_wait() fails.
+ * Returns zero on success or < 0 if dma_fence_wait() fails.
*/
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -1036,11 +1037,11 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
* still interrupt the operation. Instead of blocking until the
* timer expires, make the wait interruptible.
*/
- ret = fence_wait(plane_state->fence, pre_swap);
+ ret = dma_fence_wait(plane_state->fence, pre_swap);
if (ret)
return ret;
- fence_put(plane_state->fence);
+ dma_fence_put(plane_state->fence);
plane_state->fence = NULL;
}
@@ -1203,7 +1204,7 @@ static void commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_cleanup_done(state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
static void commit_work(struct work_struct *work)
@@ -1285,6 +1286,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 * make sure work items don't artificially stall on each other.
*/
+ drm_atomic_state_get(state);
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
@@ -1586,7 +1588,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
*
* This signals completion of the atomic update @state, including any cleanup
* work. If used, it must be called right before calling
- * drm_atomic_state_free().
+ * drm_atomic_state_put().
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
@@ -2109,18 +2111,13 @@ retry:
state->legacy_cursor_update = true;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
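
With this series the atomic state is reference counted instead of being freed by whichever function happened to own it, so the repeated fail/backoff tails above collapse into an unconditional drm_atomic_state_put(). From a caller's point of view the new lifecycle looks like the following sketch (the helper names are from this patch; the surrounding driver code is hypothetical):

	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_state_alloc(dev);	/* refcount starts at 1 */
	if (!state)
		return -ENOMEM;

	/* ... acquire locks, build up the new state ... */

	ret = drm_atomic_commit(state);	/* commit machinery takes its own references */

	/* Drop our reference unconditionally, success or failure. */
	drm_atomic_state_put(state);
	return ret;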
@@ -2182,18 +2179,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2322,18 +2314,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2408,7 +2395,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
primary_state->crtc_h = vdisplay;
primary_state->src_x = set->x << 16;
primary_state->src_y = set->y << 16;
- if (primary_state->rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
+ if (drm_rotation_90_or_270(primary_state->rotation)) {
primary_state->src_w = vdisplay << 16;
primary_state->src_h = hdisplay << 16;
} else {
@@ -2475,11 +2462,8 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
}
err = drm_atomic_commit(state);
-
free:
- if (err < 0)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return err;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
@@ -2530,7 +2514,7 @@ retry:
err = drm_atomic_helper_disable_all(dev, &ctx);
if (err < 0) {
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
state = ERR_PTR(err);
goto unlock;
}
@@ -2619,18 +2603,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2679,18 +2658,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2739,18 +2713,13 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2823,18 +2792,13 @@ retry:
}
ret = drm_atomic_nonblocking_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -2910,19 +2874,14 @@ retry:
crtc_state->active = active;
ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
connector->dpms = old_mode;
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@@ -3329,7 +3288,7 @@ drm_atomic_helper_duplicate_state(struct drm_device *dev,
free:
if (err < 0) {
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
state = ERR_PTR(err);
}
@@ -3444,22 +3403,14 @@ retry:
goto fail;
ret = drm_atomic_commit(state);
- if (ret)
- goto fail;
-
- /* Driver takes ownership of state on successful commit. */
-
- drm_property_unreference_blob(blob);
-
- return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
drm_property_unreference_blob(blob);
-
return ret;
+
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 85172a977bf3..1f2412c7ccfd 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -89,7 +89,7 @@
* On top of this basic transformation additional properties can be exposed by
* the driver:
*
- * - Rotation is set up with drm_mode_create_rotation_property(). It adds a
+ * - Rotation is set up with drm_plane_create_rotation_property(). It adds a
* rotation and reflection step between the source and destination rectangles.
* Without this property the rectangle is only scaled, but not rotated or
* reflected.
@@ -105,18 +105,12 @@
*/
/**
- * drm_mode_create_rotation_property - create a new rotation property
- * @dev: DRM device
+ * drm_plane_create_rotation_property - create a new rotation property
+ * @plane: drm plane
+ * @rotation: initial value of the rotation property
* @supported_rotations: bitmask of supported rotations and reflections
*
* This creates a new property with the selected support for transformations.
- * The resulting property should be stored in @rotation_property in
- * &drm_mode_config. It then must be attached to each plane which supports
- * rotations using drm_object_attach_property().
- *
- * FIXME: Probably better if the rotation property is created on each plane,
- * like the zpos property. Otherwise it's not possible to allow different
- * rotation modes on different planes.
*
 * Since a rotation by 180 degrees is the same as reflecting both along the x
 * and the y axis, the rotation property is somewhat redundant. Drivers can use
@@ -144,8 +138,9 @@
* rotation. After reflection, the rotation is applied to the image sampled from
* the source rectangle, before scaling it to fit the destination rectangle.
*/
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
- unsigned int supported_rotations)
+int drm_plane_create_rotation_property(struct drm_plane *plane,
+ unsigned int rotation,
+ unsigned int supported_rotations)
{
static const struct drm_prop_enum_list props[] = {
{ __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" },
@@ -155,12 +150,28 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
{ __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" },
{ __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" },
};
+ struct drm_property *prop;
+
+ WARN_ON((supported_rotations & DRM_ROTATE_MASK) == 0);
+ WARN_ON(!is_power_of_2(rotation & DRM_ROTATE_MASK));
+ WARN_ON(rotation & ~supported_rotations);
- return drm_property_create_bitmask(dev, 0, "rotation",
+ prop = drm_property_create_bitmask(plane->dev, 0, "rotation",
props, ARRAY_SIZE(props),
supported_rotations);
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&plane->base, prop, rotation);
+
+ if (plane->state)
+ plane->state->rotation = rotation;
+
+ plane->rotation_property = prop;
+
+ return 0;
}
-EXPORT_SYMBOL(drm_mode_create_rotation_property);
+EXPORT_SYMBOL(drm_plane_create_rotation_property);
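
With the property now created and attached per plane, a driver enables rotation support with a single call during plane setup. A minimal sketch, assuming the plane has already been initialized and supports only these transforms:

	ret = drm_plane_create_rotation_property(plane,
						 DRM_ROTATE_0,
						 DRM_ROTATE_0 |
						 DRM_ROTATE_180 |
						 DRM_REFLECT_X |
						 DRM_REFLECT_Y);
	if (ret)
		return ret;

This also lets different planes of the same device expose different supported rotations, which the old per-device property could not.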
/**
* drm_rotation_simplify() - Try to simplify the rotation
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2d7bedf28647..13441e21117c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -40,7 +40,7 @@
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
-#include <drm/drm_framebuffer.h>
+#include <drm/drm_debugfs_crc.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -122,6 +122,10 @@ static int drm_crtc_register_all(struct drm_device *dev)
int ret = 0;
drm_for_each_crtc(crtc, dev) {
+ if (drm_debugfs_crtc_add(crtc))
+ DRM_ERROR("Failed to initialize debugfs entry for CRTC '%s'.\n",
+ crtc->name);
+
if (crtc->funcs->late_register)
ret = crtc->funcs->late_register(crtc);
if (ret)
@@ -138,9 +142,29 @@ static void drm_crtc_unregister_all(struct drm_device *dev)
drm_for_each_crtc(crtc, dev) {
if (crtc->funcs->early_unregister)
crtc->funcs->early_unregister(crtc);
+ drm_debugfs_crtc_remove(crtc);
}
}
+static int drm_crtc_crc_init(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_DEBUG_FS
+ spin_lock_init(&crtc->crc.lock);
+ init_waitqueue_head(&crtc->crc.wq);
+ crtc->crc.source = kstrdup("auto", GFP_KERNEL);
+ if (!crtc->crc.source)
+ return -ENOMEM;
+#endif
+ return 0;
+}
+
+static void drm_crtc_crc_fini(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_DEBUG_FS
+ kfree(crtc->crc.source);
+#endif
+}
+
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
@@ -210,6 +234,12 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (cursor)
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
+ ret = drm_crtc_crc_init(crtc);
+ if (ret) {
+ drm_mode_object_unregister(dev, &crtc->base);
+ return ret;
+ }
+
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&crtc->base, config->prop_active, 0);
drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
@@ -236,6 +266,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
* the indices on the drm_crtc after us in the crtc_list.
*/
+ drm_crtc_crc_fini(crtc);
+
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
@@ -695,8 +727,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
if (crtc->state &&
- crtc->primary->state->rotation & (DRM_ROTATE_90 |
- DRM_ROTATE_270))
+ drm_rotation_90_or_270(crtc->primary->state->rotation))
swap(hdisplay, vdisplay);
return drm_framebuffer_check_src_coords(x << 16, y << 16,
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 1205790ed960..800055c39cdb 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -415,5 +415,37 @@ void drm_debugfs_connector_remove(struct drm_connector *connector)
connector->debugfs_entry = NULL;
}
-#endif /* CONFIG_DEBUG_FS */
+int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+{
+ struct drm_minor *minor = crtc->dev->primary;
+ struct dentry *root;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "crtc-%d", crtc->index);
+ if (!name)
+ return -ENOMEM;
+
+ root = debugfs_create_dir(name, minor->debugfs_root);
+ kfree(name);
+ if (!root)
+ return -ENOMEM;
+
+ crtc->debugfs_entry = root;
+
+ if (drm_debugfs_crtc_crc_add(crtc))
+ goto error;
+ return 0;
+
+error:
+ drm_debugfs_crtc_remove(crtc);
+ return -ENOMEM;
+}
+
+void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
+{
+ debugfs_remove_recursive(crtc->debugfs_entry);
+ crtc->debugfs_entry = NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
new file mode 100644
index 000000000000..00e771fb7df2
--- /dev/null
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ * Copyright © 2016 Collabora Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Based on code from the i915 driver.
+ * Original author: Damien Lespiau <damien.lespiau@intel.com>
+ *
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <drm/drmP.h>
+#include "drm_internal.h"
+
+/**
+ * DOC: CRC ABI
+ *
+ * DRM device drivers can provide CRC information of each frame to userspace,
+ * computed as the frame reached a given hardware component (a "source").
+ *
+ * Userspace can control generation of CRCs in a given CRTC by writing to the
+ * file dri/0/crtc-N/crc/control in debugfs, with N being the index of the CRTC.
+ * Accepted values are source names (which are driver-specific) and the "auto"
+ * keyword, which will let the driver select a default source of frame CRCs
+ * for this CRTC.
+ *
+ * Once frame CRC generation is enabled, userspace can capture them by reading
+ * the dri/0/crtc-N/crc/data file. Each line in that file contains the frame
+ * number in the first field and then a number of unsigned integer fields
+ * containing the CRC data. Fields are separated by a single space and the number
+ * of CRC fields is source-specific.
+ *
+ * Note that in some cases the CRC is computed in a specified way over the
+ * frame contents as supplied by userspace (eDP 1.3). In general, though, the
+ * CRC computation is unspecified and runs on frame contents that have already
+ * been processed in equally unspecified ways, so userspace cannot rely on
+ * being able to generate matching CRC values for the frame contents that it
+ * submits. In that general case, the best userspace can do is to compare the
+ * reported CRCs of frames that should have the same contents.
+ */
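
For illustration, a minimal userspace consumer of this ABI might look like the sketch below (assuming debugfs is mounted at /sys/kernel/debug; return-value checks are trimmed for brevity):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char line[256];
	ssize_t n;
	int fd;

	/* Select the CRC source for CRTC 0; "auto" picks the driver default. */
	fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
	write(fd, "auto", strlen("auto"));
	close(fd);

	/* Opening the data file starts CRC generation; each read is one frame. */
	fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/data", O_RDONLY);
	while ((n = read(fd, line, sizeof(line) - 1)) > 0) {
		line[n] = '\0';
		printf("%s", line);	/* "<frame> 0x<crc0> [0x<crc1> ...]" */
	}
	close(fd);
	return 0;
}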
+
+static int crc_control_show(struct seq_file *m, void *data)
+{
+ struct drm_crtc *crtc = m->private;
+
+ seq_printf(m, "%s\n", crtc->crc.source);
+
+ return 0;
+}
+
+static int crc_control_open(struct inode *inode, struct file *file)
+{
+ struct drm_crtc *crtc = inode->i_private;
+
+ return single_open(file, crc_control_show, crtc);
+}
+
+static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_crtc *crtc = m->private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ char *source;
+
+ if (len == 0)
+ return 0;
+
+ if (len > PAGE_SIZE - 1) {
+ DRM_DEBUG_KMS("Expected < %lu bytes into crtc crc control\n",
+ PAGE_SIZE);
+ return -E2BIG;
+ }
+
+ source = memdup_user_nul(ubuf, len);
+ if (IS_ERR(source))
+ return PTR_ERR(source);
+
+	/* memdup_user_nul() NUL-terminates at [len], so a newline sits at [len - 1] */
+	if (source[len - 1] == '\n')
+		source[len - 1] = '\0';
+
+ spin_lock_irq(&crc->lock);
+
+ if (crc->opened) {
+ spin_unlock_irq(&crc->lock);
+ kfree(source);
+ return -EBUSY;
+ }
+
+ kfree(crc->source);
+ crc->source = source;
+
+ spin_unlock_irq(&crc->lock);
+
+ *offp += len;
+ return len;
+}
+
+static const struct file_operations drm_crtc_crc_control_fops = {
+ .owner = THIS_MODULE,
+ .open = crc_control_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = crc_control_write
+};
+
+static int crtc_crc_open(struct inode *inode, struct file *filep)
+{
+ struct drm_crtc *crtc = inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entries = NULL;
+ size_t values_cnt;
+ int ret;
+
+ if (crc->opened)
+ return -EBUSY;
+
+ ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(values_cnt > DRM_MAX_CRC_NR)) {
+ ret = -EINVAL;
+ goto err_disable;
+ }
+
+ if (WARN_ON(values_cnt == 0)) {
+ ret = -EINVAL;
+ goto err_disable;
+ }
+
+ entries = kcalloc(DRM_CRC_ENTRIES_NR, sizeof(*entries), GFP_KERNEL);
+ if (!entries) {
+ ret = -ENOMEM;
+ goto err_disable;
+ }
+
+ spin_lock_irq(&crc->lock);
+ crc->entries = entries;
+ crc->values_cnt = values_cnt;
+ crc->opened = true;
+ spin_unlock_irq(&crc->lock);
+
+ return 0;
+
+err_disable:
+ crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+ return ret;
+}
+
+static int crtc_crc_release(struct inode *inode, struct file *filep)
+{
+ struct drm_crtc *crtc = filep->f_inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ size_t values_cnt;
+
+ spin_lock_irq(&crc->lock);
+ kfree(crc->entries);
+ crc->entries = NULL;
+ crc->head = 0;
+ crc->tail = 0;
+ crc->values_cnt = 0;
+ crc->opened = false;
+ spin_unlock_irq(&crc->lock);
+
+ crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+
+ return 0;
+}
+
+static int crtc_crc_data_count(struct drm_crtc_crc *crc)
+{
+ assert_spin_locked(&crc->lock);
+ return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
+}
+
+/*
+ * One frame field of 10 chars plus a number of CRC fields of 10 chars each,
+ * each preceded by a separating space, with a newline at the end and
+ * NUL-terminated.
+ */
+#define LINE_LEN(values_cnt) (10 + 11 * values_cnt + 1 + 1)
+#define MAX_LINE_LEN (LINE_LEN(DRM_MAX_CRC_NR))
+
+static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
+ size_t count, loff_t *pos)
+{
+ struct drm_crtc *crtc = filep->f_inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entry;
+ char buf[MAX_LINE_LEN];
+ int ret, i;
+
+ spin_lock_irq(&crc->lock);
+
+ if (!crc->source) {
+ spin_unlock_irq(&crc->lock);
+ return 0;
+ }
+
+ /* Nothing to read? */
+ while (crtc_crc_data_count(crc) == 0) {
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock_irq(&crc->lock);
+ return -EAGAIN;
+ }
+
+ ret = wait_event_interruptible_lock_irq(crc->wq,
+ crtc_crc_data_count(crc),
+ crc->lock);
+ if (ret) {
+ spin_unlock_irq(&crc->lock);
+ return ret;
+ }
+ }
+
+ /* We know we have an entry to be read */
+ entry = &crc->entries[crc->tail];
+
+ if (count < LINE_LEN(crc->values_cnt)) {
+ spin_unlock_irq(&crc->lock);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(DRM_CRC_ENTRIES_NR);
+ crc->tail = (crc->tail + 1) & (DRM_CRC_ENTRIES_NR - 1);
+
+ spin_unlock_irq(&crc->lock);
+
+ if (entry->has_frame_counter)
+ sprintf(buf, "0x%08x", entry->frame);
+ else
+ sprintf(buf, "XXXXXXXXXX");
+
+ for (i = 0; i < crc->values_cnt; i++)
+ sprintf(buf + 10 + i * 11, " 0x%08x", entry->crcs[i]);
+ sprintf(buf + 10 + crc->values_cnt * 11, "\n");
+
+ if (copy_to_user(user_buf, buf, LINE_LEN(crc->values_cnt)))
+ return -EFAULT;
+
+ return LINE_LEN(crc->values_cnt);
+}
+
+static const struct file_operations drm_crtc_crc_data_fops = {
+ .owner = THIS_MODULE,
+ .open = crtc_crc_open,
+ .read = crtc_crc_read,
+ .release = crtc_crc_release,
+};
+
+/**
+ * drm_debugfs_crtc_crc_add - Add files to debugfs for capture of frame CRCs
+ * @crtc: CRTC to whom the frames will belong
+ *
+ * Adds files to debugfs directory that allows userspace to control the
+ * generation of frame CRCs and to read them.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
+{
+ struct dentry *crc_ent, *ent;
+
+ if (!crtc->funcs->set_crc_source)
+ return 0;
+
+ crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
+ if (!crc_ent)
+ return -ENOMEM;
+
+	ent = debugfs_create_file("control", S_IRUGO | S_IWUSR, crc_ent, crtc,
+ &drm_crtc_crc_control_fops);
+ if (!ent)
+ goto error;
+
+ ent = debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
+ &drm_crtc_crc_data_fops);
+ if (!ent)
+ goto error;
+
+ return 0;
+
+error:
+ debugfs_remove_recursive(crc_ent);
+
+ return -ENOMEM;
+}
+
+/**
+ * drm_crtc_add_crc_entry - Add entry with CRC information for a frame
+ * @crtc: CRTC to which the frame belongs
+ * @has_frame: whether this entry has a frame number to go with
+ * @frame: number of the frame these CRCs are about
+ * @crcs: array of CRC values, with length matching &drm_crtc_crc.values_cnt
+ *
+ * For each frame, the driver polls the source of CRCs for new data and calls
+ * this function to add them to the buffer from where userspace reads.
+ */
+int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ uint32_t frame, uint32_t *crcs)
+{
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entry;
+ int head, tail;
+
+ assert_spin_locked(&crc->lock);
+
+ /* Caller may not have noticed yet that userspace has stopped reading */
+ if (!crc->opened)
+ return -EINVAL;
+
+ head = crc->head;
+ tail = crc->tail;
+
+ if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
+ DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+ return -ENOBUFS;
+ }
+
+ entry = &crc->entries[head];
+ entry->frame = frame;
+ entry->has_frame_counter = has_frame;
+ memcpy(&entry->crcs, crcs, sizeof(*crcs) * crc->values_cnt);
+
+ head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
+ crc->head = head;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_crtc_add_crc_entry);
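
On the driver side the expected flow is to implement &drm_crtc_funcs.set_crc_source and then feed one entry per frame from the interrupt or vblank path. A hedged sketch, assuming set_crc_source() reported a single CRC value and with foo_read_crc_register() standing in for a hypothetical hardware access:

	static void foo_report_crc(struct drm_crtc *crtc, u32 frame)
	{
		struct drm_crtc_crc *crc = &crtc->crc;
		unsigned long flags;
		u32 crcs[1];

		crcs[0] = foo_read_crc_register(crtc);	/* hypothetical hw read */

		spin_lock_irqsave(&crc->lock, flags);
		drm_crtc_add_crc_entry(crtc, true, frame, crcs);
		spin_unlock_irqrestore(&crc->lock, flags);

		/* Unblock any reader sleeping in crtc_crc_read(). */
		wake_up_interruptible(&crc->wq);
	}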
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index a7b2a751f6fe..e02563966271 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -142,12 +142,25 @@ static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
sizeof(dp_dual_mode_hdmi_id)) == 0;
}
+static bool is_type1_adaptor(uint8_t adaptor_id)
+{
+ return adaptor_id == 0 || adaptor_id == 0xff;
+}
+
static bool is_type2_adaptor(uint8_t adaptor_id)
{
return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
DP_DUAL_MODE_REV_TYPE2);
}
+static bool is_lspcon_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN],
+ const uint8_t adaptor_id)
+{
+ return is_hdmi_adaptor(hdmi_id) &&
+ (adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
+ DP_DUAL_MODE_TYPE_HAS_DPCD));
+}
+
/**
* drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
* @adapter: I2C adapter for the DDC bus
@@ -185,6 +198,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
hdmi_id, sizeof(hdmi_id));
+ DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n",
+ ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret);
if (ret)
return DRM_DP_DUAL_MODE_UNKNOWN;
@@ -202,13 +217,26 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
+ DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n",
+ adaptor_id, ret);
if (ret == 0) {
+ if (is_lspcon_adaptor(hdmi_id, adaptor_id))
+ return DRM_DP_DUAL_MODE_LSPCON;
if (is_type2_adaptor(adaptor_id)) {
if (is_hdmi_adaptor(hdmi_id))
return DRM_DP_DUAL_MODE_TYPE2_HDMI;
else
return DRM_DP_DUAL_MODE_TYPE2_DVI;
}
+ /*
+	 * If this is neither a proper type 1 ID nor a broken type 1
+	 * adaptor as described above, assume type 1 anyway, but let the
+	 * user know that we may have misdetected the type.
+ */
+ if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
+ DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n",
+ adaptor_id);
}
if (is_hdmi_adaptor(hdmi_id))
@@ -364,3 +392,96 @@ const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
}
}
EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
+
+/**
+ * drm_lspcon_get_mode - get the LSPCON's current mode of operation by
+ * reading offset (0x80, 0x41)
+ * @adapter: I2C-over-aux adapter
+ * @mode: pointer filled with the current LSPCON mode of operation
+ *
+ * Returns:
+ * 0 on success, with @mode set to the current mode of operation, or a
+ * negative error code on failure.
+ */
+int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode *mode)
+{
+ u8 data;
+ int ret = 0;
+
+ if (!mode) {
+ DRM_ERROR("NULL input\n");
+ return -EINVAL;
+ }
+
+ /* Read Status: i2c over aux */
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE,
+ &data, sizeof(data));
+ if (ret < 0) {
+ DRM_ERROR("LSPCON read(0x80, 0x41) failed\n");
+ return -EFAULT;
+ }
+
+ if (data & DP_DUAL_MODE_LSPCON_MODE_PCON)
+ *mode = DRM_LSPCON_MODE_PCON;
+ else
+ *mode = DRM_LSPCON_MODE_LS;
+ return 0;
+}
+EXPORT_SYMBOL(drm_lspcon_get_mode);
+
+/**
+ * drm_lspcon_set_mode - change the LSPCON's mode of operation by
+ * writing offset (0x80, 0x40)
+ * @adapter: I2C-over-aux adapter
+ * @mode: required mode of operation
+ *
+ * Returns:
+ * 0 on success, or a negative error code on failure or timeout.
+ */
+int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode mode)
+{
+ u8 data = 0;
+ int ret;
+ int time_out = 200;
+ enum drm_lspcon_mode current_mode;
+
+ if (mode == DRM_LSPCON_MODE_PCON)
+ data = DP_DUAL_MODE_LSPCON_MODE_PCON;
+
+ /* Change mode */
+ ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_LSPCON_MODE_CHANGE,
+ &data, sizeof(data));
+ if (ret < 0) {
+ DRM_ERROR("LSPCON mode change failed\n");
+ return ret;
+ }
+
+ /*
+ * Confirm mode change by reading the status bit.
+ * Sometimes, it takes a while to change the mode,
+ * so wait and retry until time out or done.
+ */
+	do {
+		ret = drm_lspcon_get_mode(adapter, &current_mode);
+		if (ret) {
+			DRM_ERROR("can't confirm LSPCON mode change\n");
+			return ret;
+		}
+
+		if (current_mode == mode) {
+			DRM_DEBUG_KMS("LSPCON mode changed to %s\n",
+				      mode == DRM_LSPCON_MODE_LS ?
+				      "LS" : "PCON");
+			return 0;
+		}
+
+		msleep(10);
+		time_out -= 10;
+	} while (time_out);
+
+ DRM_ERROR("LSPCON mode change timed out\n");
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(drm_lspcon_set_mode);
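
A short sketch of how a caller might use the two helpers above to force an LSPCON into PCON mode; obtaining the I2C-over-aux adapter is a hypothetical driver detail:

	enum drm_lspcon_mode mode;
	int ret;

	ret = drm_lspcon_get_mode(adapter, &mode);
	if (ret)
		return ret;

	if (mode != DRM_LSPCON_MODE_PCON) {
		/* May block for up to ~200 ms while the change is confirmed. */
		ret = drm_lspcon_set_mode(adapter, DRM_LSPCON_MODE_PCON);
		if (ret)
			return ret;
	}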
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ec77bd3e1f08..9506933b41cd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1260,6 +1260,34 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
return ret == xfers ? 0 : -1;
}
+static void connector_bad_edid(struct drm_connector *connector,
+ u8 *edid, int num_blocks)
+{
+ int i;
+
+ if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS))
+ return;
+
+ dev_warn(connector->dev->dev,
+ "%s: EDID is invalid:\n",
+ connector->name);
+ for (i = 0; i < num_blocks; i++) {
+ u8 *block = edid + i * EDID_LENGTH;
+ char prefix[20];
+
+ if (drm_edid_is_zero(block, EDID_LENGTH))
+ sprintf(prefix, "\t[%02x] ZERO ", i);
+ else if (!drm_edid_block_valid(block, i, false, NULL))
+ sprintf(prefix, "\t[%02x] BAD ", i);
+ else
+ sprintf(prefix, "\t[%02x] GOOD ", i);
+
+ print_hex_dump(KERN_WARNING,
+ prefix, DUMP_PREFIX_NONE, 16, 1,
+ block, EDID_LENGTH, false);
+ }
+}
+
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@@ -1282,20 +1310,19 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
void *data)
{
int i, j = 0, valid_extensions = 0;
- u8 *block, *new;
- bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+ u8 *edid, *new;
- if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
/* base block fetch */
for (i = 0; i < 4; i++) {
- if (get_edid_block(data, block, 0, EDID_LENGTH))
+ if (get_edid_block(data, edid, 0, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block, 0, print_bad_edid,
+ if (drm_edid_block_valid(edid, 0, false,
&connector->edid_corrupt))
break;
- if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+ if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
connector->null_edid_counter++;
goto carp;
}
@@ -1304,58 +1331,62 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
goto carp;
/* if there's no extensions, we're done */
- if (block[0x7e] == 0)
- return (struct edid *)block;
+ valid_extensions = edid[0x7e];
+ if (valid_extensions == 0)
+ return (struct edid *)edid;
- new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
- block = new;
+ edid = new;
+
+ for (j = 1; j <= edid[0x7e]; j++) {
+ u8 *block = edid + j * EDID_LENGTH;
- for (j = 1; j <= block[0x7e]; j++) {
for (i = 0; i < 4; i++) {
- if (get_edid_block(data,
- block + (valid_extensions + 1) * EDID_LENGTH,
- j, EDID_LENGTH))
+ if (get_edid_block(data, block, j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block + (valid_extensions + 1)
- * EDID_LENGTH, j,
- print_bad_edid,
- NULL)) {
- valid_extensions++;
+ if (drm_edid_block_valid(block, j, false, NULL))
break;
- }
}
- if (i == 4 && print_bad_edid) {
- dev_warn(connector->dev->dev,
- "%s: Ignoring invalid EDID block %d.\n",
- connector->name, j);
-
- connector->bad_edid_counter++;
- }
+ if (i == 4)
+ valid_extensions--;
}
- if (valid_extensions != block[0x7e]) {
- block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
- block[0x7e] = valid_extensions;
- new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (valid_extensions != edid[0x7e]) {
+ u8 *base;
+
+ connector_bad_edid(connector, edid, edid[0x7e] + 1);
+
+ new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
- block = new;
- }
- return (struct edid *)block;
+ base = new;
+ for (i = 0; i <= edid[0x7e]; i++) {
+ u8 *block = edid + i * EDID_LENGTH;
-carp:
- if (print_bad_edid) {
- dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
- connector->name, j);
+ if (!drm_edid_block_valid(block, i, false, NULL))
+ continue;
+
+ memcpy(base, block, EDID_LENGTH);
+ base += EDID_LENGTH;
+		}
+
+		/*
+		 * Fix up the extension count and the base block checksum only
+		 * now, so that the copy loop above walks all original blocks.
+		 */
+		new[EDID_LENGTH-1] += new[0x7e] - valid_extensions;
+		new[0x7e] = valid_extensions;
+
+		kfree(edid);
+		edid = new;
}
- connector->bad_edid_counter++;
+ return (struct edid *)edid;
+
+carp:
+ connector_bad_edid(connector, edid, 1);
out:
- kfree(block);
+ kfree(edid);
return NULL;
}
EXPORT_SYMBOL_GPL(drm_do_get_edid);
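
For callers nothing changes: they still supply a block-read callback and get back an EDID with invalid extension blocks filtered out. A minimal sketch of such a caller, with foo_read_edid_block() standing in for a hypothetical bus access routine:

	static int foo_get_edid_block(void *data, u8 *buf, unsigned int block,
				      size_t len)
	{
		struct foo_device *foo = data;

		/* hypothetical: fetch 'len' bytes of EDID block 'block' */
		return foo_read_edid_block(foo, buf, block, len);
	}

	struct edid *edid = drm_do_get_edid(connector, foo_get_edid_block, foo);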
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 1fd6eac1400c..4c6664407bfb 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -176,20 +176,20 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
+ const struct drm_format_info *info;
struct drm_fb_cma *fb_cma;
struct drm_gem_cma_object *objs[4];
struct drm_gem_object *obj;
- unsigned int hsub;
- unsigned int vsub;
int ret;
int i;
- hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info)
+ return ERR_PTR(-EINVAL);
- for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
- unsigned int width = mode_cmd->width / (i ? hsub : 1);
- unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
unsigned int min_size;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
@@ -200,7 +200,7 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
}
min_size = (height - 1) * mode_cmd->pitches[i]
- + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ + width * info->cpp[i]
+ mode_cmd->offsets[i];
if (obj->size < min_size) {
@@ -269,12 +269,15 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ const struct drm_format_info *info;
+ int i;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
(char *)&fb->pixel_format);
- for (i = 0; i < n; i++) {
+ info = drm_format_info(fb->pixel_format);
+
+ for (i = 0; i < info->num_planes; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
drm_gem_cma_describe(fb_cma->obj[i], m);
@@ -557,7 +560,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
- drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
+ if (fbdev_cma->fb_helper.fbdev)
+ drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
if (fbdev_cma->fb) {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6c75e62c0b22..36797c465edc 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -372,9 +372,7 @@ fail:
if (ret == -EDEADLK)
goto backoff;
- if (ret != 0)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
backoff:
@@ -399,11 +397,10 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
- if (dev->mode_config.rotation_property) {
+ if (plane->rotation_property)
drm_mode_plane_set_obj_prop(plane,
- dev->mode_config.rotation_property,
+ plane->rotation_property,
DRM_ROTATE_0);
- }
}
for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -1238,11 +1235,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
- /* Need to resize the fb object !!! */
- if (var->bits_per_pixel > fb->bits_per_pixel ||
- var->xres > fb->width || var->yres > fb->height ||
- var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
- DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+ /*
+	 * Changes to struct fb_var_screeninfo are currently not pushed back
+ * to KMS, hence fail if different settings are requested.
+ */
+ if (var->bits_per_pixel != fb->bits_per_pixel ||
+ var->xres != fb->width || var->yres != fb->height ||
+ var->xres_virtual != fb->width || var->yres_virtual != fb->height) {
+ DRM_DEBUG("fb userspace requested width/height/bpp different than current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
@@ -1388,16 +1388,13 @@ retry:
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
-
fail:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
if (ret == -EDEADLK)
goto backoff;
- if (ret != 0)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
return ret;
backoff:
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index e84faecf5225..cf993dbf602e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -663,6 +663,10 @@ void drm_event_cancel_free(struct drm_device *dev,
list_del(&p->pending_link);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (p->fence)
+ dma_fence_put(p->fence);
+
kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
@@ -692,8 +696,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
}
if (e->fence) {
- fence_signal(e->fence);
- fence_put(e->fence);
+ dma_fence_signal(e->fence);
+ dma_fence_put(e->fence);
}
if (!e->file_priv) {
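
Beyond the mechanical rename, the drm_event_cancel_free() hunk above plugs a reference leak: an event cancelled before delivery must drop the fence it holds, mirroring what drm_send_event_locked() does on delivery. The resulting ownership rule for code attaching a fence to a pending event, as a sketch:

	e->fence = dma_fence_get(fence);	/* the event owns one reference */

	/*
	 * The core balances it from here: drm_send_event_locked() signals
	 * and puts the fence on delivery, and drm_event_cancel_free() now
	 * puts it if the event is cancelled first.
	 */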
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 29c56b4331e0..cbb8b77c363c 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -102,83 +102,105 @@ char *drm_get_format_name(uint32_t format)
}
EXPORT_SYMBOL(drm_get_format_name);
+/*
+ * Internal function to query information for a given format. See
+ * drm_format_info() for the public API.
+ */
+const struct drm_format_info *__drm_format_info(u32 format)
+{
+ static const struct drm_format_info formats[] = {
+ { .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
+ { .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
+ { .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
+ { .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
+ { .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
+ { .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ };
+
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].format == format)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
/**
- * drm_fb_get_bpp_depth - get the bpp/depth values for format
+ * drm_format_info - query information for a given format
* @format: pixel format (DRM_FORMAT_*)
- * @depth: storage for the depth value
- * @bpp: storage for the bpp value
*
- * This only supports RGB formats here for compat with code that doesn't use
- * pixel formats directly yet.
+ * The caller should only pass a supported pixel format to this function.
+ * Unsupported pixel formats will generate a warning in the kernel log.
+ *
+ * Returns:
+ * The instance of struct drm_format_info that describes the pixel format, or
+ * NULL if the format is unsupported.
*/
-void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
- int *bpp)
+const struct drm_format_info *drm_format_info(u32 format)
{
- char *format_name;
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_BGR233:
- *depth = 8;
- *bpp = 8;
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- *depth = 15;
- *bpp = 16;
- break;
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_BGR565:
- *depth = 16;
- *bpp = 16;
- break;
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_BGR888:
- *depth = 24;
- *bpp = 24;
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_BGRX8888:
- *depth = 24;
- *bpp = 32;
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_BGRA1010102:
- *depth = 30;
- *bpp = 32;
- break;
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- *depth = 32;
- *bpp = 32;
- break;
- default:
- format_name = drm_get_format_name(format);
- DRM_DEBUG_KMS("unsupported pixel format %s\n", format_name);
- kfree(format_name);
- *depth = 0;
- *bpp = 0;
- break;
- }
+ const struct drm_format_info *info;
+
+ info = __drm_format_info(format);
+ WARN_ON(!info);
+ return info;
}
-EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+EXPORT_SYMBOL(drm_format_info);
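
The table replaces a family of per-format switch statements with one descriptive struct, so everything the old helpers computed now falls out of a single lookup. For example, the per-plane minimum-size check that drivers used to assemble from three helper calls becomes a sketch like this (fb_width, fb_height, pitches and offsets are hypothetical placeholders for the framebuffer parameters being validated):

	const struct drm_format_info *info = drm_format_info(fmt);
	unsigned int width, height, min_size;
	int i;

	if (!info)
		return -EINVAL;

	for (i = 0; i < info->num_planes; i++) {
		/* Chroma planes are subsampled by hsub x vsub. */
		width = fb_width / (i ? info->hsub : 1);
		height = fb_height / (i ? info->vsub : 1);
		min_size = (height - 1) * pitches[i] +
			   width * info->cpp[i] + offsets[i];
		/* ... compare min_size against the backing object's size ... */
	}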
/**
* drm_format_num_planes - get the number of planes for format
@@ -189,28 +211,10 @@ EXPORT_SYMBOL(drm_fb_get_bpp_depth);
*/
int drm_format_num_planes(uint32_t format)
{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 3;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- return 2;
- default:
- return 1;
- }
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ return info ? info->num_planes : 1;
}
EXPORT_SYMBOL(drm_format_num_planes);
@@ -224,40 +228,13 @@ EXPORT_SYMBOL(drm_format_num_planes);
*/
int drm_format_plane_cpp(uint32_t format, int plane)
{
- unsigned int depth;
- int bpp;
+ const struct drm_format_info *info;
- if (plane >= drm_format_num_planes(format))
+ info = drm_format_info(format);
+ if (!info || plane >= info->num_planes)
return 0;
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- return 2;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- return plane ? 2 : 1;
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 1;
- default:
- drm_fb_get_bpp_depth(format, &depth, &bpp);
- return bpp >> 3;
- }
+ return info->cpp[plane];
}
EXPORT_SYMBOL(drm_format_plane_cpp);
@@ -271,28 +248,10 @@ EXPORT_SYMBOL(drm_format_plane_cpp);
*/
int drm_format_horz_chroma_subsampling(uint32_t format)
{
- switch (format) {
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- return 4;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- return 2;
- default:
- return 1;
- }
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ return info ? info->hsub : 1;
}
EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
@@ -306,18 +265,10 @@ EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
*/
int drm_format_vert_chroma_subsampling(uint32_t format)
{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- return 4;
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- return 2;
- default:
- return 1;
- }
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ return info ? info->vsub : 1;
}
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
@@ -332,13 +283,16 @@ EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
*/
int drm_format_plane_width(int width, uint32_t format, int plane)
{
- if (plane >= drm_format_num_planes(format))
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ if (!info || plane >= info->num_planes)
return 0;
if (plane == 0)
return width;
- return width / drm_format_horz_chroma_subsampling(format);
+ return width / info->hsub;
}
EXPORT_SYMBOL(drm_format_plane_width);
@@ -353,12 +307,15 @@ EXPORT_SYMBOL(drm_format_plane_width);
*/
int drm_format_plane_height(int height, uint32_t format, int plane)
{
- if (plane >= drm_format_num_planes(format))
+ const struct drm_format_info *info;
+
+ info = drm_format_info(format);
+ if (!info || plane >= info->num_planes)
return 0;
if (plane == 0)
return height;
- return height / drm_format_vert_chroma_subsampling(format);
+ return height / info->vsub;
}
EXPORT_SYMBOL(drm_format_plane_height);
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 398efd67cb93..49fd7db758e0 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -126,111 +126,33 @@ int drm_mode_addfb(struct drm_device *dev,
return 0;
}
-static int format_check(const struct drm_mode_fb_cmd2 *r)
-{
- uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
- char *format_name;
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_BGR233:
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_XBGR4444:
- case DRM_FORMAT_RGBX4444:
- case DRM_FORMAT_BGRX4444:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_ABGR4444:
- case DRM_FORMAT_RGBA4444:
- case DRM_FORMAT_BGRA4444:
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_BGR565:
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_BGR888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_BGRX8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_BGRA1010102:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_AYUV:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 0;
- default:
- format_name = drm_get_format_name(r->pixel_format);
- DRM_DEBUG_KMS("invalid pixel format %s\n", format_name);
- kfree(format_name);
- return -EINVAL;
- }
-}
-
static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
{
- int ret, hsub, vsub, num_planes, i;
+ const struct drm_format_info *info;
+ int i;
- ret = format_check(r);
- if (ret) {
+ info = __drm_format_info(r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN);
+ if (!info) {
char *format_name = drm_get_format_name(r->pixel_format);
DRM_DEBUG_KMS("bad framebuffer format %s\n", format_name);
kfree(format_name);
- return ret;
+ return -EINVAL;
}
- hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
- num_planes = drm_format_num_planes(r->pixel_format);
-
- if (r->width == 0 || r->width % hsub) {
+ if (r->width == 0 || r->width % info->hsub) {
DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
return -EINVAL;
}
- if (r->height == 0 || r->height % vsub) {
+ if (r->height == 0 || r->height % info->vsub) {
DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
return -EINVAL;
}
- for (i = 0; i < num_planes; i++) {
- unsigned int width = r->width / (i != 0 ? hsub : 1);
- unsigned int height = r->height / (i != 0 ? vsub : 1);
- unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = r->width / (i != 0 ? info->hsub : 1);
+ unsigned int height = r->height / (i != 0 ? info->vsub : 1);
+ unsigned int cpp = info->cpp[i];
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
@@ -273,7 +195,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
}
}
- for (i = num_planes; i < 4; i++) {
+ for (i = info->num_planes; i < 4; i++) {
if (r->modifier[i]) {
DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index e66af289a016..abd209863ef4 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -100,6 +100,9 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
int drm_debugfs_cleanup(struct drm_minor *minor);
int drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
+int drm_debugfs_crtc_add(struct drm_crtc *crtc);
+void drm_debugfs_crtc_remove(struct drm_crtc *crtc);
+int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc);
#else
static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
@@ -119,4 +122,17 @@ static inline int drm_debugfs_connector_add(struct drm_connector *connector)
static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
{
}
+
+static inline int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+{
+ return 0;
+}
+static inline void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
+{
+}
+
+static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
+{
+ return 0;
+}
#endif
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index b969a64a1514..48a6167f5e7b 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -952,8 +952,10 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
u32 vblank_count;
unsigned int seq;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (WARN_ON(pipe >= dev->num_crtcs)) {
+ *vblanktime = (struct timeval) { 0 };
return 0;
+ }
do {
seq = read_seqbegin(&vblank->seqlock);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 53f07ac7c174..f64ac86deb84 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -165,6 +165,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
unsigned int vfieldrate, hperiod;
int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
int interlace;
+ u64 tmp;
/* allocate the drm_display_mode structure. If failure, we will
* return directly
@@ -322,8 +323,11 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->vsync_end = drm_mode->vsync_start + vsync;
}
/* 15/13. Find pixel clock frequency (kHz for xf86) */
- drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
- drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+ tmp = drm_mode->htotal; /* perform intermediate calcs in u64 */
+ tmp *= HV_FACTOR * 1000;
+ do_div(tmp, hperiod);
+	tmp -= tmp % CVT_CLOCK_STEP;	/* round down to the CVT clock step */
+ drm_mode->clock = tmp;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
if (interlaced) {
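
The u64 intermediate matters because the product can overflow 32 bits: assuming the usual HV_FACTOR of 1000 in drm_modes.c, the expression is htotal * 10^6, which exceeds INT_MAX (about 2.1 * 10^9) for any htotal above 2147 — routinely the case for 4k CVT timings.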
@@ -997,6 +1001,7 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
mode1->vsync_end == mode2->vsync_end &&
mode1->vtotal == mode2->vtotal &&
mode1->vscan == mode2->vscan &&
+ mode1->picture_aspect_ratio == mode2->picture_aspect_ratio &&
(mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
(mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
return true;
@@ -1499,6 +1504,27 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
out->vrefresh = in->vrefresh;
out->flags = in->flags;
out->type = in->type;
+ out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
+
+ switch (in->picture_aspect_ratio) {
+ case HDMI_PICTURE_ASPECT_4_3:
+ out->flags |= DRM_MODE_FLAG_PIC_AR_4_3;
+ break;
+ case HDMI_PICTURE_ASPECT_16_9:
+ out->flags |= DRM_MODE_FLAG_PIC_AR_16_9;
+ break;
+ case HDMI_PICTURE_ASPECT_64_27:
+ out->flags |= DRM_MODE_FLAG_PIC_AR_64_27;
+ break;
+ case HDMI_PICTURE_ASPECT_256_135:
+ out->flags |= DRM_MODE_FLAG_PIC_AR_256_135;
+ break;
+ case HDMI_PICTURE_ASPECT_RESERVED:
+ default:
+ out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
+ break;
+ }
+
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
}
@@ -1544,6 +1570,27 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+ /* Clear the picture aspect ratio bits from out->flags */
+ out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
+
+ switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) {
+ case DRM_MODE_FLAG_PIC_AR_4_3:
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
+ break;
+ case DRM_MODE_FLAG_PIC_AR_16_9:
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
+ break;
+ case DRM_MODE_FLAG_PIC_AR_64_27:
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27;
+ break;
+ case DRM_MODE_FLAG_PIC_AR_256_135:
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135;
+ break;
+ default:
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+ break;
+ }
+
out->status = drm_mode_validate_basic(out);
if (out->status != MODE_OK)
goto out;
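The two hunks above are intended to round-trip: user space encodes the requested aspect ratio in the mode flags, and the kernel maps it to and from enum hdmi_picture_aspect. A sketch (hypothetical values) of the user-space side:

    struct drm_mode_modeinfo umode;

    umode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
    umode.flags |= DRM_MODE_FLAG_PIC_AR_16_9;
    /* after drm_mode_convert_umode(&mode, &umode):
     *   mode.picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9
     */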
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 1d45738f8f98..2544dfe7354c 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -70,8 +70,23 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ const struct drm_format_info *info;
int i;
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info || !info->depth) {
+ char *format_name = drm_get_format_name(mode_cmd->pixel_format);
+
+ DRM_DEBUG_KMS("non-RGB pixel format %s\n", format_name);
+ kfree(format_name);
+
+ fb->depth = 0;
+ fb->bits_per_pixel = 0;
+ } else {
+ fb->depth = info->depth;
+ fb->bits_per_pixel = info->cpp[0] * 8;
+ }
+
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
for (i = 0; i < 4; i++) {
@@ -79,8 +94,6 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
fb->offsets[i] = mode_cmd->offsets[i];
fb->modifier[i] = mode_cmd->modifier[i];
}
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
- &fb->bits_per_pixel);
fb->pixel_format = mode_cmd->pixel_format;
fb->flags = mode_cmd->flags;
}
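A sketch of the drm_format_info() lookup the helper now relies on; DRM_FORMAT_XRGB8888 is used only as a familiar example (cpp[0] = 4 bytes, depth = 24):

    const struct drm_format_info *info;

    info = drm_format_info(DRM_FORMAT_XRGB8888);
    if (info && info->depth) {
        fb->depth = info->depth;               /* 24 */
        fb->bits_per_pixel = info->cpp[0] * 8; /* 32 */
    }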
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index bc98bb94264d..47848ed8ca48 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -6,6 +6,11 @@
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
+static void drm_release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+
/**
* drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
* @dev: DRM device
@@ -64,6 +69,24 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
EXPORT_SYMBOL(drm_of_find_possible_crtcs);
/**
+ * drm_of_component_match_add - Add a component helper OF node match rule
+ * @master: master device
+ * @matchptr: component match pointer
+ * @compare: compare function used for matching component
+ * @node: device tree node to match; the helper takes its own reference
+ */
+void drm_of_component_match_add(struct device *master,
+ struct component_match **matchptr,
+ int (*compare)(struct device *, void *),
+ struct device_node *node)
+{
+ of_node_get(node);
+ component_match_add_release(master, matchptr, drm_release_of,
+ compare, node);
+}
+EXPORT_SYMBOL_GPL(drm_of_component_match_add);
+
+/**
* drm_of_component_probe - Generic probe function for a component based master
* @dev: master device containing the OF node
* @compare_of: compare function used for matching components
@@ -101,7 +124,7 @@ int drm_of_component_probe(struct device *dev,
continue;
}
- component_match_add(dev, &match, compare_of, port);
+ drm_of_component_match_add(dev, &match, compare_of, port);
of_node_put(port);
}
@@ -140,7 +163,8 @@ int drm_of_component_probe(struct device *dev,
continue;
}
- component_match_add(dev, &match, compare_of, remote);
+ drm_of_component_match_add(dev, &match, compare_of,
+ remote);
of_node_put(remote);
}
of_node_put(port);
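A sketch of how a component-based driver would adopt the new helper (compare_of here is a hypothetical callback): because drm_of_component_match_add() takes its own reference on the node, the caller can drop its reference right after adding the match.

    static int compare_of(struct device *dev, void *data)
    {
        return dev->of_node == data;
    }

    /* in probe: */
    struct component_match *match = NULL;

    drm_of_component_match_add(dev, &match, compare_of, remote);
    of_node_put(remote);    /* the helper holds its own reference now */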
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index aa687669e22b..0dee6acbd880 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -16,6 +16,7 @@
#include <linux/component.h>
#include <linux/of_platform.h>
+#include <drm/drm_of.h>
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
@@ -629,8 +630,8 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
if (!core_node)
break;
- component_match_add(&pdev->dev, &match, compare_of,
- core_node);
+ drm_of_component_match_add(&pdev->dev, &match,
+ compare_of, core_node);
of_node_put(core_node);
}
} else if (dev->platform_data) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 0370b842d9cc..7d066a91d778 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -409,20 +409,16 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct drm_device *dev = obj->dev;
bool write = !!(op & ETNA_PREP_WRITE);
- int ret;
-
- if (op & ETNA_PREP_NOSYNC) {
- if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
- write))
- return -EBUSY;
- } else {
- unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
+ unsigned long remain =
+ op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout);
+ long lret;
- ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
- write, true, remain);
- if (ret <= 0)
- return ret == 0 ? -ETIMEDOUT : ret;
- }
+ lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+ write, true, remain);
+ if (lret < 0)
+ return lret;
+ else if (lret == 0)
+ return remain == 0 ? -EBUSY : -ETIMEDOUT;
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
if (!etnaviv_obj->sgt) {
@@ -470,10 +466,10 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
}
#ifdef CONFIG_DEBUG_FS
-static void etnaviv_gem_describe_fence(struct fence *fence,
+static void etnaviv_gem_describe_fence(struct dma_fence *fence,
const char *type, struct seq_file *m)
{
- if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
seq_printf(m, "\t%9s: %s %s seq %u\n",
type,
fence->ops->get_driver_name(fence),
@@ -486,7 +482,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
unsigned long off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index b1254f885fed..d2211825e5c8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -15,7 +15,7 @@
*/
#include <linux/component.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include "etnaviv_dump.h"
@@ -882,7 +882,7 @@ static void recover_worker(struct work_struct *work)
for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
if (!gpu->event[i].used)
continue;
- fence_signal(gpu->event[i].fence);
+ dma_fence_signal(gpu->event[i].fence);
gpu->event[i].fence = NULL;
gpu->event[i].used = false;
complete(&gpu->event_free);
@@ -952,55 +952,55 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu)
/* fence object management */
struct etnaviv_fence {
struct etnaviv_gpu *gpu;
- struct fence base;
+ struct dma_fence base;
};
-static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
+static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
return container_of(fence, struct etnaviv_fence, base);
}
-static const char *etnaviv_fence_get_driver_name(struct fence *fence)
+static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
return "etnaviv";
}
-static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
+static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
return dev_name(f->gpu->dev);
}
-static bool etnaviv_fence_enable_signaling(struct fence *fence)
+static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static bool etnaviv_fence_signaled(struct fence *fence)
+static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
return fence_completed(f->gpu, f->base.seqno);
}
-static void etnaviv_fence_release(struct fence *fence)
+static void etnaviv_fence_release(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
kfree_rcu(f, base.rcu);
}
-static const struct fence_ops etnaviv_fence_ops = {
+static const struct dma_fence_ops etnaviv_fence_ops = {
.get_driver_name = etnaviv_fence_get_driver_name,
.get_timeline_name = etnaviv_fence_get_timeline_name,
.enable_signaling = etnaviv_fence_enable_signaling,
.signaled = etnaviv_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = etnaviv_fence_release,
};
-static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
+static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_fence *f;
@@ -1010,8 +1010,8 @@ static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
f->gpu = gpu;
- fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
- gpu->fence_context, ++gpu->next_fence);
+ dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
+ gpu->fence_context, ++gpu->next_fence);
return &f->base;
}
@@ -1021,7 +1021,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
{
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i, ret;
if (!exclusive) {
@@ -1039,7 +1039,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
/* Wait on any existing exclusive fence which isn't our own */
fence = reservation_object_get_excl(robj);
if (fence && fence->context != context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -1052,7 +1052,7 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(robj));
if (fence->context != context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -1158,11 +1158,11 @@ static void retire_worker(struct work_struct *work)
mutex_lock(&gpu->lock);
list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
- if (!fence_is_signaled(cmdbuf->fence))
+ if (!dma_fence_is_signaled(cmdbuf->fence))
break;
list_del(&cmdbuf->node);
- fence_put(cmdbuf->fence);
+ dma_fence_put(cmdbuf->fence);
for (i = 0; i < cmdbuf->nr_bos; i++) {
struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
@@ -1275,7 +1275,7 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
- struct fence *fence;
+ struct dma_fence *fence;
unsigned int event, i;
int ret;
@@ -1391,7 +1391,7 @@ static irqreturn_t irq_handler(int irq, void *data)
}
while ((event = ffs(intr)) != 0) {
- struct fence *fence;
+ struct dma_fence *fence;
event -= 1;
@@ -1401,7 +1401,7 @@ static irqreturn_t irq_handler(int irq, void *data)
fence = gpu->event[event].fence;
gpu->event[event].fence = NULL;
- fence_signal(fence);
+ dma_fence_signal(fence);
/*
* Events can be processed out of order. Eg,
@@ -1553,7 +1553,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
return ret;
gpu->drm = drm;
- gpu->fence_context = fence_context_alloc(1);
+ gpu->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&gpu->fence_spinlock);
INIT_LIST_HEAD(&gpu->active_cmd_list);
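The bulk of this file is the mechanical fence_* to dma_fence_* rename. For reference, a minimal dma_fence provider after the rename looks roughly like this (names hypothetical, not the etnaviv code itself):

    static const char *my_fence_name(struct dma_fence *f)
    {
        return "mydrv";
    }

    static bool my_fence_enable_signaling(struct dma_fence *f)
    {
        return true;    /* the driver signals from its IRQ handler */
    }

    static const struct dma_fence_ops my_fence_ops = {
        .get_driver_name   = my_fence_name,
        .get_timeline_name = my_fence_name,
        .enable_signaling  = my_fence_enable_signaling,
        .wait              = dma_fence_default_wait,
    };

    /* dma_fence_init(&f->base, &my_fence_ops, &lock, ctx, ++seqno); */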
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 73c278dc3706..8c6b824e9d0a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -89,7 +89,7 @@ struct etnaviv_chip_identity {
struct etnaviv_event {
bool used;
- struct fence *fence;
+ struct dma_fence *fence;
};
struct etnaviv_cmdbuf;
@@ -163,7 +163,7 @@ struct etnaviv_cmdbuf {
/* vram node used if the cmdbuf is mapped through the MMUv2 */
struct drm_mm_node vram_node;
/* fence after which this buffer is to be disposed */
- struct fence *fence;
+ struct dma_fence *fence;
/* target exec state */
u32 exec_state;
/* per GPU in-flight list */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index f86e7c846678..463d6fd56756 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -69,7 +69,7 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
spin_lock(&priv->lock);
priv->pending &= ~commit->crtcs;
@@ -254,6 +254,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
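The drm_atomic_state_get()/put() pair reflects the switch to reference-counted atomic state: the commit takes a reference before handing the state to a worker, and the completion path (first hunk above) drops it. A sketch of the pattern, assuming a commit worker as in this driver:

    drm_atomic_state_get(state);            /* held by the commit */
    schedule_work(&commit->work);

    /* ...later, in the worker's completion path: */
    drm_atomic_helper_cleanup_planes(dev, state);
    drm_atomic_state_put(state);            /* drop the commit's reference */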
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 3a44e705db53..97daf23f3fef 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -124,7 +124,7 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
psbfb->gtt->offset;
- page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ page_num = vma_pages(vma);
address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -236,22 +236,20 @@ static int psb_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
- u32 bpp, depth;
+ const struct drm_format_info *info;
int ret;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ /*
+ * Reject unknown formats, YUV formats, and formats with more than
+ * 4 bytes per pixel.
+ */
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info || !info->depth || info->cpp[0] > 4)
+ return -EINVAL;
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- switch (bpp) {
- case 8:
- case 16:
- case 24:
- case 32:
- break;
- default:
- return -EINVAL;
- }
+
drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
fb->gtt = gt;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
@@ -298,7 +296,6 @@ static struct drm_framebuffer *psb_framebuffer_create
* psbfb_alloc - allocate frame buffer memory
* @dev: the DRM device
* @aligned_size: space needed
- * @force: fall back to GEM buffers if need be
*
* Allocate the frame buffer. In the usual case we get a GTT range that
* is stolen memory backed and life is simple. If there isn't sufficient
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 8f69225ce2b4..76aea2e7fb9d 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -76,6 +76,7 @@ static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
* psb_gtt_insert - put an object into the GTT
* @dev: our DRM device
* @r: our GTT range
+ * @resume: whether this is called during resume
*
* Take our preallocated GTT range and insert the GEM object into
* the GTT. This is protected via the gtt mutex which the caller
@@ -321,6 +322,7 @@ out:
* @len: length (bytes) of address space required
* @name: resource name
* @backed: resource should be backed by stolen pages
+ * @align: requested alignment
*
* Ask the kernel core to find us a suitable range of addresses
* to use for a GTT mapping.
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 90377a609c98..e88fde18c946 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -24,6 +24,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include "kirin_drm_drv.h"
@@ -260,14 +261,13 @@ static struct device_node *kirin_get_remote_node(struct device_node *np)
DRM_ERROR("no valid endpoint node\n");
return ERR_PTR(-ENODEV);
}
- of_node_put(endpoint);
remote = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
if (!remote) {
DRM_ERROR("no valid remote node\n");
return ERR_PTR(-ENODEV);
}
- of_node_put(remote);
if (!of_device_is_available(remote)) {
DRM_ERROR("not available for remote node\n");
@@ -294,7 +294,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
if (IS_ERR(remote))
return PTR_ERR(remote);
- component_match_add(dev, &match, compare_of, remote);
+ drm_of_component_match_add(dev, &match, compare_of, remote);
+ of_node_put(remote);
return component_master_add_with_match(dev, &kirin_drm_ops, match);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 9798d400d817..af8683e0dd54 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1289,7 +1289,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data)
mutex_unlock(&priv->audio_mutex);
}
-int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int
+tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 7769e469118f..1c1b19ccb92f 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -46,6 +46,31 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
If in doubt, say "N".
+config DRM_I915_CAPTURE_ERROR
+ bool "Enable capturing GPU state following a hang"
+ depends on DRM_I915
+ default y
+ help
+ This option enables capturing the GPU state when a hang is detected.
+ This information is vital for triaging hangs and assists in debugging.
+ Please report any hang to
+ https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
+ for triaging.
+
+ If in doubt, say "Y".
+
+config DRM_I915_COMPRESS_ERROR
+ bool "Compress GPU error state"
+ depends on DRM_I915_CAPTURE_ERROR
+ select ZLIB_DEFLATE
+ default y
+ help
+ This option selects ZLIB_DEFLATE if it isn't already
+ selected and causes any error state captured upon a GPU hang
+ to be compressed using zlib.
+
+ If in doubt, say "Y".
+
config DRM_I915_USERPTR
bool "Always enable userptr support"
depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a998c2bce70a..612340097f4b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -42,7 +42,6 @@ i915-y += i915_cmd_parser.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_userptr.o \
- i915_gpu_error.o \
i915_trace_points.o \
intel_breadcrumbs.o \
intel_engine_cs.o \
@@ -102,11 +101,15 @@ i915-y += dvo_ch7017.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
+ intel_lspcon.o \
intel_lvds.o \
intel_panel.o \
intel_sdvo.o \
intel_tv.o
+# Post-mortem debug and GPU hang state capture
+i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
+
# virtual gpu code
i915-y += i915_vgpu.o
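With i915_gpu_error.o built only when CONFIG_DRM_I915_CAPTURE_ERROR=y, callers are typically kept compiling through static inline no-op stubs in a header. A sketch of that pattern (function name and signature simplified, not the exact i915 declarations):

    #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
    void i915_capture_error_state(struct drm_i915_private *i915);
    #else
    static inline void i915_capture_error_state(struct drm_i915_private *i915)
    {
    }
    #endif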
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index d0f21a6ad60d..34ea4776af70 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -1,5 +1,7 @@
GVT_DIR := gvt
-GVT_SOURCE := gvt.o
+GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
+ interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
+ execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
new file mode 100644
index 000000000000..0d41ebc4aea6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Dexuan Cui
+ *
+ * Contributors:
+ * Pei Zhang <pei.zhang@intel.com>
+ * Min He <min.he@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Yulei Zhang <yulei.zhang@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define MB_TO_BYTES(mb) ((mb) << 20ULL)
+#define BYTES_TO_MB(b) ((b) >> 20ULL)
+
+#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
+#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
+#define HOST_FENCE 4
+
+static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ u32 alloc_flag, search_flag;
+ u64 start, end, size;
+ struct drm_mm_node *node;
+ int retried = 0;
+ int ret;
+
+ if (high_gm) {
+ search_flag = DRM_MM_SEARCH_BELOW;
+ alloc_flag = DRM_MM_CREATE_TOP;
+ node = &vgpu->gm.high_gm_node;
+ size = vgpu_hidden_sz(vgpu);
+ start = gvt_hidden_gmadr_base(gvt);
+ end = gvt_hidden_gmadr_end(gvt);
+ } else {
+ search_flag = DRM_MM_SEARCH_DEFAULT;
+ alloc_flag = DRM_MM_CREATE_DEFAULT;
+ node = &vgpu->gm.low_gm_node;
+ size = vgpu_aperture_sz(vgpu);
+ start = gvt_aperture_gmadr_base(gvt);
+ end = gvt_aperture_gmadr_end(gvt);
+ }
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+search_again:
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
+ node, size, 4096, 0,
+ start, end, search_flag,
+ alloc_flag);
+ if (ret) {
+ ret = i915_gem_evict_something(&dev_priv->ggtt.base,
+ size, 4096, 0, start, end, 0);
+ if (ret == 0 && ++retried < 3)
+ goto search_again;
+
+ gvt_err("fail to alloc %s gm space from host, retried %d\n",
+ high_gm ? "high" : "low", retried);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ ret = alloc_gm(vgpu, false);
+ if (ret)
+ return ret;
+
+ ret = alloc_gm(vgpu, true);
+ if (ret)
+ goto out_free_aperture;
+
+ gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
+ vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));
+
+ gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
+ vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));
+
+ return 0;
+out_free_aperture:
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ drm_mm_remove_node(&vgpu->gm.low_gm_node);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static void free_vgpu_gm(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ drm_mm_remove_node(&vgpu->gm.low_gm_node);
+ drm_mm_remove_node(&vgpu->gm.high_gm_node);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+/**
+ * intel_vgpu_write_fence - write fence registers owned by a vGPU
+ * @vgpu: vGPU instance
+ * @fence: vGPU fence register number
+ * @value: Fence register value to be written
+ *
+ * This function is used to write fence registers owned by a vGPU. The vGPU
+ * fence register number is translated into the HW fence register number.
+ *
+ */
+void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
+ u32 fence, u64 value)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_fence_reg *reg;
+ i915_reg_t fence_reg_lo, fence_reg_hi;
+
+ assert_rpm_wakelock_held(dev_priv);
+
+ if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
+ return;
+
+ reg = vgpu->fence.regs[fence];
+ if (WARN_ON(!reg))
+ return;
+
+ fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
+ fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
+
+ I915_WRITE(fence_reg_lo, 0);
+ POSTING_READ(fence_reg_lo);
+
+ I915_WRITE(fence_reg_hi, upper_32_bits(value));
+ I915_WRITE(fence_reg_lo, lower_32_bits(value));
+ POSTING_READ(fence_reg_lo);
+}
+
+static void free_vgpu_fence(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_fence_reg *reg;
+ u32 i;
+
+ if (WARN_ON(!vgpu_fence_sz(vgpu)))
+ return;
+
+ intel_runtime_pm_get(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+ reg = vgpu->fence.regs[i];
+ intel_vgpu_write_fence(vgpu, i, 0);
+ list_add_tail(&reg->link,
+ &dev_priv->mm.fence_list);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_fence_reg *reg;
+ int i;
+ struct list_head *pos, *q;
+
+ intel_runtime_pm_get(dev_priv);
+
+ /* Request fences from host */
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i = 0;
+ list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
+ reg = list_entry(pos, struct drm_i915_fence_reg, link);
+ if (reg->pin_count || reg->vma)
+ continue;
+ list_del(pos);
+ vgpu->fence.regs[i] = reg;
+ intel_vgpu_write_fence(vgpu, i, 0);
+ if (++i == vgpu_fence_sz(vgpu))
+ break;
+ }
+ if (i != vgpu_fence_sz(vgpu))
+ goto out_free_fence;
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ return 0;
+out_free_fence:
+ /* Return fences to the host on failure */
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+ reg = vgpu->fence.regs[i];
+ if (!reg)
+ continue;
+ list_add_tail(&reg->link,
+ &dev_priv->mm.fence_list);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ return -ENOSPC;
+}
+
+static void free_resource(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
+ gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
+ gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
+}
+
+static int alloc_resource(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ unsigned long request, avail, max, taken;
+ const char *item;
+
+ if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
+ gvt_err("Invalid vGPU creation params\n");
+ return -EINVAL;
+ }
+
+ item = "low GM space";
+ max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+ taken = gvt->gm.vgpu_allocated_low_gm_size;
+ avail = max - taken;
+ request = MB_TO_BYTES(param->low_gm_sz);
+
+ if (request > avail)
+ goto no_enough_resource;
+
+ vgpu_aperture_sz(vgpu) = request;
+
+ item = "high GM space";
+ max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
+ taken = gvt->gm.vgpu_allocated_high_gm_size;
+ avail = max - taken;
+ request = MB_TO_BYTES(param->high_gm_sz);
+
+ if (request > avail)
+ goto no_enough_resource;
+
+ vgpu_hidden_sz(vgpu) = request;
+
+ item = "fence";
+ max = gvt_fence_sz(gvt) - HOST_FENCE;
+ taken = gvt->fence.vgpu_allocated_fence_num;
+ avail = max - taken;
+ request = param->fence_sz;
+
+ if (request > avail)
+ goto no_enough_resource;
+
+ vgpu_fence_sz(vgpu) = request;
+
+ gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
+ gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
+ gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
+ return 0;
+
+no_enough_resource:
+ gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
+ gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
+ vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+ BYTES_TO_MB(max), BYTES_TO_MB(taken));
+ return -ENOSPC;
+}
+
+/**
+ * intel_vgpu_free_resource - free HW resources owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to free the HW resources owned by a vGPU.
+ *
+ */
+void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
+{
+ free_vgpu_gm(vgpu);
+ free_vgpu_fence(vgpu);
+ free_resource(vgpu);
+}
+
+/**
+ * intel_vgpu_alloc_resource - allocate HW resources for a vGPU
+ * @vgpu: vGPU
+ * @param: vGPU creation params
+ *
+ * This function is used to allocate HW resources for a vGPU. The user
+ * specifies the resource configuration through the creation params.
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param)
+{
+ int ret;
+
+ ret = alloc_resource(vgpu, param);
+ if (ret)
+ return ret;
+
+ ret = alloc_vgpu_gm(vgpu);
+ if (ret)
+ goto out_free_resource;
+
+ ret = alloc_vgpu_fence(vgpu);
+ if (ret)
+ goto out_free_vgpu_gm;
+
+ return 0;
+
+out_free_vgpu_gm:
+ free_vgpu_gm(vgpu);
+out_free_resource:
+ free_resource(vgpu);
+ return ret;
+}
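A sketch of creation-time usage of the two entry points above (sizes hypothetical; low_gm_sz and high_gm_sz are in MB, as alloc_resource() converts them with MB_TO_BYTES()):

    struct intel_vgpu_creation_params param = {
        .low_gm_sz  = 64,   /* mappable (low) GM, in MB */
        .high_gm_sz = 448,  /* hidden (high) GM, in MB */
        .fence_sz   = 4,    /* fence registers */
    };
    int ret = intel_vgpu_alloc_resource(vgpu, &param);

    if (ret)
        return ret;
    /* ... on teardown: */
    intel_vgpu_free_resource(vgpu);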
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
new file mode 100644
index 000000000000..4c687740f5f1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+enum {
+ INTEL_GVT_PCI_BAR_GTTMMIO = 0,
+ INTEL_GVT_PCI_BAR_APERTURE,
+ INTEL_GVT_PCI_BAR_PIO,
+ INTEL_GVT_PCI_BAR_MAX,
+};
+
+/**
+ * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu *vgpu = __vgpu;
+
+ if (WARN_ON(bytes > 4))
+ return -EINVAL;
+
+ if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+ return -EINVAL;
+
+ memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
+ return 0;
+}
+
+static int map_aperture(struct intel_vgpu *vgpu, bool map)
+{
+ u64 first_gfn, first_mfn;
+ u64 val;
+ int ret;
+
+ if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
+ return 0;
+
+ val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
+ if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
+ else
+ val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
+
+ first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
+ first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+
+ ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
+ first_mfn,
+ vgpu_aperture_sz(vgpu)
+ >> PAGE_SHIFT, map,
+ GVT_MAP_APERTURE);
+ if (ret)
+ return ret;
+
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
+ return 0;
+}
+
+static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
+{
+ u64 start, end;
+ u64 val;
+ int ret;
+
+ if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
+ return 0;
+
+ val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
+ if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
+ else
+ start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
+
+ start &= ~GENMASK(3, 0);
+ end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
+
+ ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
+ if (ret)
+ return ret;
+
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
+ return 0;
+}
+
+static int emulate_pci_command_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u8 old = vgpu_cfg_space(vgpu)[offset];
+ u8 new = *(u8 *)p_data;
+ u8 changed = old ^ new;
+ int ret;
+
+ if (!(changed & PCI_COMMAND_MEMORY))
+ return 0;
+
+ if (old & PCI_COMMAND_MEMORY) {
+ ret = trap_gttmmio(vgpu, false);
+ if (ret)
+ return ret;
+ ret = map_aperture(vgpu, false);
+ if (ret)
+ return ret;
+ } else {
+ ret = trap_gttmmio(vgpu, true);
+ if (ret)
+ return ret;
+ ret = map_aperture(vgpu, true);
+ if (ret)
+ return ret;
+ }
+
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ return 0;
+}
+
+static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int bar_index =
+ (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
+ u32 new = *(u32 *)(p_data);
+ bool lo = IS_ALIGNED(offset, 8);
+ u64 size;
+ int ret = 0;
+ bool mmio_enabled =
+ vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
+
+ if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
+ return -EINVAL;
+
+ if (new == 0xffffffff) {
+ /*
+ * Power-up software can determine how much address
+ * space the device requires by writing a value of
+ * all 1's to the register and then reading the value
+ * back. The device will return 0's in all don't-care
+ * address bits.
+ */
+ size = vgpu->cfg_space.bar[bar_index].size;
+ if (lo) {
+ new = rounddown(new, size);
+ } else {
+ u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
+ /* A 32-bit BAR returns all zeroes in the upper dword;
+ * a 64-bit BAR computes the size from the lower dword
+ * and returns the corresponding upper bits
+ */
+ if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ new &= (~(size-1)) >> 32;
+ else
+ new = 0;
+ }
+ /*
+ * Unmap & untrap the BAR, since the guest hasn't configured a
+ * valid GPA
+ */
+ switch (bar_index) {
+ case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, false);
+ break;
+ case INTEL_GVT_PCI_BAR_APERTURE:
+ ret = map_aperture(vgpu, false);
+ break;
+ }
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ } else {
+ /*
+ * Unmap & untrap the old BAR first, since the guest has
+ * re-configured the BAR
+ */
+ switch (bar_index) {
+ case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, false);
+ break;
+ case INTEL_GVT_PCI_BAR_APERTURE:
+ ret = map_aperture(vgpu, false);
+ break;
+ }
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ /* Track the new BAR */
+ if (mmio_enabled) {
+ switch (bar_index) {
+ case INTEL_GVT_PCI_BAR_GTTMMIO:
+ ret = trap_gttmmio(vgpu, true);
+ break;
+ case INTEL_GVT_PCI_BAR_APERTURE:
+ ret = map_aperture(vgpu, true);
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+/**
+ * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu *vgpu = __vgpu;
+ int ret;
+
+ if (WARN_ON(bytes > 4))
+ return -EINVAL;
+
+ if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+ return -EINVAL;
+
+ /* First check if it's PCI_COMMAND */
+ if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
+ if (WARN_ON(bytes > 2))
+ return -EINVAL;
+ return emulate_pci_command_write(vgpu, offset, p_data, bytes);
+ }
+
+ switch (rounddown(offset, 4)) {
+ case PCI_BASE_ADDRESS_0:
+ case PCI_BASE_ADDRESS_1:
+ case PCI_BASE_ADDRESS_2:
+ case PCI_BASE_ADDRESS_3:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
+
+ case INTEL_GVT_PCI_SWSCI:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
+ if (ret)
+ return ret;
+ break;
+
+ case INTEL_GVT_PCI_OPREGION:
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+ ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
+ if (ret)
+ return ret;
+
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ break;
+ default:
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ break;
+ }
+ return 0;
+}
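The all-ones write handled in emulate_pci_bar_write() implements standard PCI BAR sizing: the emulation returns zeroes in the don't-care address bits, and the guest recovers the size from the read-back value. A worked example for a hypothetical 256MB aperture BAR:

    /* guest writes 0xffffffff; BAR size is 0x10000000 (256MB) */
    u32 new = rounddown(0xffffffff, 0x10000000);    /* 0xf0000000 */
    u32 size = ~new + 1;                            /* 0x10000000 */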
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
new file mode 100644
index 000000000000..aafb57e26288
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -0,0 +1,2831 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Yulei Zhang <yulei.zhang@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+#include "trace.h"
+
+#define INVALID_OP (~0U)
+
+#define OP_LEN_MI 9
+#define OP_LEN_2D 10
+#define OP_LEN_3D_MEDIA 16
+#define OP_LEN_MFX_VC 16
+#define OP_LEN_VEBOX 16
+
+#define CMD_TYPE(cmd) (((cmd) >> 29) & 7)
+
+struct sub_op_bits {
+ int hi;
+ int low;
+};
+struct decode_info {
+ char *name;
+ int op_len;
+ int nr_sub_op;
+ struct sub_op_bits *sub_op;
+};
+
+#define MAX_CMD_BUDGET 0x7fffffff
+#define MI_WAIT_FOR_PLANE_C_FLIP_PENDING (1<<15)
+#define MI_WAIT_FOR_PLANE_B_FLIP_PENDING (1<<9)
+#define MI_WAIT_FOR_PLANE_A_FLIP_PENDING (1<<1)
+
+#define MI_WAIT_FOR_SPRITE_C_FLIP_PENDING (1<<20)
+#define MI_WAIT_FOR_SPRITE_B_FLIP_PENDING (1<<10)
+#define MI_WAIT_FOR_SPRITE_A_FLIP_PENDING (1<<2)
+
+/* Render Command Map */
+
+/* MI_* command Opcode (28:23) */
+#define OP_MI_NOOP 0x0
+#define OP_MI_SET_PREDICATE 0x1 /* HSW+ */
+#define OP_MI_USER_INTERRUPT 0x2
+#define OP_MI_WAIT_FOR_EVENT 0x3
+#define OP_MI_FLUSH 0x4
+#define OP_MI_ARB_CHECK 0x5
+#define OP_MI_RS_CONTROL 0x6 /* HSW+ */
+#define OP_MI_REPORT_HEAD 0x7
+#define OP_MI_ARB_ON_OFF 0x8
+#define OP_MI_URB_ATOMIC_ALLOC 0x9 /* HSW+ */
+#define OP_MI_BATCH_BUFFER_END 0xA
+#define OP_MI_SUSPEND_FLUSH 0xB
+#define OP_MI_PREDICATE 0xC /* IVB+ */
+#define OP_MI_TOPOLOGY_FILTER 0xD /* IVB+ */
+#define OP_MI_SET_APPID 0xE /* IVB+ */
+#define OP_MI_RS_CONTEXT 0xF /* HSW+ */
+#define OP_MI_LOAD_SCAN_LINES_INCL 0x12 /* HSW+ */
+#define OP_MI_DISPLAY_FLIP 0x14
+#define OP_MI_SEMAPHORE_MBOX 0x16
+#define OP_MI_SET_CONTEXT 0x18
+#define OP_MI_MATH 0x1A
+#define OP_MI_URB_CLEAR 0x19
+#define OP_MI_SEMAPHORE_SIGNAL 0x1B /* BDW+ */
+#define OP_MI_SEMAPHORE_WAIT 0x1C /* BDW+ */
+
+#define OP_MI_STORE_DATA_IMM 0x20
+#define OP_MI_STORE_DATA_INDEX 0x21
+#define OP_MI_LOAD_REGISTER_IMM 0x22
+#define OP_MI_UPDATE_GTT 0x23
+#define OP_MI_STORE_REGISTER_MEM 0x24
+#define OP_MI_FLUSH_DW 0x26
+#define OP_MI_CLFLUSH 0x27
+#define OP_MI_REPORT_PERF_COUNT 0x28
+#define OP_MI_LOAD_REGISTER_MEM 0x29 /* HSW+ */
+#define OP_MI_LOAD_REGISTER_REG 0x2A /* HSW+ */
+#define OP_MI_RS_STORE_DATA_IMM 0x2B /* HSW+ */
+#define OP_MI_LOAD_URB_MEM 0x2C /* HSW+ */
+#define OP_MI_STORE_URM_MEM 0x2D /* HSW+ */
+#define OP_MI_2E 0x2E /* BDW+ */
+#define OP_MI_2F 0x2F /* BDW+ */
+#define OP_MI_BATCH_BUFFER_START 0x31
+
+/* Bit definition for dword 0 */
+#define _CMDBIT_BB_START_IN_PPGTT (1UL << 8)
+
+#define OP_MI_CONDITIONAL_BATCH_BUFFER_END 0x36
+
+#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
+#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
+#define BATCH_BUFFER_ADR_SPACE_BIT(x) (((x) >> 8) & 1U)
+#define BATCH_BUFFER_2ND_LEVEL_BIT(x) ((x) >> 22 & 1U)
+
+/* 2D command: Opcode (28:22) */
+#define OP_2D(x) ((2<<7) | x)
+
+#define OP_XY_SETUP_BLT OP_2D(0x1)
+#define OP_XY_SETUP_CLIP_BLT OP_2D(0x3)
+#define OP_XY_SETUP_MONO_PATTERN_SL_BLT OP_2D(0x11)
+#define OP_XY_PIXEL_BLT OP_2D(0x24)
+#define OP_XY_SCANLINES_BLT OP_2D(0x25)
+#define OP_XY_TEXT_BLT OP_2D(0x26)
+#define OP_XY_TEXT_IMMEDIATE_BLT OP_2D(0x31)
+#define OP_XY_COLOR_BLT OP_2D(0x50)
+#define OP_XY_PAT_BLT OP_2D(0x51)
+#define OP_XY_MONO_PAT_BLT OP_2D(0x52)
+#define OP_XY_SRC_COPY_BLT OP_2D(0x53)
+#define OP_XY_MONO_SRC_COPY_BLT OP_2D(0x54)
+#define OP_XY_FULL_BLT OP_2D(0x55)
+#define OP_XY_FULL_MONO_SRC_BLT OP_2D(0x56)
+#define OP_XY_FULL_MONO_PATTERN_BLT OP_2D(0x57)
+#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT OP_2D(0x58)
+#define OP_XY_MONO_PAT_FIXED_BLT OP_2D(0x59)
+#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT OP_2D(0x71)
+#define OP_XY_PAT_BLT_IMMEDIATE OP_2D(0x72)
+#define OP_XY_SRC_COPY_CHROMA_BLT OP_2D(0x73)
+#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT OP_2D(0x74)
+#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT OP_2D(0x75)
+#define OP_XY_PAT_CHROMA_BLT OP_2D(0x76)
+#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE OP_2D(0x77)
+
+/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
+#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
+ ((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
+
+#define OP_STATE_PREFETCH OP_3D_MEDIA(0x0, 0x0, 0x03)
+
+#define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01)
+#define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02)
+#define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04)
+
+#define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B)
+
+#define OP_PIPELINE_SELECT OP_3D_MEDIA(0x1, 0x1, 0x04)
+
+#define OP_MEDIA_VFE_STATE OP_3D_MEDIA(0x2, 0x0, 0x0)
+#define OP_MEDIA_CURBE_LOAD OP_3D_MEDIA(0x2, 0x0, 0x1)
+#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
+#define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
+#define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
+
+#define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
+#define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
+#define OP_MEDIA_OBJECT_WALKER OP_3D_MEDIA(0x2, 0x1, 0x3)
+#define OP_GPGPU_WALKER OP_3D_MEDIA(0x2, 0x1, 0x5)
+
+#define OP_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
+#define OP_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
+#define OP_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
+#define OP_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
+#define OP_3DSTATE_VERTEX_BUFFERS OP_3D_MEDIA(0x3, 0x0, 0x08)
+#define OP_3DSTATE_VERTEX_ELEMENTS OP_3D_MEDIA(0x3, 0x0, 0x09)
+#define OP_3DSTATE_INDEX_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x0A)
+#define OP_3DSTATE_VF_STATISTICS OP_3D_MEDIA(0x3, 0x0, 0x0B)
+#define OP_3DSTATE_VF OP_3D_MEDIA(0x3, 0x0, 0x0C) /* HSW+ */
+#define OP_3DSTATE_CC_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0E)
+#define OP_3DSTATE_SCISSOR_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0F)
+#define OP_3DSTATE_VS OP_3D_MEDIA(0x3, 0x0, 0x10)
+#define OP_3DSTATE_GS OP_3D_MEDIA(0x3, 0x0, 0x11)
+#define OP_3DSTATE_CLIP OP_3D_MEDIA(0x3, 0x0, 0x12)
+#define OP_3DSTATE_SF OP_3D_MEDIA(0x3, 0x0, 0x13)
+#define OP_3DSTATE_WM OP_3D_MEDIA(0x3, 0x0, 0x14)
+#define OP_3DSTATE_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x15)
+#define OP_3DSTATE_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x16)
+#define OP_3DSTATE_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x17)
+#define OP_3DSTATE_SAMPLE_MASK OP_3D_MEDIA(0x3, 0x0, 0x18)
+#define OP_3DSTATE_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
+#define OP_3DSTATE_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
+#define OP_3DSTATE_HS OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
+#define OP_3DSTATE_TE OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
+#define OP_3DSTATE_DS OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
+#define OP_3DSTATE_STREAMOUT OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
+#define OP_3DSTATE_SBE OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
+#define OP_3DSTATE_PS OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
+#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
+#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
+#define OP_3DSTATE_BLEND_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
+#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
+#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
+#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
+#define OP_3DSTATE_URB_VS OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
+#define OP_3DSTATE_URB_HS OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
+#define OP_3DSTATE_URB_DS OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
+#define OP_3DSTATE_URB_GS OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
+#define OP_3DSTATE_GATHER_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
+#define OP_3DSTATE_GATHER_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTF_VS OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTF_PS OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTI_VS OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTI_PS OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTB_VS OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANTB_PS OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
+#define OP_3DSTATE_DX9_LOCAL_VALID_VS OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
+#define OP_3DSTATE_DX9_LOCAL_VALID_PS OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
+#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
+#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_VS OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_GS OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_HS OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_DS OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
+#define OP_3DSTATE_BINDING_TABLE_EDIT_PS OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
+
+#define OP_3DSTATE_VF_INSTANCING OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
+#define OP_3DSTATE_VF_SGVS OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
+#define OP_3DSTATE_VF_TOPOLOGY OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
+#define OP_3DSTATE_WM_CHROMAKEY OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
+#define OP_3DSTATE_PS_BLEND OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
+#define OP_3DSTATE_WM_DEPTH_STENCIL OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
+#define OP_3DSTATE_PS_EXTRA OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
+#define OP_3DSTATE_RASTER OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
+#define OP_3DSTATE_SBE_SWIZ OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
+#define OP_3DSTATE_WM_HZ_OP OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
+#define OP_3DSTATE_COMPONENT_PACKING OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
+
+#define OP_3DSTATE_DRAWING_RECTANGLE OP_3D_MEDIA(0x3, 0x1, 0x00)
+#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0 OP_3D_MEDIA(0x3, 0x1, 0x02)
+#define OP_3DSTATE_CHROMA_KEY OP_3D_MEDIA(0x3, 0x1, 0x04)
+#define OP_SNB_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x05)
+#define OP_3DSTATE_POLY_STIPPLE_OFFSET OP_3D_MEDIA(0x3, 0x1, 0x06)
+#define OP_3DSTATE_POLY_STIPPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x07)
+#define OP_3DSTATE_LINE_STIPPLE OP_3D_MEDIA(0x3, 0x1, 0x08)
+#define OP_3DSTATE_AA_LINE_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x0A)
+#define OP_3DSTATE_GS_SVB_INDEX OP_3D_MEDIA(0x3, 0x1, 0x0B)
+#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1 OP_3D_MEDIA(0x3, 0x1, 0x0C)
+#define OP_3DSTATE_MULTISAMPLE_BDW OP_3D_MEDIA(0x3, 0x0, 0x0D)
+#define OP_SNB_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0E)
+#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0F)
+#define OP_SNB_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x10)
+#define OP_3DSTATE_MONOFILTER_SIZE OP_3D_MEDIA(0x3, 0x1, 0x11)
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
+#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
+#define OP_3DSTATE_SO_DECL_LIST OP_3D_MEDIA(0x3, 0x1, 0x17)
+#define OP_3DSTATE_SO_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x18)
+#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
+#define OP_3DSTATE_GATHER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
+#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
+#define OP_3DSTATE_SAMPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x1C)
+#define OP_PIPE_CONTROL OP_3D_MEDIA(0x3, 0x2, 0x00)
+#define OP_3DPRIMITIVE OP_3D_MEDIA(0x3, 0x3, 0x00)
+
+/* VCCP Command Parser */
+
+/*
+ * Below MFX and VBE cmd definition is from vaapi intel driver project (BSD License)
+ * git://anongit.freedesktop.org/vaapi/intel-driver
+ * src/i965_defines.h
+ *
+ */
+
+#define OP_MFX(pipeline, op, sub_opa, sub_opb) \
+ (3 << 13 | \
+ (pipeline) << 11 | \
+ (op) << 8 | \
+ (sub_opa) << 5 | \
+ (sub_opb))
+
+#define OP_MFX_PIPE_MODE_SELECT OP_MFX(2, 0, 0, 0) /* ALL */
+#define OP_MFX_SURFACE_STATE OP_MFX(2, 0, 0, 1) /* ALL */
+#define OP_MFX_PIPE_BUF_ADDR_STATE OP_MFX(2, 0, 0, 2) /* ALL */
+#define OP_MFX_IND_OBJ_BASE_ADDR_STATE OP_MFX(2, 0, 0, 3) /* ALL */
+#define OP_MFX_BSP_BUF_BASE_ADDR_STATE OP_MFX(2, 0, 0, 4) /* ALL */
+#define OP_2_0_0_5 OP_MFX(2, 0, 0, 5) /* ALL */
+#define OP_MFX_STATE_POINTER OP_MFX(2, 0, 0, 6) /* ALL */
+#define OP_MFX_QM_STATE OP_MFX(2, 0, 0, 7) /* IVB+ */
+#define OP_MFX_FQM_STATE OP_MFX(2, 0, 0, 8) /* IVB+ */
+#define OP_MFX_PAK_INSERT_OBJECT OP_MFX(2, 0, 2, 8) /* IVB+ */
+#define OP_MFX_STITCH_OBJECT OP_MFX(2, 0, 2, 0xA) /* IVB+ */
+
+#define OP_MFD_IT_OBJECT OP_MFX(2, 0, 1, 9) /* ALL */
+
+#define OP_MFX_WAIT OP_MFX(1, 0, 0, 0) /* IVB+ */
+#define OP_MFX_AVC_IMG_STATE OP_MFX(2, 1, 0, 0) /* ALL */
+#define OP_MFX_AVC_QM_STATE OP_MFX(2, 1, 0, 1) /* ALL */
+#define OP_MFX_AVC_DIRECTMODE_STATE OP_MFX(2, 1, 0, 2) /* ALL */
+#define OP_MFX_AVC_SLICE_STATE OP_MFX(2, 1, 0, 3) /* ALL */
+#define OP_MFX_AVC_REF_IDX_STATE OP_MFX(2, 1, 0, 4) /* ALL */
+#define OP_MFX_AVC_WEIGHTOFFSET_STATE OP_MFX(2, 1, 0, 5) /* ALL */
+#define OP_MFD_AVC_PICID_STATE OP_MFX(2, 1, 1, 5) /* HSW+ */
+#define OP_MFD_AVC_DPB_STATE OP_MFX(2, 1, 1, 6) /* IVB+ */
+#define OP_MFD_AVC_SLICEADDR OP_MFX(2, 1, 1, 7) /* IVB+ */
+#define OP_MFD_AVC_BSD_OBJECT OP_MFX(2, 1, 1, 8) /* ALL */
+#define OP_MFC_AVC_PAK_OBJECT OP_MFX(2, 1, 2, 9) /* ALL */
+
+#define OP_MFX_VC1_PRED_PIPE_STATE OP_MFX(2, 2, 0, 1) /* ALL */
+#define OP_MFX_VC1_DIRECTMODE_STATE OP_MFX(2, 2, 0, 2) /* ALL */
+#define OP_MFD_VC1_SHORT_PIC_STATE OP_MFX(2, 2, 1, 0) /* IVB+ */
+#define OP_MFD_VC1_LONG_PIC_STATE OP_MFX(2, 2, 1, 1) /* IVB+ */
+#define OP_MFD_VC1_BSD_OBJECT OP_MFX(2, 2, 1, 8) /* ALL */
+
+#define OP_MFX_MPEG2_PIC_STATE OP_MFX(2, 3, 0, 0) /* ALL */
+#define OP_MFX_MPEG2_QM_STATE OP_MFX(2, 3, 0, 1) /* ALL */
+#define OP_MFD_MPEG2_BSD_OBJECT OP_MFX(2, 3, 1, 8) /* ALL */
+#define OP_MFC_MPEG2_SLICEGROUP_STATE OP_MFX(2, 3, 2, 3) /* ALL */
+#define OP_MFC_MPEG2_PAK_OBJECT OP_MFX(2, 3, 2, 9) /* ALL */
+
+#define OP_MFX_2_6_0_0 OP_MFX(2, 6, 0, 0) /* IVB+ */
+#define OP_MFX_2_6_0_8 OP_MFX(2, 6, 0, 8) /* IVB+ */
+#define OP_MFX_2_6_0_9 OP_MFX(2, 6, 0, 9) /* IVB+ */
+
+#define OP_MFX_JPEG_PIC_STATE OP_MFX(2, 7, 0, 0)
+#define OP_MFX_JPEG_HUFF_TABLE_STATE OP_MFX(2, 7, 0, 2)
+#define OP_MFD_JPEG_BSD_OBJECT OP_MFX(2, 7, 1, 8)
+
+#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
+ (3 << 13 | \
+ (pipeline) << 11 | \
+ (op) << 8 | \
+ (sub_opa) << 5 | \
+ (sub_opb))
+
+#define OP_VEB_SURFACE_STATE OP_VEB(2, 4, 0, 0)
+#define OP_VEB_STATE OP_VEB(2, 4, 0, 2)
+#define OP_VEB_DNDI_IECP_STATE OP_VEB(2, 4, 0, 3)
+
+struct parser_exec_state;
+
+typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
+
+#define GVT_CMD_HASH_BITS 7
+
+/* which DWords need address fix */
+#define ADDR_FIX_1(x1) (1 << (x1))
+#define ADDR_FIX_2(x1, x2) (ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
+#define ADDR_FIX_3(x1, x2, x3) (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
+#define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
+#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
+
+struct cmd_info {
+ char *name;
+ u32 opcode;
+
+#define F_LEN_MASK (1U<<0)
+#define F_LEN_CONST 1U
+#define F_LEN_VAR 0U
+
+/*
+ * command has its own ip advance logic
+ * e.g. MI_BATCH_START, MI_BATCH_END
+ */
+#define F_IP_ADVANCE_CUSTOM (1<<1)
+
+#define F_POST_HANDLE (1<<2)
+ u32 flag;
+
+#define R_RCS (1 << RCS)
+#define R_VCS1 (1 << VCS)
+#define R_VCS2 (1 << VCS2)
+#define R_VCS (R_VCS1 | R_VCS2)
+#define R_BCS (1 << BCS)
+#define R_VECS (1 << VECS)
+#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
+ /* rings that support this cmd: BLT/RCS/VCS/VECS */
+ uint16_t rings;
+
+ /* devices that support this cmd: SNB/IVB/HSW/... */
+ uint16_t devices;
+
+ /* which DWords are addresses that need fixing up.
+ * A 0 bit marks a 32-bit non-address operand in the command;
+ * a 1 bit marks an address operand, which may be 32-bit or
+ * 64-bit depending on the architecture (defined by
+ * "gmadr_bytes_in_cmd" in intel_gvt). Whatever the address
+ * length, each address takes only one bit in the bitmap.
+ */
+ uint16_t addr_bitmap;
+
+ /* flag == F_LEN_CONST : command length
+ * flag == F_LEN_VAR : length bias bits
+ * Note: length is in DWord
+ */
+ uint8_t len;
+
+ parser_cmd_handler handler;
+};
+
+struct cmd_entry {
+ struct hlist_node hlist;
+ struct cmd_info *info;
+};
+
+enum {
+ RING_BUFFER_INSTRUCTION,
+ BATCH_BUFFER_INSTRUCTION,
+ BATCH_BUFFER_2ND_LEVEL,
+};
+
+enum {
+ GTT_BUFFER,
+ PPGTT_BUFFER
+};
+
+struct parser_exec_state {
+ struct intel_vgpu *vgpu;
+ int ring_id;
+
+ int buf_type;
+
+ /* batch buffer address type */
+ int buf_addr_type;
+
+ /* graphics memory address of ring buffer start */
+ unsigned long ring_start;
+ unsigned long ring_size;
+ unsigned long ring_head;
+ unsigned long ring_tail;
+
+ /* instruction graphics memory address */
+ unsigned long ip_gma;
+
+ /* mapped va of the instr_gma */
+ void *ip_va;
+ void *rb_va;
+
+ void *ret_bb_va;
+ /* next instruction when return from batch buffer to ring buffer */
+ unsigned long ret_ip_gma_ring;
+
+ /* next instruction when return from 2nd batch buffer to batch buffer */
+ unsigned long ret_ip_gma_bb;
+
+ /* batch buffer address type (GTT or PPGTT)
+ * used when ret from 2nd level batch buffer
+ */
+ int saved_buf_addr_type;
+
+ struct cmd_info *info;
+
+ struct intel_vgpu_workload *workload;
+};
+
+#define gmadr_dw_number(s) \
+ ((s)->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
+
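+/*
+ * For example, on a device whose gmadr_bytes_in_cmd is 8 (two DWords per
+ * graphics address), gmadr_dw_number() evaluates to 8 >> 2 == 2.
+ */
+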
+static unsigned long bypass_scan_mask = 0;
+static bool bypass_batch_buffer_scan = true;
+
+/* ring ALL, type = 0 */
+static struct sub_op_bits sub_op_mi[] = {
+ {31, 29},
+ {28, 23},
+};
+
+static struct decode_info decode_info_mi = {
+ "MI",
+ OP_LEN_MI,
+ ARRAY_SIZE(sub_op_mi),
+ sub_op_mi,
+};
+
+/* ring RCS, command type 2 */
+static struct sub_op_bits sub_op_2d[] = {
+ {31, 29},
+ {28, 22},
+};
+
+static struct decode_info decode_info_2d = {
+ "2D",
+ OP_LEN_2D,
+ ARRAY_SIZE(sub_op_2d),
+ sub_op_2d,
+};
+
+/* ring RCS, command type 3 */
+static struct sub_op_bits sub_op_3d_media[] = {
+ {31, 29},
+ {28, 27},
+ {26, 24},
+ {23, 16},
+};
+
+static struct decode_info decode_info_3d_media = {
+ "3D_Media",
+ OP_LEN_3D_MEDIA,
+ ARRAY_SIZE(sub_op_3d_media),
+ sub_op_3d_media,
+};
+
+/* ring VCS, command type 3 */
+static struct sub_op_bits sub_op_mfx_vc[] = {
+ {31, 29},
+ {28, 27},
+ {26, 24},
+ {23, 21},
+ {20, 16},
+};
+
+static struct decode_info decode_info_mfx_vc = {
+ "MFX_VC",
+ OP_LEN_MFX_VC,
+ ARRAY_SIZE(sub_op_mfx_vc),
+ sub_op_mfx_vc,
+};
+
+/* ring VECS, command type 3 */
+static struct sub_op_bits sub_op_vebox[] = {
+ {31, 29},
+ {28, 27},
+ {26, 24},
+ {23, 21},
+ {20, 16},
+};
+
+static struct decode_info decode_info_vebox = {
+ "VEBOX",
+ OP_LEN_VEBOX,
+ ARRAY_SIZE(sub_op_vebox),
+ sub_op_vebox,
+};
+
+static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
+ [RCS] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_3d_media,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [VCS] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_mfx_vc,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [BCS] = {
+ &decode_info_mi,
+ NULL,
+ &decode_info_2d,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [VECS] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_vebox,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+
+ [VCS2] = {
+ &decode_info_mi,
+ NULL,
+ NULL,
+ &decode_info_mfx_vc,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ },
+};
+
+static inline u32 get_opcode(u32 cmd, int ring_id)
+{
+ struct decode_info *d_info;
+
+ if (ring_id >= I915_NUM_ENGINES)
+ return INVALID_OP;
+
+ d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ if (d_info == NULL)
+ return INVALID_OP;
+
+ return cmd >> (32 - d_info->op_len);
+}
+
+static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
+ unsigned int opcode, int ring_id)
+{
+ struct cmd_entry *e;
+
+ hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
+ if ((opcode == e->info->opcode) &&
+ (e->info->rings & (1 << ring_id)))
+ return e->info;
+ }
+ return NULL;
+}
+
+static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
+ u32 cmd, int ring_id)
+{
+ u32 opcode;
+
+ opcode = get_opcode(cmd, ring_id);
+ if (opcode == INVALID_OP)
+ return NULL;
+
+ return find_cmd_entry(gvt, opcode, ring_id);
+}
+
+static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
+{
+ return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
+}
+
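+/*
+ * Example: for an MI command (type 0), decode_info_mi defines the sub-op
+ * fields {31, 29} and {28, 23}; sub_op_val(cmd, 28, 23) then extracts
+ * bits 28..23, i.e. (cmd >> 23) & 0x3f.
+ */
+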
+static inline void print_opcode(u32 cmd, int ring_id)
+{
+ struct decode_info *d_info;
+ int i;
+
+ if (ring_id >= I915_NUM_ENGINES)
+ return;
+
+ d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ if (d_info == NULL)
+ return;
+
+ gvt_err("opcode=0x%x %s sub_ops:",
+ cmd >> (32 - d_info->op_len), d_info->name);
+
+ for (i = 0; i < d_info->nr_sub_op; i++)
+ pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
+ d_info->sub_op[i].low));
+
+ pr_err("\n");
+}
+
+static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
+{
+ return s->ip_va + (index << 2);
+}
+
+static inline u32 cmd_val(struct parser_exec_state *s, int index)
+{
+ return *cmd_ptr(s, index);
+}
+
+static void parser_exec_state_dump(struct parser_exec_state *s)
+{
+ int cnt = 0;
+ int i;
+
+ gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+ " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
+ s->ring_id, s->ring_start, s->ring_start + s->ring_size,
+ s->ring_head, s->ring_tail);
+
+ gvt_err(" %s %s ip_gma(%08lx) ",
+ s->buf_type == RING_BUFFER_INSTRUCTION ?
+ "RING_BUFFER" : "BATCH_BUFFER",
+ s->buf_addr_type == GTT_BUFFER ?
+ "GTT" : "PPGTT", s->ip_gma);
+
+ if (s->ip_va == NULL) {
+ gvt_err(" ip_va(NULL)");
+ return;
+ }
+
+ gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
+ s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
+ cmd_val(s, 2), cmd_val(s, 3));
+
+ print_opcode(cmd_val(s, 0), s->ring_id);
+
+ /* print the whole page to trace */
+ pr_err(" ip_va=%p: %08x %08x %08x %08x\n",
+ s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
+ cmd_val(s, 2), cmd_val(s, 3));
+
+ s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
+
+ while (cnt < 1024) {
+ pr_err("ip_va=%p: ", s->ip_va);
+ for (i = 0; i < 8; i++)
+ pr_err("%08x ", cmd_val(s, i));
+ pr_err("\n");
+
+ s->ip_va += 8 * sizeof(u32);
+ cnt += 8;
+ }
+}
+
+static inline void update_ip_va(struct parser_exec_state *s)
+{
+ unsigned long len = 0;
+
+ if (WARN_ON(s->ring_head == s->ring_tail))
+ return;
+
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ unsigned long ring_top = s->ring_start + s->ring_size;
+
+ if (s->ring_head > s->ring_tail) {
+ if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
+ len = (s->ip_gma - s->ring_head);
+ else if (s->ip_gma >= s->ring_start &&
+ s->ip_gma <= s->ring_tail)
+ len = (ring_top - s->ring_head) +
+ (s->ip_gma - s->ring_start);
+ } else
+ len = (s->ip_gma - s->ring_head);
+
+ s->ip_va = s->rb_va + len;
+ } else {/* shadow batch buffer */
+ s->ip_va = s->ret_bb_va;
+ }
+}
+
+static inline int ip_gma_set(struct parser_exec_state *s,
+ unsigned long ip_gma)
+{
+ WARN_ON(!IS_ALIGNED(ip_gma, 4));
+
+ s->ip_gma = ip_gma;
+ update_ip_va(s);
+ return 0;
+}
+
+static inline int ip_gma_advance(struct parser_exec_state *s,
+ unsigned int dw_len)
+{
+ s->ip_gma += (dw_len << 2);
+
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->ip_gma >= s->ring_start + s->ring_size)
+ s->ip_gma -= s->ring_size;
+ update_ip_va(s);
+ } else {
+ s->ip_va += (dw_len << 2);
+ }
+
+ return 0;
+}
+
+static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
+{
+ if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
+ return info->len;
+ else
+ return (cmd & ((1U << info->len) - 1)) + 2;
+}
+
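+/*
+ * Example: MI_FLUSH_DW is registered below with F_LEN_VAR and len == 6,
+ * so its total length is (cmd & 0x3f) + 2 DWords; MI_NOOP is F_LEN_CONST
+ * with len == 1, so get_cmd_length() simply returns 1.
+ */
+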
+static inline int cmd_length(struct parser_exec_state *s)
+{
+ return get_cmd_length(s->info, cmd_val(s, 0));
+}
+
+/* do not remove this, some platforms may need clflush here */
+#define patch_value(s, addr, val) do { \
+ *addr = val; \
+} while (0)
+
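+/*
+ * Usage example: the MI_DISPLAY_FLIP handler below NOOPs out the whole
+ * command once the flip has been emulated in software:
+ *     patch_value(s, cmd_ptr(s, i), MI_NOOP);
+ */
+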
+static bool is_shadowed_mmio(unsigned int offset)
+{
+ bool ret = false;
+
+ if ((offset == 0x2168) || /*BB current head register UDW */
+ (offset == 0x2140) || /*BB current header register */
+ (offset == 0x211c) || /*second BB header register UDW */
+ (offset == 0x2114)) { /*second BB header register */
+ ret = true;
+ }
+ return ret;
+}
+
+static int cmd_reg_handler(struct parser_exec_state *s,
+ unsigned int offset, unsigned int index, char *cmd)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ if (offset + 4 > gvt->device_info.mmio_size) {
+ gvt_err("%s access to (%x) outside of MMIO range\n",
+ cmd, offset);
+ return -EINVAL;
+ }
+
+ if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
+ gvt_err("vgpu%d: %s access to non-render register (%x)\n",
+ s->vgpu->id, cmd, offset);
+ return 0;
+ }
+
+ if (is_shadowed_mmio(offset)) {
+ gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
+ s->vgpu->id, offset);
+ return 0;
+ }
+
+ if (offset == i915_mmio_reg_offset(DERRMR) ||
+ offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
+ /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
+ patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
+ }
+
+ /* TODO: Update the global mask if this MMIO is a masked-MMIO */
+ intel_gvt_mmio_set_cmd_accessed(gvt, offset);
+ return 0;
+}
+
+#define cmd_reg(s, i) \
+ (cmd_val(s, i) & GENMASK(22, 2))
+
+#define cmd_reg_inhibit(s, i) \
+ (cmd_val(s, i) & GENMASK(22, 18))
+
+#define cmd_gma(s, i) \
+ (cmd_val(s, i) & GENMASK(31, 2))
+
+#define cmd_gma_hi(s, i) \
+ (cmd_val(s, i) & GENMASK(15, 0))
+
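+/*
+ * Example: in MI_LOAD_REGISTER_IMM, DWord 1 holds a register offset and
+ * DWord 2 the value; cmd_reg(s, 1) masks the offset to bits 22:2, which
+ * matches the MMIO offset encoding used by the LRI/LRR/LRM handlers below.
+ */
+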
+static int cmd_handler_lri(struct parser_exec_state *s)
+{
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+ struct intel_gvt *gvt = s->vgpu->gvt;
+
+ for (i = 1; i < cmd_len; i += 2) {
+ if (IS_BROADWELL(gvt->dev_priv) &&
+ (s->ring_id != RCS)) {
+ if (s->ring_id == BCS &&
+ cmd_reg(s, i) ==
+ i915_mmio_reg_offset(DERRMR))
+ ret |= 0;
+ else
+ ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+ }
+ if (ret)
+ break;
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
+ }
+ return ret;
+}
+
+static int cmd_handler_lrr(struct parser_exec_state *s)
+{
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+
+ for (i = 1; i < cmd_len; i += 2) {
+ if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+ ret |= ((cmd_reg_inhibit(s, i) ||
+ (cmd_reg_inhibit(s, i + 1)))) ?
+ -EINVAL : 0;
+ if (ret)
+ break;
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
+ ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
+ }
+ return ret;
+}
+
+static inline int cmd_address_audit(struct parser_exec_state *s,
+ unsigned long guest_gma, int op_size, bool index_mode);
+
+static int cmd_handler_lrm(struct parser_exec_state *s)
+{
+ struct intel_gvt *gvt = s->vgpu->gvt;
+ int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+
+ for (i = 1; i < cmd_len;) {
+ if (IS_BROADWELL(gvt->dev_priv))
+ ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+ if (ret)
+ break;
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
+ if (cmd_val(s, 0) & (1 << 22)) {
+ gma = cmd_gma(s, i + 1);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_gma_hi(s, i + 2)) << 32;
+ ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+ }
+ i += gmadr_dw_number(s) + 1;
+ }
+ return ret;
+}
+
+static int cmd_handler_srm(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ int i, ret = 0;
+ int cmd_len = cmd_length(s);
+
+ for (i = 1; i < cmd_len;) {
+ ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
+ if (cmd_val(s, 0) & (1 << 22)) {
+ gma = cmd_gma(s, i + 1);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_gma_hi(s, i + 2)) << 32;
+ ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+ }
+ i += gmadr_dw_number(s) + 1;
+ }
+ return ret;
+}
+
+struct cmd_interrupt_event {
+ int pipe_control_notify;
+ int mi_flush_dw;
+ int mi_user_interrupt;
+};
+
+static struct cmd_interrupt_event cmd_interrupt_events[] = {
+ [RCS] = {
+ .pipe_control_notify = RCS_PIPE_CONTROL,
+ .mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
+ .mi_user_interrupt = RCS_MI_USER_INTERRUPT,
+ },
+ [BCS] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = BCS_MI_FLUSH_DW,
+ .mi_user_interrupt = BCS_MI_USER_INTERRUPT,
+ },
+ [VCS] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = VCS_MI_FLUSH_DW,
+ .mi_user_interrupt = VCS_MI_USER_INTERRUPT,
+ },
+ [VCS2] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = VCS2_MI_FLUSH_DW,
+ .mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
+ },
+ [VECS] = {
+ .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
+ .mi_flush_dw = VECS_MI_FLUSH_DW,
+ .mi_user_interrupt = VECS_MI_USER_INTERRUPT,
+ },
+};
+
+static int cmd_handler_pipe_control(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ bool index_mode = false;
+ unsigned int post_sync;
+ int ret = 0;
+
+ post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
+
+ /* LRI post sync */
+ if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
+ ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
+ /* post sync */
+ else if (post_sync) {
+ if (post_sync == 2)
+ ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
+ else if (post_sync == 3)
+ ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
+ else if (post_sync == 1) {
+ /* check ggtt */
+ if ((cmd_val(s, 2) & (1 << 2))) {
+ gma = cmd_val(s, 2) & GENMASK(31, 3);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_gma_hi(s, 3)) << 32;
+ /* Store Data Index */
+ if (cmd_val(s, 1) & (1 << 21))
+ index_mode = true;
+ ret |= cmd_address_audit(s, gma, sizeof(u64),
+ index_mode);
+ }
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
+ set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
+ s->workload->pending_events);
+ return 0;
+}
+
+static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
+{
+ set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
+ s->workload->pending_events);
+ return 0;
+}
+
+static int cmd_advance_default(struct parser_exec_state *s)
+{
+ return ip_gma_advance(s, cmd_length(s));
+}
+
+static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
+{
+ int ret;
+
+ if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
+ s->buf_type = BATCH_BUFFER_INSTRUCTION;
+ ret = ip_gma_set(s, s->ret_ip_gma_bb);
+ s->buf_addr_type = s->saved_buf_addr_type;
+ } else {
+ s->buf_type = RING_BUFFER_INSTRUCTION;
+ s->buf_addr_type = GTT_BUFFER;
+ if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
+ s->ret_ip_gma_ring -= s->ring_size;
+ ret = ip_gma_set(s, s->ret_ip_gma_ring);
+ }
+ return ret;
+}
+
+struct mi_display_flip_command_info {
+ int pipe;
+ int plane;
+ int event;
+ i915_reg_t stride_reg;
+ i915_reg_t ctrl_reg;
+ i915_reg_t surf_reg;
+ u64 stride_val;
+ u64 tile_val;
+ u64 surf_val;
+ bool async_flip;
+};
+
+struct plane_code_mapping {
+ int pipe;
+ int plane;
+ int event;
+};
+
+static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct plane_code_mapping gen8_plane_code[] = {
+ [0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
+ [1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
+ [2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
+ [3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
+ [4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
+ [5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
+ };
+ u32 dword0, dword1, dword2;
+ u32 v;
+
+ dword0 = cmd_val(s, 0);
+ dword1 = cmd_val(s, 1);
+ dword2 = cmd_val(s, 2);
+
+ v = (dword0 & GENMASK(21, 19)) >> 19;
+ if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
+ return -EINVAL;
+
+ info->pipe = gen8_plane_code[v].pipe;
+ info->plane = gen8_plane_code[v].plane;
+ info->event = gen8_plane_code[v].event;
+ info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
+ info->tile_val = (dword1 & 0x1);
+ info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
+ info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
+
+ if (info->plane == PLANE_A) {
+ info->ctrl_reg = DSPCNTR(info->pipe);
+ info->stride_reg = DSPSTRIDE(info->pipe);
+ info->surf_reg = DSPSURF(info->pipe);
+ } else if (info->plane == PLANE_B) {
+ info->ctrl_reg = SPRCTL(info->pipe);
+ info->stride_reg = SPRSTRIDE(info->pipe);
+ info->surf_reg = SPRSURF(info->pipe);
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int skl_decode_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ u32 dword0 = cmd_val(s, 0);
+ u32 dword1 = cmd_val(s, 1);
+ u32 dword2 = cmd_val(s, 2);
+ u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
+
+ switch (plane) {
+ case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
+ info->pipe = PIPE_A;
+ info->event = PRIMARY_A_FLIP_DONE;
+ break;
+ case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
+ info->pipe = PIPE_B;
+ info->event = PRIMARY_B_FLIP_DONE;
+ break;
+ case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
+ info->pipe = PIPE_C;
+ info->event = PRIMARY_C_FLIP_DONE;
+ break;
+ default:
+ gvt_err("unknown plane code %d\n", plane);
+ return -EINVAL;
+ }
+
+ info->plane = PRIMARY_PLANE;
+ info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
+ info->tile_val = (dword1 & GENMASK(2, 0));
+ info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
+ info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
+
+ info->ctrl_reg = DSPCNTR(info->pipe);
+ info->stride_reg = DSPSTRIDE(info->pipe);
+ info->surf_reg = DSPSURF(info->pipe);
+
+ return 0;
+}
+
+static int gen8_check_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ u32 stride, tile;
+
+ if (!info->async_flip)
+ return 0;
+
+ if (IS_SKYLAKE(dev_priv)) {
+ stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
+ tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
+ GENMASK(12, 10)) >> 10;
+ } else {
+ stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
+ GENMASK(15, 6)) >> 6;
+ tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
+ }
+
+ if (stride != info->stride_val)
+ gvt_dbg_cmd("cannot change stride during async flip\n");
+
+ if (tile != info->tile_val)
+ gvt_dbg_cmd("cannot change tile during async flip\n");
+
+ return 0;
+}
+
+static int gen8_update_plane_mmio_from_mi_display_flip(
+ struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct intel_vgpu *vgpu = s->vgpu;
+
+#define write_bits(reg, e, s, v) do { \
+ vgpu_vreg(vgpu, reg) &= ~GENMASK(e, s); \
+ vgpu_vreg(vgpu, reg) |= (v << s); \
+} while (0)
+
+ write_bits(info->surf_reg, 31, 12, info->surf_val);
+ if (IS_SKYLAKE(dev_priv))
+ write_bits(info->stride_reg, 9, 0, info->stride_val);
+ else
+ write_bits(info->stride_reg, 15, 6, info->stride_val);
+ write_bits(info->ctrl_reg, IS_SKYLAKE(dev_priv) ? 12 : 10,
+ 10, info->tile_val);
+
+#undef write_bits
+
+ vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
+ intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ return 0;
+}
+
+static int decode_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (IS_BROADWELL(dev_priv))
+ return gen8_decode_mi_display_flip(s, info);
+ if (IS_SKYLAKE(dev_priv))
+ return skl_decode_mi_display_flip(s, info);
+
+ return -ENODEV;
+}
+
+static int check_mi_display_flip(struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ return gen8_check_mi_display_flip(s, info);
+ return -ENODEV;
+}
+
+static int update_plane_mmio_from_mi_display_flip(
+ struct parser_exec_state *s,
+ struct mi_display_flip_command_info *info)
+{
+ struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+
+ if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ return gen8_update_plane_mmio_from_mi_display_flip(s, info);
+ return -ENODEV;
+}
+
+static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
+{
+ struct mi_display_flip_command_info info;
+ int ret;
+ int i;
+ int len = cmd_length(s);
+
+ ret = decode_mi_display_flip(s, &info);
+ if (ret) {
+ gvt_err("fail to decode MI display flip command\n");
+ return ret;
+ }
+
+ ret = check_mi_display_flip(s, &info);
+ if (ret) {
+ gvt_err("invalid MI display flip command\n");
+ return ret;
+ }
+
+ ret = update_plane_mmio_from_mi_display_flip(s, &info);
+ if (ret) {
+ gvt_err("fail to update plane mmio\n");
+ return ret;
+ }
+
+ for (i = 0; i < len; i++)
+ patch_value(s, cmd_ptr(s, i), MI_NOOP);
+ return 0;
+}
+
+static bool is_wait_for_flip_pending(u32 cmd)
+{
+ return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
+ MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
+ MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
+ MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
+ MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
+ MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
+}
+
+static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
+{
+ u32 cmd = cmd_val(s, 0);
+
+ if (!is_wait_for_flip_pending(cmd))
+ return 0;
+
+ patch_value(s, cmd_ptr(s, 0), MI_NOOP);
+ return 0;
+}
+
+static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
+{
+ unsigned long addr;
+ unsigned long gma_high, gma_low;
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+
+ if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+ return INTEL_GVT_INVALID_ADDR;
+
+ gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
+ if (gmadr_bytes == 4) {
+ addr = gma_low;
+ } else {
+ gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
+ addr = (((unsigned long)gma_high) << 32) | gma_low;
+ }
+ return addr;
+}
+
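+/*
+ * Example for get_gma_bb_from_cmd() above (assuming the address masks pass
+ * the relevant bits through): with gmadr_bytes == 8, cmd_val(s, index) ==
+ * 0x12345000 and the next DWord carrying high bits 0x1, the returned gma
+ * is 0x112345000.
+ */
+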
+static inline int cmd_address_audit(struct parser_exec_state *s,
+ unsigned long guest_gma, int op_size, bool index_mode)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
+ int i;
+ int ret;
+
+ if (op_size > max_surface_size) {
+ gvt_err("command address audit fail name %s\n", s->info->name);
+ return -EINVAL;
+ }
+
+ if (index_mode) {
+ if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ } else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
+ (!vgpu_gmadr_is_valid(s->vgpu,
+ guest_gma + op_size - 1))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ return 0;
+err:
+ gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+ s->info->name, guest_gma, op_size);
+
+ pr_err("cmd dump: ");
+ for (i = 0; i < cmd_length(s); i++) {
+ if (!(i % 4))
+ pr_err("\n%08x ", cmd_val(s, i));
+ else
+ pr_err("%08x ", cmd_val(s, i));
+ }
+ pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
+ vgpu->id,
+ vgpu_aperture_gmadr_base(vgpu),
+ vgpu_aperture_gmadr_end(vgpu),
+ vgpu_hidden_gmadr_base(vgpu),
+ vgpu_hidden_gmadr_end(vgpu));
+ return ret;
+}
+
+static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ int op_size = (cmd_length(s) - 3) * sizeof(u32);
+ int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
+ unsigned long gma, gma_low, gma_high;
+ int ret = 0;
+
+ /* check ppgtt */
+ if (!(cmd_val(s, 0) & (1 << 22)))
+ return 0;
+
+ gma = cmd_val(s, 2) & GENMASK(31, 2);
+
+ if (gmadr_bytes == 8) {
+ gma_low = cmd_val(s, 1) & GENMASK(31, 2);
+ gma_high = cmd_val(s, 2) & GENMASK(15, 0);
+ gma = (gma_high << 32) | gma_low;
+ core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
+ }
+ ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
+ return ret;
+}
+
+static inline int unexpected_cmd(struct parser_exec_state *s)
+{
+ gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
+ s->vgpu->id, s->info->name);
+ return -EINVAL;
+}
+
+static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
+ sizeof(u32);
+ unsigned long gma, gma_high;
+ int ret = 0;
+
+ if (!(cmd_val(s, 0) & (1 << 22)))
+ return ret;
+
+ gma = cmd_val(s, 1) & GENMASK(31, 2);
+ if (gmadr_bytes == 8) {
+ gma_high = cmd_val(s, 2) & GENMASK(15, 0);
+ gma = (gma_high << 32) | gma;
+ }
+ ret = cmd_address_audit(s, gma, op_size, false);
+ return ret;
+}
+
+static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_clflush(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_conditional_batch_buffer_end(
+ struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
+{
+ return unexpected_cmd(s);
+}
+
+static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
+{
+ int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ unsigned long gma;
+ bool index_mode = false;
+ int ret = 0;
+
+ /* Check post-sync and ppgtt bit */
+ if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
+ gma = cmd_val(s, 1) & GENMASK(31, 3);
+ if (gmadr_bytes == 8)
+ gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
+ /* Store Data Index */
+ if (cmd_val(s, 0) & (1 << 21))
+ index_mode = true;
+ ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+ }
+ /* Check notify bit */
+ if ((cmd_val(s, 0) & (1 << 8)))
+ set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
+ s->workload->pending_events);
+ return ret;
+}
+
+static void addr_type_update_snb(struct parser_exec_state *s)
+{
+ if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
+ (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
+ s->buf_addr_type = PPGTT_BUFFER;
+ }
+}
+
+static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
+ unsigned long gma, unsigned long end_gma, void *va)
+{
+ unsigned long copy_len, offset;
+ unsigned long len = 0;
+ unsigned long gpa;
+
+ while (gma != end_gma) {
+ gpa = intel_vgpu_gma_to_gpa(mm, gma);
+ if (gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("invalid gma address: %lx\n", gma);
+ return -EFAULT;
+ }
+
+ offset = gma & (GTT_PAGE_SIZE - 1);
+
+ copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
+ GTT_PAGE_SIZE - offset : end_gma - gma;
+
+ intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
+
+ len += copy_len;
+ gma += copy_len;
+ }
+ return 0;
+}
+
+/*
+ * Check whether a batch buffer needs to be scanned. Currently
+ * the only criterion is privilege.
+ */
+static int batch_buffer_needs_scan(struct parser_exec_state *s)
+{
+ struct intel_gvt *gvt = s->vgpu->gvt;
+
+ if (bypass_batch_buffer_scan)
+ return 0;
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ /* BDW decides privilege based on address space */
+ if (cmd_val(s, 0) & (1 << 8))
+ return 0;
+ }
+ return 1;
+}
+
+static uint32_t find_bb_size(struct parser_exec_state *s)
+{
+ unsigned long gma = 0;
+ struct cmd_info *info;
+ uint32_t bb_size = 0;
+ uint32_t cmd_len = 0;
+ bool met_bb_end = false;
+ u32 cmd;
+
+ /* get the start gm address of the batch buffer */
+ gma = get_gma_bb_from_cmd(s, 1);
+ cmd = cmd_val(s, 0);
+
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ cmd, get_opcode(cmd, s->ring_id));
+ return -EINVAL;
+ }
+ do {
+ copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+ gma, gma + 4, &cmd);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ cmd, get_opcode(cmd, s->ring_id));
+ return -EINVAL;
+ }
+
+ if (info->opcode == OP_MI_BATCH_BUFFER_END) {
+ met_bb_end = true;
+ } else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
+ if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
+ /* chained batch buffer */
+ met_bb_end = true;
+ }
+ }
+ cmd_len = get_cmd_length(info, cmd) << 2;
+ bb_size += cmd_len;
+ gma += cmd_len;
+
+ } while (!met_bb_end);
+
+ return bb_size;
+}
+
+static int perform_bb_shadow(struct parser_exec_state *s)
+{
+ struct intel_shadow_bb_entry *entry_obj;
+ unsigned long gma = 0;
+ uint32_t bb_size;
+ void *dst = NULL;
+ int ret = 0;
+
+ /* get the start gm address of the batch buffer */
+ gma = get_gma_bb_from_cmd(s, 1);
+
+ /* get the size of the batch buffer */
+ bb_size = find_bb_size(s);
+
+ /* allocate shadow batch buffer */
+ entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
+ if (entry_obj == NULL)
+ return -ENOMEM;
+
+ entry_obj->obj =
+ i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm),
+ roundup(bb_size, PAGE_SIZE));
+ if (IS_ERR(entry_obj->obj)) {
+ ret = PTR_ERR(entry_obj->obj);
+ goto free_entry;
+ }
+ entry_obj->len = bb_size;
+ INIT_LIST_HEAD(&entry_obj->list);
+
+ dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto put_obj;
+ }
+
+ ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
+ if (ret) {
+ gvt_err("failed to set shadow batch to CPU\n");
+ goto unmap_src;
+ }
+
+ entry_obj->va = dst;
+ entry_obj->bb_start_cmd_va = s->ip_va;
+
+ /* copy batch buffer to shadow batch buffer */
+ ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+ gma, gma + bb_size,
+ dst);
+ if (ret) {
+ gvt_err("fail to copy guest ring buffer\n");
+ goto unmap_src;
+ }
+
+ list_add(&entry_obj->list, &s->workload->shadow_bb);
+ /*
+ * ip_va saves the virtual address of the shadow batch buffer, while
+ * ip_gma saves the graphics address of the original batch buffer.
+ * As the shadow batch buffer is just a copy of the original one,
+ * it is correct to use the shadow batch buffer's va and the original
+ * batch buffer's gma in pair. After all, we don't want to pin the shadow
+ * buffer here (too early).
+ */
+ s->ip_va = dst;
+ s->ip_gma = gma;
+
+ return 0;
+
+unmap_src:
+ i915_gem_object_unpin_map(entry_obj->obj);
+put_obj:
+ i915_gem_object_put(entry_obj->obj);
+free_entry:
+ kfree(entry_obj);
+ return ret;
+}
+
+static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
+{
+ bool second_level;
+ int ret = 0;
+
+ if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
+ gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+ return -EINVAL;
+ }
+
+ second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
+ if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
+ gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+ return -EINVAL;
+ }
+
+ s->saved_buf_addr_type = s->buf_addr_type;
+ addr_type_update_snb(s);
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
+ s->buf_type = BATCH_BUFFER_INSTRUCTION;
+ } else if (second_level) {
+ s->buf_type = BATCH_BUFFER_2ND_LEVEL;
+ s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
+ s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
+ }
+
+ if (batch_buffer_needs_scan(s)) {
+ ret = perform_bb_shadow(s);
+ if (ret < 0)
+ gvt_err("invalid shadow batch buffer\n");
+ } else {
+ /* emulate a batch buffer end to handle the return correctly */
+ ret = cmd_handler_mi_batch_buffer_end(s);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct cmd_info cmd_info[] = {
+ {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+ {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
+ 0, 1, NULL},
+
+ {"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
+ 0, 1, cmd_handler_mi_user_interrupt},
+
+ {"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
+ D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
+
+ {"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+ {"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
+ D_ALL, 0, 1, NULL},
+
+ {"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
+ F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ cmd_handler_mi_batch_buffer_end},
+
+ {"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
+ 0, 1, NULL},
+
+ {"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
+ D_ALL, 0, 1, NULL},
+
+ {"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
+ NULL},
+
+ {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
+ R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
+
+ {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
+ 0, 8, NULL},
+
+ {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
+
+ {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
+ ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+
+ {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
+ ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
+
+ {"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
+ 0, 8, cmd_handler_mi_store_data_index},
+
+ {"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
+ D_ALL, 0, 8, cmd_handler_lri},
+
+ {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
+ cmd_handler_mi_update_gtt},
+
+ {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
+ D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
+
+ {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
+ cmd_handler_mi_flush_dw},
+
+ {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
+ 10, cmd_handler_mi_clflush},
+
+ {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
+ D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
+
+ {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
+ D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
+
+ {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
+ D_ALL, 0, 8, cmd_handler_lrr},
+
+ {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(2), 8, NULL},
+
+ {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(2), 8, NULL},
+
+ {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
+ 8, cmd_handler_mi_op_2e},
+
+ {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
+ 8, cmd_handler_mi_op_2f},
+
+ {"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
+ F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
+ cmd_handler_mi_batch_buffer_start},
+
+ {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
+ F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_mi_conditional_batch_buffer_end},
+
+ {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
+ R_RCS | R_BCS, D_ALL, 0, 2, NULL},
+
+ {"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ 0, 8, NULL},
+
+ {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
+
+ {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ 0, 8, NULL},
+
+ {"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_1(3), 8, NULL},
+
+ {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, 0, 8, NULL},
+
+ {"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
+
+ {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
+
+ {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
+ R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
+ OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
+ D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
+
+ {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
+ OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
+ ADDR_FIX_2(4, 5), 8, NULL},
+
+ {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
+ F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
+
+ {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
+ OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
+ OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BLEND_STATE_POINTERS",
+ OP_3DSTATE_BLEND_STATE_POINTERS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
+ OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_VS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_HS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_DS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_GS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POINTERS_PS",
+ OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_VS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_HS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_DS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_GS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_STATE_POINTERS_PS",
+ OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
+
+ {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
+
+ {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
+ 8, NULL},
+
+ {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
+ R_RCS, D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
+ 8, NULL},
+
+ {"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
+ NULL},
+
+ {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
+ R_RCS, D_ALL, 0, 1, NULL},
+
+ {"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
+ D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
+ D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
+ R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
+
+ {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
+ R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
+ D_ALL, 0, 9, NULL},
+
+ {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+ ADDR_FIX_2(2, 4), 8, NULL},
+
+ {"3DSTATE_BINDING_TABLE_POOL_ALLOC",
+ OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
+ F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
+ F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
+ OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
+ F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
+ D_BDW_PLUS, 0, 8, NULL},
+
+ {"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
+
+ {"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
+ 1, NULL},
+
+ {"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(1), 8, NULL},
+
+ {"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+ ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
+
+ {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
+ ADDR_FIX_1(1), 8, NULL},
+
+ {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+
+ {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
+ 0, 8, NULL},
+
+ {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
+ D_SKL_PLUS, 0, 8, NULL},
+
+ {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
+ F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
+
+ {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
+
+ {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 16, NULL},
+
+ {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
+ 0, 8, NULL},
+
+ {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
+ NULL},
+
+ {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
+ F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
+
+ {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
+ R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+ {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
+ F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+ {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
+ F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
+
+ {"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
+
+ {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 6, NULL},
+
+ {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+ {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
+ R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
+
+ {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
+ 0, 16, NULL},
+
+ {"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
+
+ {"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
+
+ {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
+ R_VCS, D_ALL, 0, 12, NULL},
+
+ {"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
+
+ {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
+ 0, 12, NULL},
+
+ {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
+ 0, 20, NULL},
+};
+
+static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
+{
+ hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
+}
+
+#define GVT_MAX_CMD_LENGTH 20 /* In Dword */
+
+static void trace_cs_command(struct parser_exec_state *s,
+ cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
+{
+ /* This buffer is used by ftrace to store all commands copied from
+ * guest gma space. Sometimes commands can cross pages, which should
+ * not be handled in the ftrace logic, so this is just used as a
+ * 'bounce buffer'.
+ */
+ u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
+ int i;
+ u32 cmd_len = cmd_length(s);
+ /* The chosen value of GVT_MAX_CMD_LENGTH is based on the
+ * following two considerations:
+ * 1) From observation, most common ring commands are not that long.
+ * But there are exceptions, so it indeed makes sense to observe
+ * longer commands.
+ * 2) From the performance and debugging point of view, dumping all
+ * contents of every command is not necessary.
+ * We might shrink GVT_MAX_CMD_LENGTH or remove this trace event in
+ * the future for performance considerations.
+ */
+ if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
+ gvt_dbg_cmd("cmd length exceed tracing limitation!\n");
+ cmd_len = GVT_MAX_CMD_LENGTH;
+ }
+
+ for (i = 0; i < cmd_len; i++)
+ cmd_trace_buf[i] = cmd_val(s, i);
+
+ trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
+ cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
+ cost_pre_cmd_handler, cost_cmd_handler);
+}
+
+/* call the cmd handler, and advance ip */
+static int cmd_parser_exec(struct parser_exec_state *s)
+{
+ struct cmd_info *info;
+ u32 cmd;
+ int ret = 0;
+ cycles_t t0, t1, t2;
+ struct parser_exec_state s_before_advance_custom;
+
+ t0 = get_cycles();
+
+ cmd = cmd_val(s, 0);
+
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ cmd, get_opcode(cmd, s->ring_id));
+ return -EINVAL;
+ }
+
+ gvt_dbg_cmd("%s\n", info->name);
+
+ s->info = info;
+
+ t1 = get_cycles();
+
+ memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
+
+ if (info->handler) {
+ ret = info->handler(s);
+ if (ret < 0) {
+ gvt_err("%s handler error\n", info->name);
+ return ret;
+ }
+ }
+ t2 = get_cycles();
+
+ trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
+
+ if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
+ ret = cmd_advance_default(s);
+ if (ret) {
+ gvt_err("%s IP advance error\n", info->name);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static inline bool gma_out_of_range(unsigned long gma,
+ unsigned long gma_head, unsigned long gma_tail)
+{
+ if (gma_tail >= gma_head)
+ return (gma < gma_head) || (gma > gma_tail);
+ else
+ return (gma > gma_tail) && (gma < gma_head);
+}
+
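+/*
+ * Example: with head == 0x1000 and tail == 0x3000 (no wrap), gma 0x500 is
+ * out of range; with head == 0x3000 and tail == 0x1000 (wrapped ring), the
+ * valid region is [0x3000, ring top) plus [ring start, 0x1000], so gma
+ * 0x2000 is out of range.
+ */
+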
+static int command_scan(struct parser_exec_state *s,
+ unsigned long rb_head, unsigned long rb_tail,
+ unsigned long rb_start, unsigned long rb_len)
+{
+ unsigned long gma_head, gma_tail, gma_bottom;
+ int ret = 0;
+
+ gma_head = rb_start + rb_head;
+ gma_tail = rb_start + rb_tail;
+ gma_bottom = rb_start + rb_len;
+
+ gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
+
+ while (s->ip_gma != gma_tail) {
+ if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->ip_gma < rb_start ||
+ s->ip_gma >= gma_bottom) {
+ gvt_err("ip_gma %lx out of ring scope. "
+ "(base:0x%lx, bottom: 0x%lx)\n",
+ s->ip_gma, rb_start,
+ gma_bottom);
+ parser_exec_state_dump(s);
+ return -EINVAL;
+ }
+ if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
+ gvt_err("ip_gma %lx out of range. "
+ "base 0x%lx head 0x%lx tail 0x%lx\n",
+ s->ip_gma, rb_start,
+ rb_head, rb_tail);
+ parser_exec_state_dump(s);
+ break;
+ }
+ }
+ ret = cmd_parser_exec(s);
+ if (ret) {
+ gvt_err("cmd parser error\n");
+ parser_exec_state_dump(s);
+ break;
+ }
+ }
+
+ gvt_dbg_cmd("scan_end\n");
+
+ return ret;
+}
+
+static int scan_workload(struct intel_vgpu_workload *workload)
+{
+ unsigned long gma_head, gma_tail, gma_bottom;
+ struct parser_exec_state s;
+ int ret = 0;
+
+ /* ring base is page aligned */
+ if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
+ return -EINVAL;
+
+ gma_head = workload->rb_start + workload->rb_head;
+ gma_tail = workload->rb_start + workload->rb_tail;
+ gma_bottom = workload->rb_start + _RING_CTL_BUF_SIZE(workload->rb_ctl);
+
+ s.buf_type = RING_BUFFER_INSTRUCTION;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = workload->vgpu;
+ s.ring_id = workload->ring_id;
+ s.ring_start = workload->rb_start;
+ s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
+ s.ring_head = gma_head;
+ s.ring_tail = gma_tail;
+ s.rb_va = workload->shadow_ring_buffer_va;
+ s.workload = workload;
+
+ if (bypass_scan_mask & (1 << workload->ring_id))
+ return 0;
+
+ ret = ip_gma_set(&s, gma_head);
+ if (ret)
+ goto out;
+
+ ret = command_scan(&s, workload->rb_head, workload->rb_tail,
+ workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
+
+out:
+ return ret;
+}
+
+static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
+ struct parser_exec_state s;
+ int ret = 0;
+
+ /* ring base is page aligned */
+ if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
+ return -EINVAL;
+
+ ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
+ ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
+ PAGE_SIZE);
+ gma_head = wa_ctx->indirect_ctx.guest_gma;
+ gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
+ gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
+
+ s.buf_type = RING_BUFFER_INSTRUCTION;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = wa_ctx->workload->vgpu;
+ s.ring_id = wa_ctx->workload->ring_id;
+ s.ring_start = wa_ctx->indirect_ctx.guest_gma;
+ s.ring_size = ring_size;
+ s.ring_head = gma_head;
+ s.ring_tail = gma_tail;
+ s.rb_va = wa_ctx->indirect_ctx.shadow_va;
+ s.workload = wa_ctx->workload;
+
+ ret = ip_gma_set(&s, gma_head);
+ if (ret)
+ goto out;
+
+ ret = command_scan(&s, 0, ring_tail,
+ wa_ctx->indirect_ctx.guest_gma, ring_size);
+out:
+ return ret;
+}
+
+static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
+ unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
+ unsigned int copy_len = 0;
+ int ret;
+
+ guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
+
+ /* calculate workload ring buffer size */
+ workload->rb_len = (workload->rb_tail + guest_rb_size -
+ workload->rb_head) % guest_rb_size;
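+ /* e.g. head = 0x3f00, tail = 0x0100, size = 0x4000 gives
+ * rb_len = (0x0100 + 0x4000 - 0x3f00) % 0x4000 = 0x0200
+ */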
+
+ gma_head = workload->rb_start + workload->rb_head;
+ gma_tail = workload->rb_start + workload->rb_tail;
+ gma_top = workload->rb_start + guest_rb_size;
+
+ /* allocate shadow ring buffer */
+ ret = intel_ring_begin(workload->req, workload->rb_len / 4);
+ if (ret)
+ return ret;
+
+ /* get shadow ring buffer va */
+ workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
+
+ /* head > tail --> copy [head, top), then wrap to the ring start */
+ if (gma_head > gma_tail) {
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
+ gma_head, gma_top,
+ workload->shadow_ring_buffer_va);
+ if (ret) {
+ gvt_err("fail to copy guest ring buffer\n");
+ return ret;
+ }
+ copy_len = gma_top - gma_head;
+ gma_head = workload->rb_start;
+ }
+
+ /* copy [head, tail) (or [start, tail) after a wrap) */
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
+ gma_head, gma_tail,
+ workload->shadow_ring_buffer_va + copy_len);
+ if (ret) {
+ gvt_err("fail to copy guest ring buffer\n");
+ return ret;
+ }
+ ring->tail += workload->rb_len;
+ intel_ring_advance(ring);
+ return 0;
+}
+
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
+{
+ int ret;
+
+ ret = shadow_workload_ring_buffer(workload);
+ if (ret) {
+ gvt_err("fail to shadow workload ring_buffer\n");
+ return ret;
+ }
+
+ ret = scan_workload(workload);
+ if (ret) {
+ gvt_err("scan workload error\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm;
+ int ctx_size = wa_ctx->indirect_ctx.size;
+ unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+ struct drm_i915_gem_object *obj;
+ int ret = 0;
+ void *map;
+
+ obj = i915_gem_object_create(dev,
+ roundup(ctx_size + CACHELINE_BYTES,
+ PAGE_SIZE));
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ /* get the va of the shadow batch buffer */
+ map = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(map)) {
+ gvt_err("failed to vmap shadow indirect ctx\n");
+ ret = PTR_ERR(map);
+ goto put_obj;
+ }
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (ret) {
+ gvt_err("failed to set shadow indirect ctx to CPU\n");
+ goto unmap_src;
+ }
+
+ ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
+ wa_ctx->workload->vgpu->gtt.ggtt_mm,
+ guest_gma, guest_gma + ctx_size,
+ map);
+ if (ret) {
+ gvt_err("fail to copy guest indirect ctx\n");
+ goto unmap_src;
+ }
+
+ wa_ctx->indirect_ctx.obj = obj;
+ wa_ctx->indirect_ctx.shadow_va = map;
+ return 0;
+
+unmap_src:
+ i915_gem_object_unpin_map(obj);
+put_obj:
+ i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+ return ret;
+}
+
+static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
+ unsigned char *bb_start_sva;
+
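+ /* 0x18800001 is an MI_BATCH_BUFFER_START command dword (opcode
+ * 0x31 << 23 with a dword length field of 1), chaining to the
+ * guest per-ctx buffer address below; the upper address dword
+ * stays zero.
+ */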
+ per_ctx_start[0] = 0x18800001;
+ per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
+
+ bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+ wa_ctx->indirect_ctx.size;
+
+ memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
+
+ return 0;
+}
+
+int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ int ret;
+
+ if (wa_ctx->indirect_ctx.size == 0)
+ return 0;
+
+ ret = shadow_indirect_ctx(wa_ctx);
+ if (ret) {
+ gvt_err("fail to shadow indirect ctx\n");
+ return ret;
+ }
+
+ combine_wa_ctx(wa_ctx);
+
+ ret = scan_wa_ctx(wa_ctx);
+ if (ret) {
+ gvt_err("scan wa ctx error\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
+ unsigned int opcode, int rings)
+{
+ struct cmd_info *info = NULL;
+ unsigned int ring;
+
+ for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
+ info = find_cmd_entry(gvt, opcode, ring);
+ if (info)
+ break;
+ }
+ return info;
+}
+
+static int init_cmd_table(struct intel_gvt *gvt)
+{
+ int i;
+ struct cmd_entry *e;
+ struct cmd_info *info;
+ unsigned int gen_type;
+
+ gen_type = intel_gvt_get_device_type(gvt);
+
+ for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+ if (!(cmd_info[i].devices & gen_type))
+ continue;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->info = &cmd_info[i];
+ info = find_cmd_entry_any_ring(gvt,
+ e->info->opcode, e->info->rings);
+ if (info) {
+ gvt_err("%s %s duplicated\n", e->info->name,
+ info->name);
+ return -EEXIST;
+ }
+
+ INIT_HLIST_NODE(&e->hlist);
+ add_cmd_entry(gvt, e);
+ gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
+ e->info->name, e->info->opcode, e->info->flag,
+ e->info->devices, e->info->rings);
+ }
+ return 0;
+}
+
+static void clean_cmd_table(struct intel_gvt *gvt)
+{
+ struct hlist_node *tmp;
+ struct cmd_entry *e;
+ int i;
+
+ hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
+ kfree(e);
+
+ hash_init(gvt->cmd_table);
+}
+
+void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
+{
+ clean_cmd_table(gvt);
+}
+
+int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
+{
+ int ret;
+
+ ret = init_cmd_table(gvt);
+ if (ret) {
+ intel_gvt_clean_cmd_parser(gvt);
+ return ret;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
new file mode 100644
index 000000000000..bed33514103c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Yulei Zhang <yulei.zhang@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+#ifndef _GVT_CMD_PARSER_H_
+#define _GVT_CMD_PARSER_H_
+
+#define GVT_CMD_HASH_BITS 7
+
+void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
+
+int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
+
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+
+int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index 7ef412be665f..68cba7bd980a 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -24,11 +24,34 @@
#ifndef __GVT_DEBUG_H__
#define __GVT_DEBUG_H__
+#define gvt_err(fmt, args...) \
+ DRM_ERROR("gvt: "fmt, ##args)
+
#define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
-/*
- * Other GVT debug stuff will be introduced in the GVT device model patches.
- */
+#define gvt_dbg_irq(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
+
+#define gvt_dbg_mm(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
+
+#define gvt_dbg_mmio(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
+
+#define gvt_dbg_dpy(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
+
+#define gvt_dbg_el(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
+
+#define gvt_dbg_sched(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
+
+#define gvt_dbg_render(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
+
+#define gvt_dbg_cmd(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
new file mode 100644
index 000000000000..c0c884aeb30e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static int get_edp_pipe(struct intel_vgpu *vgpu)
+{
+ u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
+ int pipe = -1;
+
+ switch (data & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ pipe = PIPE_C;
+ break;
+ }
+ return pipe;
+}
+
+static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+ return 0;
+
+ if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
+ return 0;
+ return 1;
+}
+
+static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
+ return -EINVAL;
+
+ if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+ return 1;
+
+ if (edp_pipe_is_enabled(vgpu) &&
+ get_edp_pipe(vgpu) == pipe)
+ return 1;
+ return 0;
+}
+
+/* EDID with 1024x768 as its resolution */
+static unsigned char virtual_dp_monitor_edid[] = {
+ /*Header*/
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ /* Vendor & Product Identification */
+ 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+ /* Version & Revision */
+ 0x01, 0x04,
+ /* Basic Display Parameters & Features */
+ 0xa5, 0x34, 0x20, 0x78, 0x23,
+ /* Color Characteristics */
+ 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+ /* Established Timings: maximum resolution is 1024x768 */
+ 0x21, 0x08, 0x00,
+ /* Standard Timings. All invalid */
+ 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
+ /* 18 Byte Data Blocks 1: invalid */
+ 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+ /* 18 Byte Data Blocks 2: invalid */
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ /* 18 Byte Data Blocks 3: invalid */
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+ 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+ /* 18 Byte Data Blocks 4: invalid */
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+ 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+ /* Extension Block Count */
+ 0x00,
+ /* Checksum: all 128 bytes of an EDID block must sum to 0 (mod 256) */
+ 0xef,
+};
+
+#define DPCD_HEADER_SIZE 0xb
+
+static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
+ 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT |
+ SDE_PORTD_HOTPLUG_CPT);
+
+ if (IS_SKYLAKE(dev_priv))
+ vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
+ SDE_PORTE_HOTPLUG_SPT);
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+
+ if (IS_SKYLAKE(dev_priv) &&
+ intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+ if (IS_BROADWELL(dev_priv))
+ vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
+ GEN8_PORT_DP_A_HOTPLUG;
+ else
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+ }
+}
+
+static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
+{
+ struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+
+ kfree(port->edid);
+ port->edid = NULL;
+
+ kfree(port->dpcd);
+ port->dpcd = NULL;
+}
+
+static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
+ int type)
+{
+ struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+
+ port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
+ if (!port->edid)
+ return -ENOMEM;
+
+ port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL);
+ if (!port->dpcd) {
+ kfree(port->edid);
+ return -ENOMEM;
+ }
+
+ memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+ EDID_SIZE);
+ port->edid->data_valid = true;
+
+ memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
+ port->dpcd->data_valid = true;
+ port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
+ port->type = type;
+
+ emulate_monitor_status_change(vgpu);
+ return 0;
+}
+
+/**
+ * intel_gvt_check_vblank_emulation - check if vblank emulation timer should
+ * be turned on/off when a virtual pipe is enabled/disabled.
+ * @gvt: a GVT device
+ *
+ * This function is used to turn the vblank emulation timer on or off
+ * according to the currently enabled/disabled virtual pipes.
+ *
+ */
+void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+ struct intel_vgpu *vgpu;
+ bool have_enabled_pipe = false;
+ int pipe, id;
+
+ if (WARN_ON(!mutex_is_locked(&gvt->lock)))
+ return;
+
+ hrtimer_cancel(&irq->vblank_timer.timer);
+
+ for_each_active_vgpu(gvt, vgpu, id) {
+ for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
+ have_enabled_pipe =
+ pipe_is_enabled(vgpu, pipe);
+ if (have_enabled_pipe)
+ break;
+ }
+ }
+
+ if (have_enabled_pipe)
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+}
+
+static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_irq *irq = &vgpu->irq;
+ int vblank_event[] = {
+ [PIPE_A] = PIPE_A_VBLANK,
+ [PIPE_B] = PIPE_B_VBLANK,
+ [PIPE_C] = PIPE_C_VBLANK,
+ };
+ int event;
+
+ if (pipe < PIPE_A || pipe > PIPE_C)
+ return;
+
+ for_each_set_bit(event, irq->flip_done_event[pipe],
+ INTEL_GVT_EVENT_MAX) {
+ clear_bit(event, irq->flip_done_event[pipe]);
+ if (!pipe_is_enabled(vgpu, pipe))
+ continue;
+
+ vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ }
+
+ if (pipe_is_enabled(vgpu, pipe)) {
+ vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
+ intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
+ }
+}
+
+static void emulate_vblank(struct intel_vgpu *vgpu)
+{
+ int pipe;
+
+ for_each_pipe(vgpu->gvt->dev_priv, pipe)
+ emulate_vblank_on_pipe(vgpu, pipe);
+}
+
+/**
+ * intel_gvt_emulate_vblank - trigger vblank events for vGPUs on GVT device
+ * @gvt: a GVT device
+ *
+ * This function is used to trigger vblank interrupts for the vGPUs on a GVT
+ * device.
+ *
+ */
+void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ int id;
+
+ if (WARN_ON(!mutex_is_locked(&gvt->lock)))
+ return;
+
+ for_each_active_vgpu(gvt, vgpu, id)
+ emulate_vblank(vgpu);
+}
+
+/**
+ * intel_vgpu_clean_display - clean vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to clean up the vGPU virtual display emulation state.
+ *
+ */
+void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ if (IS_SKYLAKE(dev_priv))
+ clean_virtual_dp_monitor(vgpu, PORT_D);
+ else
+ clean_virtual_dp_monitor(vgpu, PORT_B);
+}
+
+/**
+ * intel_vgpu_init_display - initialize vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize the vGPU virtual display emulation
+ * state.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_init_display(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ intel_vgpu_init_i2c_edid(vgpu);
+
+ if (IS_SKYLAKE(dev_priv))
+ return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
+ else
+ return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
+}
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
new file mode 100644
index 000000000000..7a60cb848268
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_DISPLAY_H_
+#define _GVT_DISPLAY_H_
+
+#define SBI_REG_MAX 20
+
+#define intel_vgpu_port(vgpu, port) \
+ (&(vgpu->display.ports[port]))
+
+#define intel_vgpu_has_monitor_on_port(vgpu, port) \
+ (intel_vgpu_port(vgpu, port)->edid && \
+ intel_vgpu_port(vgpu, port)->edid->data_valid)
+
+#define intel_vgpu_port_is_dp(vgpu, port) \
+ ((intel_vgpu_port(vgpu, port)->type == GVT_DP_A) || \
+ (intel_vgpu_port(vgpu, port)->type == GVT_DP_B) || \
+ (intel_vgpu_port(vgpu, port)->type == GVT_DP_C) || \
+ (intel_vgpu_port(vgpu, port)->type == GVT_DP_D))
+
+#define INTEL_GVT_MAX_UEVENT_VARS 3
+
+/* DPCD start */
+#define DPCD_SIZE 0x700
+
+/* DPCD */
+#define DP_SET_POWER 0x600
+#define DP_SET_POWER_D0 0x1
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NAK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+
+#define AUX_BURST_SIZE 16
+
+/* DPCD addresses */
+#define DPCD_REV 0x000
+#define DPCD_MAX_LINK_RATE 0x001
+#define DPCD_MAX_LANE_COUNT 0x002
+
+#define DPCD_TRAINING_PATTERN_SET 0x102
+#define DPCD_SINK_COUNT 0x200
+#define DPCD_LANE0_1_STATUS 0x202
+#define DPCD_LANE2_3_STATUS 0x203
+#define DPCD_LANE_ALIGN_STATUS_UPDATED 0x204
+#define DPCD_SINK_STATUS 0x205
+
+/* link training */
+#define DPCD_TRAINING_PATTERN_SET_MASK 0x03
+#define DPCD_LINK_TRAINING_DISABLED 0x00
+#define DPCD_TRAINING_PATTERN_1 0x01
+#define DPCD_TRAINING_PATTERN_2 0x02
+
+#define DPCD_CP_READY_MASK (1 << 6)
+
+/* lane status */
+#define DPCD_LANES_CR_DONE 0x11
+#define DPCD_LANES_EQ_DONE 0x22
+#define DPCD_SYMBOL_LOCKED 0x44
+
+#define DPCD_INTERLANE_ALIGN_DONE 0x01
+
+#define DPCD_SINK_IN_SYNC 0x03
+/* DPCD end */
+
+#define SBI_RESPONSE_MASK 0x3
+#define SBI_RESPONSE_SHIFT 0x1
+#define SBI_STAT_MASK 0x1
+#define SBI_STAT_SHIFT 0x0
+#define SBI_OPCODE_SHIFT 8
+#define SBI_OPCODE_MASK (0xff << SBI_OPCODE_SHIFT)
+#define SBI_CMD_IORD 2
+#define SBI_CMD_IOWR 3
+#define SBI_CMD_CRRD 6
+#define SBI_CMD_CRWR 7
+#define SBI_ADDR_OFFSET_SHIFT 16
+#define SBI_ADDR_OFFSET_MASK (0xffff << SBI_ADDR_OFFSET_SHIFT)
+
+struct intel_vgpu_sbi_register {
+ unsigned int offset;
+ u32 value;
+};
+
+struct intel_vgpu_sbi {
+ int number;
+ struct intel_vgpu_sbi_register registers[SBI_REG_MAX];
+};
+
+enum intel_gvt_plane_type {
+ PRIMARY_PLANE = 0,
+ CURSOR_PLANE,
+ SPRITE_PLANE,
+ MAX_PLANE
+};
+
+struct intel_vgpu_dpcd_data {
+ bool data_valid;
+ u8 data[DPCD_SIZE];
+};
+
+enum intel_vgpu_port_type {
+ GVT_CRT = 0,
+ GVT_DP_A,
+ GVT_DP_B,
+ GVT_DP_C,
+ GVT_DP_D,
+ GVT_HDMI_B,
+ GVT_HDMI_C,
+ GVT_HDMI_D,
+ GVT_PORT_MAX
+};
+
+struct intel_vgpu_port {
+ /* per display EDID information */
+ struct intel_vgpu_edid_data *edid;
+ /* per display DPCD information */
+ struct intel_vgpu_dpcd_data *dpcd;
+ int type;
+};
+
+void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
+void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
+
+int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
new file mode 100644
index 000000000000..7e1da1c563ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define GMBUS1_TOTAL_BYTES_SHIFT 16
+#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
+#define gmbus1_total_byte_count(v) (((v) >> \
+ GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK)
+#define gmbus1_slave_addr(v) (((v) & 0xff) >> 1)
+#define gmbus1_slave_index(v) (((v) >> 8) & 0xff)
+#define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7)
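+/* Example: GMBUS1 = 0x028000a1 decodes as bus cycle 1 (NIDX_NS_W), total
+ * byte count 128 and slave address byte 0xa1, i.e. a read from the EDID
+ * slave address 0x50.
+ */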
+
+/* GMBUS0 bits definitions */
+#define _GMBUS_PIN_SEL_MASK (0x7)
+
+static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
+ unsigned char chr = 0;
+
+ if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
+ gvt_err("Driver tries to read EDID without proper sequence!\n");
+ return 0;
+ }
+ if (edid->current_edid_read >= EDID_SIZE) {
+ gvt_err("edid_get_byte() exceeds the size of EDID!\n");
+ return 0;
+ }
+
+ if (!edid->edid_available) {
+ gvt_err("Reading EDID but EDID is not available!\n");
+ return 0;
+ }
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
+ struct intel_vgpu_edid_data *edid_data =
+ intel_vgpu_port(vgpu, edid->port)->edid;
+
+ chr = edid_data->edid_block[edid->current_edid_read];
+ edid->current_edid_read++;
+ } else {
+ gvt_err("No EDID available during the reading?\n");
+ }
+ return chr;
+}
+
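+/* GMBUS0 pin pair select values 2/4/5/6 correspond to ports E/C/B/D;
+ * any other value is not supported by the EDID emulation.
+ */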
+static inline int get_port_from_gmbus0(u32 gmbus0)
+{
+ int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+ int port = -EINVAL;
+
+ if (port_select == 2)
+ port = PORT_E;
+ else if (port_select == 4)
+ port = PORT_C;
+ else if (port_select == 5)
+ port = PORT_B;
+ else if (port_select == 6)
+ port = PORT_D;
+ return port;
+}
+
+static void reset_gmbus_controller(struct intel_vgpu *vgpu)
+{
+ vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
+ if (!vgpu->display.i2c_edid.edid_available)
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
+}
+
+/* GMBUS0 */
+static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ int port, pin_select;
+
+ memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+
+ pin_select = vgpu_vreg(vgpu, offset) & _GMBUS_PIN_SEL_MASK;
+
+ intel_vgpu_init_i2c_edid(vgpu);
+
+ if (pin_select == 0)
+ return 0;
+
+ port = get_port_from_gmbus0(pin_select);
+ if (WARN_ON(port < 0))
+ return 0;
+
+ vgpu->display.i2c_edid.state = I2C_GMBUS;
+ vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
+
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
+
+ if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
+ !intel_vgpu_port_is_dp(vgpu, port)) {
+ vgpu->display.i2c_edid.port = port;
+ vgpu->display.i2c_edid.edid_available = true;
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
+ } else
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ return 0;
+}
+
+static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+ u32 slave_addr;
+ u32 wvalue = *(u32 *)p_data;
+
+ if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) {
+ if (!(wvalue & GMBUS_SW_CLR_INT)) {
+ vgpu_vreg(vgpu, offset) &= ~GMBUS_SW_CLR_INT;
+ reset_gmbus_controller(vgpu);
+ }
+ /*
+ * TODO: "This bit is cleared to zero when an event
+ * causes the HW_RDY bit transition to occur"
+ */
+ } else {
+ /*
+ * per bspec setting this bit can cause:
+ * 1) INT status bit cleared
+ * 2) HW_RDY bit asserted
+ */
+ if (wvalue & GMBUS_SW_CLR_INT) {
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
+ }
+
+ /* For virtualization, we suppose that HW is always ready,
+ * so GMBUS_SW_RDY should always be cleared
+ */
+ if (wvalue & GMBUS_SW_RDY)
+ wvalue &= ~GMBUS_SW_RDY;
+
+ i2c_edid->gmbus.total_byte_count =
+ gmbus1_total_byte_count(wvalue);
+ slave_addr = gmbus1_slave_addr(wvalue);
+
+ /* vgpu gmbus only supports EDID */
+ if (slave_addr == EDID_ADDR) {
+ i2c_edid->slave_selected = true;
+ } else if (slave_addr != 0) {
+ gvt_dbg_dpy(
+ "vgpu%d: unsupported gmbus slave addr(0x%x)\n"
+ " gmbus operations will be ignored.\n",
+ vgpu->id, slave_addr);
+ }
+
+ if (wvalue & GMBUS_CYCLE_INDEX)
+ i2c_edid->current_edid_read =
+ gmbus1_slave_index(wvalue);
+
+ i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
+ switch (gmbus1_bus_cycle(wvalue)) {
+ case GMBUS_NOCYCLE:
+ break;
+ case GMBUS_STOP:
+ /* From spec:
+ * This can only cause a STOP to be generated
+ * if a GMBUS cycle is generated, the GMBUS is
+ * currently in a data/wait/idle phase, or it is in a
+ * WAIT phase
+ */
+ if (gmbus1_bus_cycle(vgpu_vreg(vgpu, offset))
+ != GMBUS_NOCYCLE) {
+ intel_vgpu_init_i2c_edid(vgpu);
+ /* After the 'stop' cycle, hw state would become
+ * 'stop phase' and then 'idle phase' after a
+ * few milliseconds. In emulation, we just set
+ * it as 'idle phase' ('stop phase' is not
+ * visible in gmbus interface)
+ */
+ i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
+ vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ }
+ break;
+ case NIDX_NS_W:
+ case IDX_NS_W:
+ case NIDX_STOP:
+ case IDX_STOP:
+ /* From the hw spec, the GMBUS phase
+ * transitions like this:
+ * START (-->INDEX) -->DATA
+ */
+ i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
+ vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
+ break;
+ default:
+ gvt_err("Unknown/reserved GMBUS cycle detected!\n");
+ break;
+ }
+ /*
+ * From hw spec the WAIT state will be
+ * cleared:
+ * (1) in a new GMBUS cycle
+ * (2) by generating a stop
+ */
+ vgpu_vreg(vgpu, offset) = wvalue;
+ }
+ return 0;
+}
+
+static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ int i;
+ unsigned char byte_data;
+ struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+ int byte_left = i2c_edid->gmbus.total_byte_count -
+ i2c_edid->current_edid_read;
+ int byte_count = byte_left;
+ u32 reg_data = 0;
+
+ /* Data can only be received if the previous settings are correct */
+ if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
+ if (byte_left <= 0) {
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+ return 0;
+ }
+
+ if (byte_count > 4)
+ byte_count = 4;
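+ /* pack up to four EDID bytes into one dword, byte i at bits 8i+7:8i */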
+ for (i = 0; i < byte_count; i++) {
+ byte_data = edid_get_byte(vgpu);
+ reg_data |= (byte_data << (i << 3));
+ }
+
+ memcpy(&vgpu_vreg(vgpu, offset), &reg_data, byte_count);
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+
+ if (byte_left <= 4) {
+ switch (i2c_edid->gmbus.cycle_type) {
+ case NIDX_STOP:
+ case IDX_STOP:
+ i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
+ break;
+ case NIDX_NS_W:
+ case IDX_NS_W:
+ default:
+ i2c_edid->gmbus.phase = GMBUS_WAIT_PHASE;
+ break;
+ }
+ intel_vgpu_init_i2c_edid(vgpu);
+ }
+ /*
+ * Read GMBUS3 during send operation,
+ * return the latest written value
+ */
+ } else {
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+ gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
+ vgpu->id);
+ }
+ return 0;
+}
+
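+/* GMBUS2 bit 15 (INUSE) acts as a hardware semaphore: a read returns the
+ * current value and then marks the unit busy; writing 1 releases it (see
+ * gmbus2_mmio_write below). All other bits are read only.
+ */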
+static int gmbus2_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 value = vgpu_vreg(vgpu, offset);
+
+ if (!(vgpu_vreg(vgpu, offset) & GMBUS_INUSE))
+ vgpu_vreg(vgpu, offset) |= GMBUS_INUSE;
+ memcpy(p_data, (void *)&value, bytes);
+ return 0;
+}
+
+static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 wvalue = *(u32 *)p_data;
+
+ if (wvalue & GMBUS_INUSE)
+ vgpu_vreg(vgpu, offset) &= ~GMBUS_INUSE;
+ /* All other bits are read-only */
+ return 0;
+}
+
+/**
+ * intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read
+ * @vgpu: a vGPU
+ * @offset: register offset
+ * @p_data: data return buffer
+ * @bytes: access length in bytes
+ *
+ * This function is used to emulate a GMBUS register MMIO read.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ return -EINVAL;
+
+ if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
+ return gmbus2_mmio_read(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
+ return gmbus3_mmio_read(vgpu, offset, p_data, bytes);
+
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+ return 0;
+}
+
+/**
+ * intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write
+ * @vgpu: a vGPU
+ * @offset: register offset
+ * @p_data: write data buffer
+ * @bytes: access length in bytes
+ *
+ * This function is used to emulate a GMBUS register MMIO write.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ return -EINVAL;
+
+ if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
+ return gmbus0_mmio_write(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS1))
+ return gmbus1_mmio_write(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
+ return gmbus2_mmio_write(vgpu, offset, p_data, bytes);
+ else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
+ return gmbus3_mmio_write(vgpu, offset, p_data, bytes);
+
+ memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+ return 0;
+}
+
+enum {
+ AUX_CH_CTL = 0,
+ AUX_CH_DATA1,
+ AUX_CH_DATA2,
+ AUX_CH_DATA3,
+ AUX_CH_DATA4,
+ AUX_CH_DATA5
+};
+
+static inline int get_aux_ch_reg(unsigned int offset)
+{
+ int reg;
+
+ switch (offset & 0xff) {
+ case 0x10:
+ reg = AUX_CH_CTL;
+ break;
+ case 0x14:
+ reg = AUX_CH_DATA1;
+ break;
+ case 0x18:
+ reg = AUX_CH_DATA2;
+ break;
+ case 0x1c:
+ reg = AUX_CH_DATA3;
+ break;
+ case 0x20:
+ reg = AUX_CH_DATA4;
+ break;
+ case 0x24:
+ reg = AUX_CH_DATA5;
+ break;
+ default:
+ reg = -1;
+ break;
+ }
+ return reg;
+}
+
+#define AUX_CTL_MSG_LENGTH(reg) \
+ ((reg & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> \
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT)
+
+/**
+ * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write
+ * @vgpu: a vGPU
+ * @port_idx: port index
+ * @offset: register offset
+ * @p_data: write data buffer
+ *
+ * This function is used to emulate an AUX channel register write.
+ *
+ */
+void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
+ int port_idx,
+ unsigned int offset,
+ void *p_data)
+{
+ struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
+ int msg_length, ret_msg_size;
+ int msg, addr, ctrl, op;
+ u32 value = *(u32 *)p_data;
+ int aux_data_for_write = 0;
+ int reg = get_aux_ch_reg(offset);
+
+ if (reg != AUX_CH_CTL) {
+ vgpu_vreg(vgpu, offset) = value;
+ return;
+ }
+
+ msg_length = AUX_CTL_MSG_LENGTH(value);
+ /* check the msg in the DATA register */
+ msg = vgpu_vreg(vgpu, offset + 4);
+ addr = (msg >> 8) & 0xffff;
+ ctrl = (msg >> 24) & 0xff;
+ op = ctrl >> 4;
+ if (!(value & DP_AUX_CH_CTL_SEND_BUSY)) {
+ /* The ctl write to clear some states */
+ return;
+ }
+
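+ /* An I2C-over-AUX read reply carries the one-byte reply header plus
+ * a data byte, hence a message size of two; other replies are just
+ * the header.
+ */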
+ /* Always set the value the VM expects. */
+ ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1);
+ vgpu_vreg(vgpu, offset) =
+ DP_AUX_CH_CTL_DONE |
+ ((ret_msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) &
+ DP_AUX_CH_CTL_MESSAGE_SIZE_MASK);
+
+ if (msg_length == 3) {
+ if (!(op & GVT_AUX_I2C_MOT)) {
+ /* stop */
+ intel_vgpu_init_i2c_edid(vgpu);
+ } else {
+ /* start or restart */
+ i2c_edid->aux_ch.i2c_over_aux_ch = true;
+ i2c_edid->aux_ch.aux_ch_mot = true;
+ if (addr == 0) {
+ /* reset the address */
+ intel_vgpu_init_i2c_edid(vgpu);
+ } else if (addr == EDID_ADDR) {
+ i2c_edid->state = I2C_AUX_CH;
+ i2c_edid->port = port_idx;
+ i2c_edid->slave_selected = true;
+ if (intel_vgpu_has_monitor_on_port(vgpu,
+ port_idx) &&
+ intel_vgpu_port_is_dp(vgpu, port_idx))
+ i2c_edid->edid_available = true;
+ }
+ }
+ } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) {
+ /* TODO
+ * We only support EDID reading from I2C_over_AUX. And
+ * we do not expect the index mode to be used. Right now
+ * the WRITE operation is ignored. It is good enough to
+ * support the gfx driver to do EDID access.
+ */
+ } else {
+ if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
+ return;
+ if (WARN_ON(msg_length != 4))
+ return;
+ if (i2c_edid->edid_available && i2c_edid->slave_selected) {
+ unsigned char val = edid_get_byte(vgpu);
+
+ aux_data_for_write = (val << 16);
+ }
+ }
+ /* write the return value in the AUX_CH_DATA reg, which includes:
+ * the ACK of an I2C_WRITE
+ * the returned byte if it is a READ
+ */
+
+ aux_data_for_write |= (GVT_AUX_I2C_REPLY_ACK & 0xff) << 24;
+ vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
+}
+
+/**
+ * intel_vgpu_init_i2c_edid - initialize vGPU i2c edid emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize the vGPU I2C EDID emulation state.
+ *
+ */
+void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
+
+ edid->state = I2C_NOT_SPECIFIED;
+
+ edid->port = -1;
+ edid->slave_selected = false;
+ edid->edid_available = false;
+ edid->current_edid_read = 0;
+
+ memset(&edid->gmbus, 0, sizeof(struct intel_vgpu_i2c_gmbus));
+
+ edid->aux_ch.i2c_over_aux_ch = false;
+ edid->aux_ch.aux_ch_mot = false;
+}
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
new file mode 100644
index 000000000000..de366b1d5196
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Terrence Xu <terrence.xu@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_EDID_H_
+#define _GVT_EDID_H_
+
+#define EDID_SIZE 128
+#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
+
+#define GVT_AUX_NATIVE_WRITE 0x8
+#define GVT_AUX_NATIVE_READ 0x9
+#define GVT_AUX_I2C_WRITE 0x0
+#define GVT_AUX_I2C_READ 0x1
+#define GVT_AUX_I2C_STATUS 0x2
+#define GVT_AUX_I2C_MOT 0x4
+#define GVT_AUX_I2C_REPLY_ACK (0x0 << 6)
+
+struct intel_vgpu_edid_data {
+ bool data_valid;
+ unsigned char edid_block[EDID_SIZE];
+};
+
+enum gmbus_cycle_type {
+ GMBUS_NOCYCLE = 0x0,
+ NIDX_NS_W = 0x1,
+ IDX_NS_W = 0x3,
+ GMBUS_STOP = 0x4,
+ NIDX_STOP = 0x5,
+ IDX_STOP = 0x7
+};
+
+/*
+ * States of GMBUS
+ *
+ * GMBUS0-3 can be related to the EDID virtualization. The other two GMBUS
+ * registers, GMBUS4 (interrupt mask) and GMBUS5 (2-byte index register), are
+ * not considered here. Below describes the usage of the GMBUS registers that
+ * the EDID virtualization cares about.
+ *
+ * GMBUS0:
+ * R/W
+ * port selection. The value of bit0 - bit2 corresponds to the GPIO registers.
+ *
+ * GMBUS1:
+ * R/W Protect
+ * Command and Status.
+ * bit0 is the direction bit: 1 is read; 0 is write.
+ * bit1 - bit7 is the 7-bit slave address.
+ * bit16 - bit24 is the total byte count.
+ *
+ * GMBUS2:
+ * Most bits are read only except bit 15 (IN_USE)
+ * Status register
+ * bit0 - bit8 is the current byte count
+ * bit 11: hardware ready
+ *
+ * GMBUS3:
+ * Read/Write
+ * Data for transfer
+ */
+
+/* From the hw specs, other phases like START, ADDRESS and INDEX
+ * are invisible to the GMBUS MMIO interface, so there are no
+ * definitions for them in the enum types below.
+ */
+enum gvt_gmbus_phase {
+ GMBUS_IDLE_PHASE = 0,
+ GMBUS_DATA_PHASE,
+ GMBUS_WAIT_PHASE,
+ //GMBUS_STOP_PHASE,
+ GMBUS_MAX_PHASE
+};
+
+struct intel_vgpu_i2c_gmbus {
+ unsigned int total_byte_count; /* from GMBUS1 */
+ enum gmbus_cycle_type cycle_type;
+ enum gvt_gmbus_phase phase;
+};
+
+struct intel_vgpu_i2c_aux_ch {
+ bool i2c_over_aux_ch;
+ bool aux_ch_mot;
+};
+
+enum i2c_state {
+ I2C_NOT_SPECIFIED = 0,
+ I2C_GMBUS = 1,
+ I2C_AUX_CH = 2
+};
+
+/* I2C sequences cannot interleave.
+ * GMBUS and AUX_CH sequences cannot interleave.
+ */
+struct intel_vgpu_i2c_edid {
+ enum i2c_state state;
+
+ unsigned int port;
+ bool slave_selected;
+ bool edid_available;
+ unsigned int current_edid_read;
+
+ struct intel_vgpu_i2c_gmbus gmbus;
+ struct intel_vgpu_i2c_aux_ch aux_ch;
+};
+
+void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu);
+
+int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes);
+
+int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes);
+
+void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
+ int port_idx,
+ unsigned int offset,
+ void *p_data);
+
+#endif /*_GVT_EDID_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
new file mode 100644
index 000000000000..c1f6019d8895
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -0,0 +1,860 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define _EL_OFFSET_STATUS 0x234
+#define _EL_OFFSET_STATUS_BUF 0x370
+#define _EL_OFFSET_STATUS_PTR 0x3A0
+
+#define execlist_ring_mmio(gvt, ring_id, offset) \
+ (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+
+#define valid_context(ctx) ((ctx)->valid)
+#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
+ ((a)->lrca == (b)->lrca))
+
+static int context_switch_events[] = {
+ [RCS] = RCS_AS_CONTEXT_SWITCH,
+ [BCS] = BCS_AS_CONTEXT_SWITCH,
+ [VCS] = VCS_AS_CONTEXT_SWITCH,
+ [VCS2] = VCS2_AS_CONTEXT_SWITCH,
+ [VECS] = VECS_AS_CONTEXT_SWITCH,
+};
+
+static int ring_id_to_context_switch_event(int ring_id)
+{
+ if (WARN_ON(ring_id < RCS ||
+ ring_id >= ARRAY_SIZE(context_switch_events)))
+ return -EINVAL;
+
+ return context_switch_events[ring_id];
+}
+
+static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
+{
+ gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
+ execlist->running_slot ?
+ execlist->running_slot->index : -1,
+ execlist->running_context ?
+ execlist->running_context->context_id : 0,
+ execlist->pending_slot ?
+ execlist->pending_slot->index : -1);
+
+ execlist->running_slot = execlist->pending_slot;
+ execlist->pending_slot = NULL;
+ execlist->running_context = execlist->running_context ?
+ &execlist->running_slot->ctx[0] : NULL;
+
+ gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
+ execlist->running_slot ?
+ execlist->running_slot->index : -1,
+ execlist->running_context ?
+ execlist->running_context->context_id : 0,
+ execlist->pending_slot ?
+ execlist->pending_slot->index : -1);
+}
+
+static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
+{
+ struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+ struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
+ struct execlist_ctx_descriptor_format *desc = execlist->running_context;
+ struct intel_vgpu *vgpu = execlist->vgpu;
+ struct execlist_status_format status;
+ int ring_id = execlist->ring_id;
+ u32 status_reg = execlist_ring_mmio(vgpu->gvt,
+ ring_id, _EL_OFFSET_STATUS);
+
+ status.ldw = vgpu_vreg(vgpu, status_reg);
+ status.udw = vgpu_vreg(vgpu, status_reg + 4);
+
+ if (running) {
+ status.current_execlist_pointer = !!running->index;
+ status.execlist_write_pointer = !running->index;
+ status.execlist_0_active = status.execlist_0_valid =
+ !(running->index);
+ status.execlist_1_active = status.execlist_1_valid =
+ !!(running->index);
+ } else {
+ status.context_id = 0;
+ status.execlist_0_active = status.execlist_0_valid = 0;
+ status.execlist_1_active = status.execlist_1_valid = 0;
+ }
+
+ status.context_id = desc ? desc->context_id : 0;
+ status.execlist_queue_full = !!(pending);
+
+ vgpu_vreg(vgpu, status_reg) = status.ldw;
+ vgpu_vreg(vgpu, status_reg + 4) = status.udw;
+
+ gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
+ vgpu->id, status_reg, status.ldw, status.udw);
+}
+
+static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
+ struct execlist_context_status_format *status,
+ bool trigger_interrupt_later)
+{
+ struct intel_vgpu *vgpu = execlist->vgpu;
+ int ring_id = execlist->ring_id;
+ struct execlist_context_status_pointer_format ctx_status_ptr;
+ u32 write_pointer;
+ u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
+
+ ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS_PTR);
+ ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS_BUF);
+
+ ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
+
+ write_pointer = ctx_status_ptr.write_ptr;
+
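+ /* The context status buffer has six entries. A write pointer of
+ * 0x7 means "uninitialized"; otherwise advance it modulo six.
+ */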
+ if (write_pointer == 0x7)
+ write_pointer = 0;
+ else {
+ ++write_pointer;
+ write_pointer %= 0x6;
+ }
+
+ offset = ctx_status_buf_reg + write_pointer * 8;
+
+ vgpu_vreg(vgpu, offset) = status->ldw;
+ vgpu_vreg(vgpu, offset + 4) = status->udw;
+
+ ctx_status_ptr.write_ptr = write_pointer;
+ vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
+
+ gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
+ vgpu->id, write_pointer, offset, status->ldw, status->udw);
+
+ if (trigger_interrupt_later)
+ return;
+
+ intel_vgpu_trigger_virtual_event(vgpu,
+ ring_id_to_context_switch_event(execlist->ring_id));
+}
+
+static int emulate_execlist_ctx_schedule_out(
+ struct intel_vgpu_execlist *execlist,
+ struct execlist_ctx_descriptor_format *ctx)
+{
+ struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+ struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
+ struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
+ struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
+ struct execlist_context_status_format status;
+
+ memset(&status, 0, sizeof(status));
+
+ gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
+
+ if (WARN_ON(!same_context(ctx, execlist->running_context))) {
+ gvt_err("schedule out context is not the running context, "
+ "ctx id %x running ctx id %x\n",
+ ctx->context_id,
+ execlist->running_context->context_id);
+ return -EINVAL;
+ }
+
+ /* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
+ if (valid_context(ctx1) && same_context(ctx0, ctx)) {
+ gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");
+
+ execlist->running_context = ctx1;
+
+ emulate_execlist_status(execlist);
+
+ status.context_complete = status.element_switch = 1;
+ status.context_id = ctx->context_id;
+
+ emulate_csb_update(execlist, &status, false);
+ /*
+ * ctx1 is not valid, ctx == ctx0
+ * ctx1 is valid, ctx1 == ctx
+ * --> last element is finished
+ * emulate:
+ * active-to-idle if there is *no* pending execlist
+ * context-complete if there *is* pending execlist
+ */
+ } else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
+ || (valid_context(ctx1) && same_context(ctx1, ctx))) {
+ gvt_dbg_el("need to switch virtual execlist slot\n");
+
+ switch_virtual_execlist_slot(execlist);
+
+ emulate_execlist_status(execlist);
+
+ status.context_complete = status.active_to_idle = 1;
+ status.context_id = ctx->context_id;
+
+ if (!pending) {
+ emulate_csb_update(execlist, &status, false);
+ } else {
+ emulate_csb_update(execlist, &status, true);
+
+ memset(&status, 0, sizeof(status));
+
+ status.idle_to_active = 1;
+ status.context_id = 0;
+
+ emulate_csb_update(execlist, &status, false);
+ }
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
+ struct intel_vgpu_execlist *execlist)
+{
+ struct intel_vgpu *vgpu = execlist->vgpu;
+ int ring_id = execlist->ring_id;
+ u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS);
+ struct execlist_status_format status;
+
+ status.ldw = vgpu_vreg(vgpu, status_reg);
+ status.udw = vgpu_vreg(vgpu, status_reg + 4);
+
+ if (status.execlist_queue_full) {
+ gvt_err("virtual execlist slots are full\n");
+ return NULL;
+ }
+
+ return &execlist->slot[status.execlist_write_pointer];
+}
+
+static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
+ struct execlist_ctx_descriptor_format ctx[2])
+{
+ struct intel_vgpu_execlist_slot *running = execlist->running_slot;
+ struct intel_vgpu_execlist_slot *slot =
+ get_next_execlist_slot(execlist);
+
+ struct execlist_ctx_descriptor_format *ctx0, *ctx1;
+ struct execlist_context_status_format status;
+
+ gvt_dbg_el("emulate schedule-in\n");
+
+ if (!slot) {
+ gvt_err("no available execlist slot\n");
+ return -EINVAL;
+ }
+
+ memset(&status, 0, sizeof(status));
+ memset(slot->ctx, 0, sizeof(slot->ctx));
+
+ slot->ctx[0] = ctx[0];
+ slot->ctx[1] = ctx[1];
+
+ gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
+ slot->index, ctx[0].context_id,
+ ctx[1].context_id);
+
+ /*
+ * no running execlist, make this write bundle as running execlist
+ * -> idle-to-active
+ */
+ if (!running) {
+ gvt_dbg_el("no current running execlist\n");
+
+ execlist->running_slot = slot;
+ execlist->pending_slot = NULL;
+ execlist->running_context = &slot->ctx[0];
+
+ gvt_dbg_el("running slot index %d running context %x\n",
+ execlist->running_slot->index,
+ execlist->running_context->context_id);
+
+ emulate_execlist_status(execlist);
+
+ status.idle_to_active = 1;
+ status.context_id = 0;
+
+ emulate_csb_update(execlist, &status, false);
+ return 0;
+ }
+
+ ctx0 = &running->ctx[0];
+ ctx1 = &running->ctx[1];
+
+ gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
+ running->index, ctx0->context_id, ctx1->context_id);
+
+ /*
+ * already has a running execlist
+ * a. running ctx1 is valid,
+ * ctx0 is finished, and running ctx1 == new execlist ctx[0]
+ * b. running ctx1 is not valid,
+ * ctx0 == new execlist ctx[0]
+ * ----> lite-restore + preempted
+ */
+ if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
+ /* condition a */
+ (!same_context(ctx0, execlist->running_context))) ||
+ (!valid_context(ctx1) &&
+ same_context(ctx0, &slot->ctx[0]))) { /* condition b */
+ gvt_dbg_el("need to switch virtual execlist slot\n");
+
+ execlist->pending_slot = slot;
+ switch_virtual_execlist_slot(execlist);
+
+ emulate_execlist_status(execlist);
+
+ status.lite_restore = status.preempted = 1;
+ status.context_id = ctx[0].context_id;
+
+ emulate_csb_update(execlist, &status, false);
+ } else {
+ gvt_dbg_el("emulate as pending slot\n");
+ /*
+ * otherwise
+ * --> emulate pending execlist exist + but no preemption case
+ */
+ execlist->pending_slot = slot;
+ emulate_execlist_status(execlist);
+ }
+ return 0;
+}
+
+static void free_workload(struct intel_vgpu_workload *workload)
+{
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+ intel_gvt_mm_unreference(workload->shadow_mm);
+ kmem_cache_free(workload->vgpu->workloads, workload);
+}
+
+#define get_desc_from_elsp_dwords(ed, i) \
+ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
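+/* each ELSP context descriptor occupies two dwords (low dword first) */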
+
+
+#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
+#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
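+/* Patch the start address of an MI_BATCH_BUFFER_START command: dword 1
+ * carries address bits 31:2; with an 8-byte gmadr, dword 2 carries the
+ * high 16 address bits.
+ */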
+static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
+ unsigned long add, int gmadr_bytes)
+{
+ if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+ return -1;
+
+ *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
+ BATCH_BUFFER_ADDR_MASK;
+ if (gmadr_bytes == 8) {
+ *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
+ add & BATCH_BUFFER_ADDR_HIGH_MASK;
+ }
+
+ return 0;
+}
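+
+/*
+ * The batch buffer start command is patched in place: dword 1 of the
+ * command receives the graphics address bits selected by
+ * BATCH_BUFFER_ADDR_MASK (bits 31:2), and on platforms with an 8-byte
+ * gmadr in the command, dword 2 receives the bits selected by
+ * BATCH_BUFFER_ADDR_HIGH_MASK.
+ */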
+
+static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+ int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+
+ /* pin the gem object to ggtt */
+ if (!list_empty(&workload->shadow_bb)) {
+		struct intel_shadow_bb_entry *entry_obj, *temp;
+
+ list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
+ list) {
+ struct i915_vma *vma;
+
+ vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
+ 4, 0);
+ if (IS_ERR(vma)) {
+ gvt_err("Cannot pin\n");
+ return;
+ }
+
+			/* FIXME: we are not tracking our pinned VMA, leaving it
+ * up to the core to fix up the stray pin_count upon
+ * free.
+ */
+
+			/* update the relocated gma with the shadow batch buffer */
+ set_gma_to_bb_cmd(entry_obj,
+ i915_ggtt_offset(vma),
+ gmadr_bytes);
+ }
+ }
+}
+
+static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ int ring_id = wa_ctx->workload->ring_id;
+ struct i915_gem_context *shadow_ctx =
+ wa_ctx->workload->vgpu->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap_atomic(page);
+
+ shadow_ring_context->bb_per_ctx_ptr.val =
+ (shadow_ring_context->bb_per_ctx_ptr.val &
+ (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
+ shadow_ring_context->rcs_indirect_ctx.val =
+ (shadow_ring_context->rcs_indirect_ctx.val &
+ (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
+
+ kunmap_atomic(shadow_ring_context);
+ return 0;
+}
+
+static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ struct i915_vma *vma;
+ unsigned char *per_ctx_va =
+ (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+ wa_ctx->indirect_ctx.size;
+
+ if (wa_ctx->indirect_ctx.size == 0)
+ return;
+
+ vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
+ 0, CACHELINE_BYTES, 0);
+ if (IS_ERR(vma)) {
+ gvt_err("Cannot pin indirect ctx obj\n");
+ return;
+ }
+
+	/* FIXME: we are not tracking our pinned VMA, leaving it
+ * up to the core to fix up the stray pin_count upon
+ * free.
+ */
+
+ wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
+
+ wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
+ memset(per_ctx_va, 0, CACHELINE_BYTES);
+
+ update_wa_ctx_2_shadow_ctx(wa_ctx);
+}
+
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct execlist_ctx_descriptor_format ctx[2];
+ int ring_id = workload->ring_id;
+
+ intel_vgpu_pin_mm(workload->shadow_mm);
+ intel_vgpu_sync_oos_pages(workload->vgpu);
+ intel_vgpu_flush_post_shadow(workload->vgpu);
+ prepare_shadow_batch_buffer(workload);
+ prepare_shadow_wa_ctx(&workload->wa_ctx);
+ if (!workload->emulate_schedule_in)
+ return 0;
+
+ ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+ ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+ return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+}
+
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+	/* release all the shadow batch buffers */
+ if (!list_empty(&workload->shadow_bb)) {
+		struct intel_shadow_bb_entry *entry_obj, *temp;
+
+ list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
+ list) {
+ i915_gem_object_unpin_map(entry_obj->obj);
+ i915_gem_object_put(entry_obj->obj);
+ list_del(&entry_obj->list);
+ kfree(entry_obj);
+ }
+ }
+}
+
+static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ if (wa_ctx->indirect_ctx.size == 0)
+ return;
+
+ i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+}
+
+static int complete_execlist_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_execlist *execlist =
+ &vgpu->execlist[workload->ring_id];
+ struct intel_vgpu_workload *next_workload;
+ struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+ bool lite_restore = false;
+ int ret;
+
+ gvt_dbg_el("complete workload %p status %d\n", workload,
+ workload->status);
+
+ release_shadow_batch_buffer(workload);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+
+ if (workload->status || vgpu->resetting)
+ goto out;
+
+ if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+ struct execlist_ctx_descriptor_format *this_desc, *next_desc;
+
+ next_workload = container_of(next,
+ struct intel_vgpu_workload, list);
+ this_desc = &workload->ctx_desc;
+ next_desc = &next_workload->ctx_desc;
+
+ lite_restore = same_context(this_desc, next_desc);
+ }
+
+ if (lite_restore) {
+ gvt_dbg_el("next context == current - no schedule-out\n");
+ free_workload(workload);
+ return 0;
+ }
+
+ ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
+ if (ret)
+ goto err;
+out:
+ free_workload(workload);
+ return 0;
+err:
+ free_workload(workload);
+ return ret;
+}
+
+#define RING_CTX_OFF(x) \
+ offsetof(struct execlist_ring_context, x)
+
+static void read_guest_pdps(struct intel_vgpu *vgpu,
+ u64 ring_context_gpa, u32 pdp[8])
+{
+ u64 gpa;
+ int i;
+
+ gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+
+ for (i = 0; i < 8; i++)
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ gpa + i * 8, &pdp[7 - i], 4);
+}
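+
+/*
+ * The ring context stores the PDP registers as {addr, val} MMIO pairs
+ * from pdp3_UDW down to pdp0_LDW, so consecutive .val fields are 8
+ * bytes apart; reading forward while storing into pdp[7 - i] leaves
+ * pdp[] ordered from pdp0 LDW up to pdp3 UDW.
+ */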
+
+static int prepare_mm(struct intel_vgpu_workload *workload)
+{
+ struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
+ struct intel_vgpu_mm *mm;
+ int page_table_level;
+ u32 pdp[8];
+
+ if (desc->addressing_mode == 1) { /* legacy 32-bit */
+ page_table_level = 3;
+ } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
+ page_table_level = 4;
+ } else {
+ gvt_err("Advanced Context mode(SVM) is not supported!\n");
+ return -EINVAL;
+ }
+
+ read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+
+ mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
+ if (mm) {
+ intel_gvt_mm_reference(mm);
+ } else {
+
+ mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
+ pdp, page_table_level, 0);
+ if (IS_ERR(mm)) {
+ gvt_err("fail to create mm object.\n");
+ return PTR_ERR(mm);
+ }
+ }
+ workload->shadow_mm = mm;
+ return 0;
+}
+
+#define get_last_workload(q) \
+ (list_empty(q) ? NULL : container_of(q->prev, \
+ struct intel_vgpu_workload, list))
+
+static int submit_context(struct intel_vgpu *vgpu, int ring_id,
+ struct execlist_ctx_descriptor_format *desc,
+ bool emulate_schedule_in)
+{
+ struct list_head *q = workload_q_head(vgpu, ring_id);
+ struct intel_vgpu_workload *last_workload = get_last_workload(q);
+ struct intel_vgpu_workload *workload = NULL;
+ u64 ring_context_gpa;
+ u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+ int ret;
+
+ ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
+ if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
+ return -EINVAL;
+ }
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_header.val), &head, 4);
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_tail.val), &tail, 4);
+
+ head &= RB_HEAD_OFF_MASK;
+ tail &= RB_TAIL_OFF_MASK;
+
+ if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
+ gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
+ gvt_dbg_el("ctx head %x real head %lx\n", head,
+ last_workload->rb_tail);
+ /*
+ * cannot use guest context head pointer here,
+ * as it might not be updated at this time
+ */
+ head = last_workload->rb_tail;
+ }
+
+ gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+
+ workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
+ if (!workload)
+ return -ENOMEM;
+
+ /* record some ring buffer register values for scan and shadow */
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_start.val), &start, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+
+ INIT_LIST_HEAD(&workload->list);
+ INIT_LIST_HEAD(&workload->shadow_bb);
+
+ init_waitqueue_head(&workload->shadow_ctx_status_wq);
+ atomic_set(&workload->shadow_ctx_active, 0);
+
+ workload->vgpu = vgpu;
+ workload->ring_id = ring_id;
+ workload->ctx_desc = *desc;
+ workload->ring_context_gpa = ring_context_gpa;
+ workload->rb_head = head;
+ workload->rb_tail = tail;
+ workload->rb_start = start;
+ workload->rb_ctl = ctl;
+ workload->prepare = prepare_execlist_workload;
+ workload->complete = complete_execlist_workload;
+ workload->status = -EINPROGRESS;
+ workload->emulate_schedule_in = emulate_schedule_in;
+
+ if (ring_id == RCS) {
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
+
+ workload->wa_ctx.indirect_ctx.guest_gma =
+ indirect_ctx & INDIRECT_CTX_ADDR_MASK;
+ workload->wa_ctx.indirect_ctx.size =
+ (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
+ CACHELINE_BYTES;
+ workload->wa_ctx.per_ctx.guest_gma =
+ per_ctx & PER_CTX_ADDR_MASK;
+ workload->wa_ctx.workload = workload;
+
+ WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
+ }
+
+ if (emulate_schedule_in)
+ memcpy(&workload->elsp_dwords,
+ &vgpu->execlist[ring_id].elsp_dwords,
+ sizeof(workload->elsp_dwords));
+
+ gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
+ workload, ring_id, head, tail, start, ctl);
+
+ gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
+ emulate_schedule_in);
+
+ ret = prepare_mm(workload);
+ if (ret) {
+ kmem_cache_free(vgpu->workloads, workload);
+ return ret;
+ }
+
+ queue_workload(workload);
+ return 0;
+}
+
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+ struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
+ unsigned long valid_desc_bitmap = 0;
+ bool emulate_schedule_in = true;
+ int ret;
+ int i;
+
+ memset(valid_desc, 0, sizeof(valid_desc));
+
+ desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
+ desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+
+ for (i = 0; i < 2; i++) {
+ if (!desc[i]->valid)
+ continue;
+
+ if (!desc[i]->privilege_access) {
+ gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+
+		/* TODO: add more guest context checks here. */
+ set_bit(i, &valid_desc_bitmap);
+ valid_desc[i] = *desc[i];
+ }
+
+ if (!valid_desc_bitmap) {
+ gvt_err("vgpu%d: no valid desc in a elsp submission\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+
+ if (!test_bit(0, (void *)&valid_desc_bitmap) &&
+ test_bit(1, (void *)&valid_desc_bitmap)) {
+ gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+
+ /* submit workload */
+ for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
+ ret = submit_context(vgpu, ring_id, &valid_desc[i],
+ emulate_schedule_in);
+ if (ret) {
+ gvt_err("vgpu%d: fail to schedule workload\n",
+ vgpu->id);
+ return ret;
+ }
+ emulate_schedule_in = false;
+ }
+ return 0;
+}
+
+static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+ struct execlist_context_status_pointer_format ctx_status_ptr;
+ u32 ctx_status_ptr_reg;
+
+ memset(execlist, 0, sizeof(*execlist));
+
+ execlist->vgpu = vgpu;
+ execlist->ring_id = ring_id;
+ execlist->slot[0].index = 0;
+ execlist->slot[1].index = 1;
+
+ ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
+ _EL_OFFSET_STATUS_PTR);
+
+ ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
+ ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+ vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
+}
+
+void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
+{
+ kmem_cache_destroy(vgpu->workloads);
+}
+
+int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
+{
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
+ /* each ring has a virtual execlist engine */
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ init_vgpu_execlist(vgpu, i);
+ INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+ }
+
+ vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+ sizeof(struct intel_vgpu_workload), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+
+ if (!vgpu->workloads)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+ unsigned long ring_bitmap)
+{
+ int bit;
+ struct list_head *pos, *n;
+ struct intel_vgpu_workload *workload = NULL;
+
+ for_each_set_bit(bit, &ring_bitmap, sizeof(ring_bitmap) * 8) {
+ if (bit >= I915_NUM_ENGINES)
+ break;
+		/* free the unsubmitted workloads in the queue */
+ list_for_each_safe(pos, n, &vgpu->workload_q_head[bit]) {
+ workload = container_of(pos,
+ struct intel_vgpu_workload, list);
+ list_del_init(&workload->list);
+ free_workload(workload);
+ }
+
+ init_vgpu_execlist(vgpu, bit);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
new file mode 100644
index 000000000000..635f31c6dcc1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#ifndef _GVT_EXECLIST_H_
+#define _GVT_EXECLIST_H_
+
+struct execlist_ctx_descriptor_format {
+ union {
+ u32 udw;
+ u32 context_id;
+ };
+ union {
+ u32 ldw;
+ struct {
+ u32 valid : 1;
+ u32 force_pd_restore : 1;
+ u32 force_restore : 1;
+ u32 addressing_mode : 2;
+ u32 llc_coherency : 1;
+ u32 fault_handling : 2;
+ u32 privilege_access : 1;
+ u32 reserved : 3;
+ u32 lrca : 20;
+ };
+ };
+};
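+
+/*
+ * For instance, a descriptor with ldw == 0x5119 decodes to valid = 1,
+ * addressing_mode = 3 (legacy 64-bit), privilege_access = 1 and
+ * lrca = 0x5; udw carries the 32-bit context ID.
+ */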
+
+struct execlist_status_format {
+ union {
+ u32 ldw;
+ struct {
+ u32 current_execlist_pointer :1;
+ u32 execlist_write_pointer :1;
+ u32 execlist_queue_full :1;
+ u32 execlist_1_valid :1;
+ u32 execlist_0_valid :1;
+ u32 last_ctx_switch_reason :9;
+ u32 current_active_elm_status :2;
+ u32 arbitration_enable :1;
+ u32 execlist_1_active :1;
+ u32 execlist_0_active :1;
+ u32 reserved :13;
+ };
+ };
+ union {
+ u32 udw;
+ u32 context_id;
+ };
+};
+
+struct execlist_context_status_pointer_format {
+ union {
+ u32 dw;
+ struct {
+ u32 write_ptr :3;
+ u32 reserved :5;
+ u32 read_ptr :3;
+ u32 reserved2 :5;
+ u32 mask :16;
+ };
+ };
+};
+
+struct execlist_context_status_format {
+ union {
+ u32 ldw;
+ struct {
+ u32 idle_to_active :1;
+ u32 preempted :1;
+ u32 element_switch :1;
+ u32 active_to_idle :1;
+ u32 context_complete :1;
+ u32 wait_on_sync_flip :1;
+ u32 wait_on_vblank :1;
+ u32 wait_on_semaphore :1;
+ u32 wait_on_scanline :1;
+ u32 reserved :2;
+ u32 semaphore_wait_mode :1;
+ u32 display_plane :3;
+ u32 lite_restore :1;
+ u32 reserved_2 :16;
+ };
+ };
+ union {
+ u32 udw;
+ u32 context_id;
+ };
+};
+
+struct execlist_mmio_pair {
+ u32 addr;
+ u32 val;
+};
+
+/* The first 52 dwords in register state context */
+struct execlist_ring_context {
+ u32 nop1;
+ u32 lri_cmd_1;
+ struct execlist_mmio_pair ctx_ctrl;
+ struct execlist_mmio_pair ring_header;
+ struct execlist_mmio_pair ring_tail;
+ struct execlist_mmio_pair rb_start;
+ struct execlist_mmio_pair rb_ctrl;
+ struct execlist_mmio_pair bb_cur_head_UDW;
+ struct execlist_mmio_pair bb_cur_head_LDW;
+ struct execlist_mmio_pair bb_state;
+ struct execlist_mmio_pair second_bb_addr_UDW;
+ struct execlist_mmio_pair second_bb_addr_LDW;
+ struct execlist_mmio_pair second_bb_state;
+ struct execlist_mmio_pair bb_per_ctx_ptr;
+ struct execlist_mmio_pair rcs_indirect_ctx;
+ struct execlist_mmio_pair rcs_indirect_ctx_offset;
+ u32 nop2;
+ u32 nop3;
+ u32 nop4;
+ u32 lri_cmd_2;
+ struct execlist_mmio_pair ctx_timestamp;
+ struct execlist_mmio_pair pdp3_UDW;
+ struct execlist_mmio_pair pdp3_LDW;
+ struct execlist_mmio_pair pdp2_UDW;
+ struct execlist_mmio_pair pdp2_LDW;
+ struct execlist_mmio_pair pdp1_UDW;
+ struct execlist_mmio_pair pdp1_LDW;
+ struct execlist_mmio_pair pdp0_UDW;
+ struct execlist_mmio_pair pdp0_LDW;
+};
+
+struct intel_vgpu_elsp_dwords {
+ u32 data[4];
+ u32 index;
+};
+
+struct intel_vgpu_execlist_slot {
+ struct execlist_ctx_descriptor_format ctx[2];
+ u32 index;
+};
+
+struct intel_vgpu_execlist {
+ struct intel_vgpu_execlist_slot slot[2];
+ struct intel_vgpu_execlist_slot *running_slot;
+ struct intel_vgpu_execlist_slot *pending_slot;
+ struct execlist_ctx_descriptor_format *running_context;
+ int ring_id;
+ struct intel_vgpu *vgpu;
+ struct intel_vgpu_elsp_dwords elsp_dwords;
+};
+
+void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
+
+int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
+
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
+
+void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+ unsigned long ring_bitmap);
+
+#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
new file mode 100644
index 000000000000..2fae2a2ca96f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Changbin Du <changbin.du@intel.com>
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/crc32.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+#define FIRMWARE_VERSION (0x0)
+
+struct gvt_firmware_header {
+ u64 magic;
+ u32 crc32; /* protect the data after this field */
+ u32 version;
+ u64 cfg_space_size;
+ u64 cfg_space_offset; /* offset in the file */
+ u64 mmio_size;
+ u64 mmio_offset; /* offset in the file */
+ unsigned char data[1];
+};
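+
+/*
+ * Layout of the firmware image: the header sits at offset 0, the saved
+ * PCI config space starts at cfg_space_offset (the data[] member) and
+ * the saved MMIO snapshot follows it at mmio_offset. The crc32 covers
+ * everything after the crc32 field itself, i.e. from version onwards.
+ */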
+
+#define RD(offset) (readl(mmio + offset.reg))
+#define WR(v, offset) (writel(v, mmio + offset.reg))
+
+static void bdw_forcewake_get(void __iomem *mmio)
+{
+ WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);
+
+ RD(ECOBUS);
+
+ if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
+ gvt_err("fail to wait forcewake idle\n");
+
+ WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);
+
+ if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
+ gvt_err("fail to wait forcewake ack\n");
+
+ if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
+ GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
+ gvt_err("fail to wait c0 wake up\n");
+}
+
+#undef RD
+#undef WR
+
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
+static ssize_t
+gvt_firmware_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ memcpy(buf, attr->private + offset, count);
+ return count;
+}
+
+static struct bin_attribute firmware_attr = {
+ .attr = {.name = "gvt_firmware", .mode = (S_IRUSR)},
+ .read = gvt_firmware_read,
+ .write = NULL,
+ .mmap = NULL,
+};
+
+static int expose_firmware_sysfs(struct intel_gvt *gvt,
+ void __iomem *mmio)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct intel_gvt_mmio_info *e;
+ struct gvt_firmware_header *h;
+ void *firmware;
+ void *p;
+ unsigned long size;
+ int i;
+ int ret;
+
+ size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+ firmware = vmalloc(size);
+ if (!firmware)
+ return -ENOMEM;
+
+ h = firmware;
+
+ h->magic = VGT_MAGIC;
+ h->version = FIRMWARE_VERSION;
+ h->cfg_space_size = info->cfg_space_size;
+ h->cfg_space_offset = offsetof(struct gvt_firmware_header, data);
+ h->mmio_size = info->mmio_size;
+ h->mmio_offset = h->cfg_space_offset + h->cfg_space_size;
+
+ p = firmware + h->cfg_space_offset;
+
+ for (i = 0; i < h->cfg_space_size; i += 4)
+ pci_read_config_dword(pdev, i, p + i);
+
+ memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
+
+ p = firmware + h->mmio_offset;
+
+ hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
+ int j;
+
+ for (j = 0; j < e->length; j += 4)
+ *(u32 *)(p + e->offset + j) =
+ readl(mmio + e->offset + j);
+ }
+
+ memcpy(gvt->firmware.mmio, p, info->mmio_size);
+
+ firmware_attr.size = size;
+ firmware_attr.private = firmware;
+
+ ret = device_create_bin_file(&pdev->dev, &firmware_attr);
+ if (ret) {
+ vfree(firmware);
+ return ret;
+ }
+ return 0;
+}
+
+static void clean_firmware_sysfs(struct intel_gvt *gvt)
+{
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+
+ device_remove_bin_file(&pdev->dev, &firmware_attr);
+ vfree(firmware_attr.private);
+}
+
+/**
+ * intel_gvt_free_firmware - free GVT firmware
+ * @gvt: intel gvt device
+ *
+ */
+void intel_gvt_free_firmware(struct intel_gvt *gvt)
+{
+ if (!gvt->firmware.firmware_loaded)
+ clean_firmware_sysfs(gvt);
+
+ kfree(gvt->firmware.cfg_space);
+ kfree(gvt->firmware.mmio);
+}
+
+static int verify_firmware(struct intel_gvt *gvt,
+ const struct firmware *fw)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct gvt_firmware_header *h;
+ unsigned long id, crc32_start;
+ const void *mem;
+ const char *item;
+ u64 file, request;
+
+ h = (struct gvt_firmware_header *)fw->data;
+
+ crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+ mem = fw->data + crc32_start;
+
+#define VERIFY(s, a, b) do { \
+ item = (s); file = (u64)(a); request = (u64)(b); \
+ if ((a) != (b)) \
+ goto invalid_firmware; \
+} while (0)
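+
+/*
+ * VERIFY() records the item name and both values before comparing, so
+ * the invalid_firmware error path below can report which field of the
+ * firmware file mismatched the running device.
+ */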
+
+ VERIFY("magic number", h->magic, VGT_MAGIC);
+ VERIFY("version", h->version, FIRMWARE_VERSION);
+ VERIFY("crc32", h->crc32, crc32_le(0, mem, fw->size - crc32_start));
+ VERIFY("cfg space size", h->cfg_space_size, info->cfg_space_size);
+ VERIFY("mmio size", h->mmio_size, info->mmio_size);
+
+ mem = (fw->data + h->cfg_space_offset);
+
+ id = *(u16 *)(mem + PCI_VENDOR_ID);
+ VERIFY("vender id", id, pdev->vendor);
+
+ id = *(u16 *)(mem + PCI_DEVICE_ID);
+ VERIFY("device id", id, pdev->device);
+
+ id = *(u8 *)(mem + PCI_REVISION_ID);
+ VERIFY("revision id", id, pdev->revision);
+
+#undef VERIFY
+ return 0;
+
+invalid_firmware:
+ gvt_dbg_core("Invalid firmware: %s [file] 0x%llx [request] 0x%llx\n",
+ item, file, request);
+ return -EINVAL;
+}
+
+#define GVT_FIRMWARE_PATH "i915/gvt"
+
+/**
+ * intel_gvt_load_firmware - load GVT firmware
+ * @gvt: intel gvt device
+ *
+ */
+int intel_gvt_load_firmware(struct intel_gvt *gvt)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct intel_gvt_firmware *firmware = &gvt->firmware;
+ struct gvt_firmware_header *h;
+ const struct firmware *fw;
+ char *path;
+ void __iomem *mmio;
+ void *mem;
+ int ret;
+
+ path = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!path)
+ return -ENOMEM;
+
+ mem = kmalloc(info->cfg_space_size, GFP_KERNEL);
+ if (!mem) {
+ kfree(path);
+ return -ENOMEM;
+ }
+
+ firmware->cfg_space = mem;
+
+ mem = kmalloc(info->mmio_size, GFP_KERNEL);
+ if (!mem) {
+ kfree(path);
+ kfree(firmware->cfg_space);
+ return -ENOMEM;
+ }
+
+ firmware->mmio = mem;
+
+ mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size);
+ if (!mmio) {
+ kfree(path);
+ kfree(firmware->cfg_space);
+ kfree(firmware->mmio);
+ return -EINVAL;
+ }
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
+ bdw_forcewake_get(mmio);
+
+ sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+ GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
+ pdev->revision);
+
+ gvt_dbg_core("request hw state firmware %s...\n", path);
+
+ ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+ kfree(path);
+
+ if (ret)
+ goto expose_firmware;
+
+ gvt_dbg_core("success.\n");
+
+ ret = verify_firmware(gvt, fw);
+ if (ret)
+ goto out_free_fw;
+
+ gvt_dbg_core("verified.\n");
+
+ h = (struct gvt_firmware_header *)fw->data;
+
+ memcpy(firmware->cfg_space, fw->data + h->cfg_space_offset,
+ h->cfg_space_size);
+ memcpy(firmware->mmio, fw->data + h->mmio_offset,
+ h->mmio_size);
+
+ release_firmware(fw);
+ firmware->firmware_loaded = true;
+ pci_iounmap(pdev, mmio);
+ return 0;
+
+out_free_fw:
+ release_firmware(fw);
+expose_firmware:
+ expose_firmware_sysfs(gvt, mmio);
+ pci_iounmap(pdev, mmio);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
new file mode 100644
index 000000000000..2cc761328569
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -0,0 +1,2232 @@
+/*
+ * GTT virtualization
+ *
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Xiao Zheng <xiao.zheng@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+#include "trace.h"
+
+static bool enable_out_of_sync = false;
+static int preallocated_oos_pages = 8192;
+
+/*
+ * validate a gm address and related range size,
+ * translate it to host gm address
+ */
+bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
+{
+ if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
+ && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
+ gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
+ vgpu->id, addr, size);
+ return false;
+ }
+ return true;
+}
+
+/* translate a guest gmadr to host gmadr */
+int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
+{
+ if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
+ "invalid guest gmadr %llx\n", g_addr))
+ return -EACCES;
+
+ if (vgpu_gmadr_is_aperture(vgpu, g_addr))
+ *h_addr = vgpu_aperture_gmadr_base(vgpu)
+ + (g_addr - vgpu_aperture_offset(vgpu));
+ else
+ *h_addr = vgpu_hidden_gmadr_base(vgpu)
+ + (g_addr - vgpu_hidden_offset(vgpu));
+ return 0;
+}
+
+/* translate a host gmadr to guest gmadr */
+int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
+{
+ if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
+ "invalid host gmadr %llx\n", h_addr))
+ return -EACCES;
+
+ if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
+ *g_addr = vgpu_aperture_gmadr_base(vgpu)
+ + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
+ else
+ *g_addr = vgpu_hidden_gmadr_base(vgpu)
+ + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
+ return 0;
+}
+
+int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
+ unsigned long *h_index)
+{
+ u64 h_addr;
+ int ret;
+
+ ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
+ &h_addr);
+ if (ret)
+ return ret;
+
+ *h_index = h_addr >> GTT_PAGE_SHIFT;
+ return 0;
+}
+
+int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
+ unsigned long *g_index)
+{
+ u64 g_addr;
+ int ret;
+
+ ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
+ &g_addr);
+ if (ret)
+ return ret;
+
+ *g_index = g_addr >> GTT_PAGE_SHIFT;
+ return 0;
+}
+
+#define gtt_type_is_entry(type) \
+ (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
+ && type != GTT_TYPE_PPGTT_PTE_ENTRY \
+ && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
+
+#define gtt_type_is_pt(type) \
+ (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
+
+#define gtt_type_is_pte_pt(type) \
+ (type == GTT_TYPE_PPGTT_PTE_PT)
+
+#define gtt_type_is_root_pointer(type) \
+ (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
+
+#define gtt_init_entry(e, t, p, v) do { \
+ (e)->type = t; \
+ (e)->pdev = p; \
+ memcpy(&(e)->val64, &v, sizeof(v)); \
+} while (0)
+
+enum {
+ GTT_TYPE_INVALID = -1,
+
+ GTT_TYPE_GGTT_PTE,
+
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+
+ GTT_TYPE_PPGTT_PTE_ENTRY,
+
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+
+ GTT_TYPE_PPGTT_ROOT_ENTRY,
+
+ GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+
+ GTT_TYPE_PPGTT_ENTRY,
+
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_PPGTT_PML4_PT,
+
+ GTT_TYPE_MAX,
+};
+
+/*
+ * Mappings between GTT_TYPE* enumerations.
+ * The following information can be derived from a given type:
+ * - type of next level page table
+ * - type of entry inside this level page table
+ * - type of entry with PSE set
+ *
+ * If the given type doesn't carry a particular piece of information,
+ * GTT_TYPE_INVALID is returned: an L4 root entry has no PSE bit, so
+ * requesting its PSE type yields GTT_TYPE_INVALID, and a PTE page table
+ * has no next level page table, so requesting its next level type
+ * yields GTT_TYPE_INVALID as well. This is useful when traversing a
+ * page table.
+ */
+
+struct gtt_type_table_entry {
+ int entry_type;
+ int next_pt_type;
+ int pse_entry_type;
+};
+
+#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
+ [type] = { \
+ .entry_type = e_type, \
+ .next_pt_type = npt_type, \
+ .pse_entry_type = pse_type, \
+ }
+
+static struct gtt_type_table_entry gtt_type_table[] = {
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_PT,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+ GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+ GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_PPGTT_PTE_1G_ENTRY),
+ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
+ GTT_TYPE_GGTT_PTE,
+ GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID),
+};
+
+static inline int get_next_pt_type(int type)
+{
+ return gtt_type_table[type].next_pt_type;
+}
+
+static inline int get_entry_type(int type)
+{
+ return gtt_type_table[type].entry_type;
+}
+
+static inline int get_pse_type(int type)
+{
+ return gtt_type_table[type].pse_entry_type;
+}
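+
+/*
+ * For example, for GTT_TYPE_PPGTT_PDE_PT the table yields
+ * get_entry_type() == GTT_TYPE_PPGTT_PDE_ENTRY, get_next_pt_type() ==
+ * GTT_TYPE_PPGTT_PTE_PT and get_pse_type() ==
+ * GTT_TYPE_PPGTT_PTE_2M_ENTRY, while a PTE page table returns
+ * GTT_TYPE_INVALID for both its next level and PSE types.
+ */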
+
+static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+{
+ void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+ u64 pte;
+
+#ifdef readq
+ pte = readq(addr);
+#else
+ pte = ioread32(addr);
+	pte |= (u64)ioread32(addr + 4) << 32;
+#endif
+ return pte;
+}
+
+static void write_pte64(struct drm_i915_private *dev_priv,
+ unsigned long index, u64 pte)
+{
+ void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+
+#ifdef writeq
+ writeq(pte, addr);
+#else
+ iowrite32((u32)pte, addr);
+ iowrite32(pte >> 32, addr + 4);
+#endif
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (WARN_ON(info->gtt_entry_size != 8))
+ return e;
+
+ if (hypervisor_access) {
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
+ (index << info->gtt_entry_size_shift),
+ &e->val64, 8);
+ WARN_ON(ret);
+ } else if (!pt) {
+ e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+ } else {
+ e->val64 = *((u64 *)pt + index);
+ }
+ return e;
+}
+
+static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (WARN_ON(info->gtt_entry_size != 8))
+ return e;
+
+ if (hypervisor_access) {
+ ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
+ (index << info->gtt_entry_size_shift),
+ &e->val64, 8);
+ WARN_ON(ret);
+ } else if (!pt) {
+ write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+ } else {
+ *((u64 *)pt + index) = e->val64;
+ }
+ return e;
+}
+
+#define GTT_HAW 46
+
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+
+static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
+{
+ unsigned long pfn;
+
+ if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
+ pfn = (e->val64 & ADDR_1G_MASK) >> 12;
+ else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
+ pfn = (e->val64 & ADDR_2M_MASK) >> 12;
+ else
+ pfn = (e->val64 & ADDR_4K_MASK) >> 12;
+ return pfn;
+}
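+
+/*
+ * All three cases above shift by 12, so the returned pfn is always in
+ * 4K units; the per-size masks only decide how many low address bits
+ * belong to the page offset of a 4K, 2M or 1G entry.
+ */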
+
+static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
+{
+ if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+ e->val64 &= ~ADDR_1G_MASK;
+ pfn &= (ADDR_1G_MASK >> 12);
+ } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
+ e->val64 &= ~ADDR_2M_MASK;
+ pfn &= (ADDR_2M_MASK >> 12);
+ } else {
+ e->val64 &= ~ADDR_4K_MASK;
+ pfn &= (ADDR_4K_MASK >> 12);
+ }
+
+ e->val64 |= (pfn << 12);
+}
+
+static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
+{
+ /* Entry doesn't have PSE bit. */
+ if (get_pse_type(e->type) == GTT_TYPE_INVALID)
+ return false;
+
+ e->type = get_entry_type(e->type);
+ if (!(e->val64 & (1 << 7)))
+ return false;
+
+ e->type = get_pse_type(e->type);
+ return true;
+}
+
+static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
+{
+ /*
+	 * i915 writes the PDP root pointer registers without the present
+	 * bit set, which still works, so root pointer entries need to be
+	 * treated specially.
+ */
+ if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+ || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
+ return (e->val64 != 0);
+ else
+ return (e->val64 & (1 << 0));
+}
+
+static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 &= ~(1 << 0);
+}
+
+/*
+ * Per-platform GMA routines.
+ */
+static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
+{
+ unsigned long x = (gma >> GTT_PAGE_SHIFT);
+
+ trace_gma_index(__func__, gma, x);
+ return x;
+}
+
+#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
+static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
+{ \
+ unsigned long x = (exp); \
+ trace_gma_index(__func__, gma, x); \
+ return x; \
+}
+
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
+DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
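+
+/*
+ * With the shifts and masks above, a 4-level PPGTT graphics memory
+ * address decomposes as pml4 index in bits 47:39, pdp index in bits
+ * 38:30, pde index in bits 29:21, pte index in bits 20:12 and the page
+ * offset in bits 11:0; a 3-level (legacy 32-bit) address only uses a
+ * 2-bit l3_pdp index in bits 31:30.
+ */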
+
+static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
+ .get_entry = gtt_get_entry64,
+ .set_entry = gtt_set_entry64,
+ .clear_present = gtt_entry_clear_present,
+ .test_present = gen8_gtt_test_present,
+ .test_pse = gen8_gtt_test_pse,
+ .get_pfn = gen8_gtt_get_pfn,
+ .set_pfn = gen8_gtt_set_pfn,
+};
+
+static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
+ .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
+ .gma_to_pte_index = gen8_gma_to_pte_index,
+ .gma_to_pde_index = gen8_gma_to_pde_index,
+ .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
+ .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
+ .gma_to_pml4_index = gen8_gma_to_pml4_index,
+};
+
+static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
+ struct intel_gvt_gtt_entry *m)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long gfn, mfn;
+
+ *m = *p;
+
+ if (!ops->test_present(p))
+ return 0;
+
+ gfn = ops->get_pfn(p);
+
+ mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate gfn: 0x%lx\n", gfn);
+ return -ENXIO;
+ }
+
+ ops->set_pfn(m, mfn);
+ return 0;
+}
+
+/*
+ * MM helpers.
+ */
+struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index)
+{
+ struct intel_gvt *gvt = mm->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ e->type = mm->page_table_entry_type;
+
+ ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+ ops->test_pse(e);
+ return e;
+}
+
+struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index)
+{
+ struct intel_gvt *gvt = mm->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
+}
+
+/*
+ * PPGTT shadow page table helpers.
+ */
+static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
+ struct intel_vgpu_ppgtt_spt *spt,
+ void *page_table, int type,
+ struct intel_gvt_gtt_entry *e, unsigned long index,
+ bool guest)
+{
+ struct intel_gvt *gvt = spt->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ e->type = get_entry_type(type);
+
+ if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
+ return e;
+
+ ops->get_entry(page_table, e, index, guest,
+ spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->vgpu);
+ ops->test_pse(e);
+ return e;
+}
+
+static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
+ struct intel_vgpu_ppgtt_spt *spt,
+ void *page_table, int type,
+ struct intel_gvt_gtt_entry *e, unsigned long index,
+ bool guest)
+{
+ struct intel_gvt *gvt = spt->vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+
+ if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
+ return e;
+
+ return ops->set_entry(page_table, e, index, guest,
+ spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->vgpu);
+}
+
+#define ppgtt_get_guest_entry(spt, e, index) \
+ ppgtt_spt_get_entry(spt, NULL, \
+ spt->guest_page_type, e, index, true)
+
+#define ppgtt_set_guest_entry(spt, e, index) \
+ ppgtt_spt_set_entry(spt, NULL, \
+ spt->guest_page_type, e, index, true)
+
+#define ppgtt_get_shadow_entry(spt, e, index) \
+ ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
+ spt->shadow_page.type, e, index, false)
+
+#define ppgtt_set_shadow_entry(spt, e, index) \
+ ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
+ spt->shadow_page.type, e, index, false)
+
+/**
+ * intel_vgpu_init_guest_page - init a guest page data structure
+ * @vgpu: a vGPU
+ * @p: a guest page data structure
+ * @gfn: guest memory page frame number
+ * @handler: the function to be called when the tracked guest memory
+ * page is modified
+ * @data: the private data attached to the guest page
+ *
+ * This function is called when a user wants to track a guest memory page.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p,
+ unsigned long gfn,
+ int (*handler)(void *, u64, void *, int),
+ void *data)
+{
+ INIT_HLIST_NODE(&p->node);
+
+ p->writeprotection = false;
+ p->gfn = gfn;
+ p->handler = handler;
+ p->data = data;
+ p->oos_page = NULL;
+ p->write_cnt = 0;
+
+ hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
+ return 0;
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page);
+
+/**
+ * intel_vgpu_clean_guest_page - release the resources owned by a guest
+ * page data structure
+ * @vgpu: a vGPU
+ * @p: a tracked guest page
+ *
+ * This function is called when a user stops tracking a guest memory
+ * page.
+ */
+void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ if (!hlist_unhashed(&p->node))
+ hash_del(&p->node);
+
+ if (p->oos_page)
+ detach_oos_page(vgpu, p->oos_page);
+
+ if (p->writeprotection)
+ intel_gvt_hypervisor_unset_wp_page(vgpu, p);
+}
+
+/**
+ * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
+ * @vgpu: a vGPU
+ * @gfn: guest memory page frame number
+ *
+ * This function is called when emulation logic wants to know if a trapped GFN
+ * is a tracked guest page.
+ *
+ * Returns:
+ * Pointer to guest page data structure, NULL if failed.
+ */
+struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ struct intel_vgpu_guest_page *p;
+
+ hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
+ p, node, gfn) {
+ if (p->gfn == gfn)
+ return p;
+ }
+ return NULL;
+}
+
+static inline int init_shadow_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_shadow_page *p, int type)
+{
+ p->vaddr = page_address(p->page);
+ p->type = type;
+
+ INIT_HLIST_NODE(&p->node);
+
+ p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
+ if (p->mfn == INTEL_GVT_INVALID_ADDR)
+ return -EFAULT;
+
+ hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
+ return 0;
+}
+
+static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
+{
+ if (!hlist_unhashed(&p->node))
+ hash_del(&p->node);
+}
+
+static inline struct intel_vgpu_shadow_page *find_shadow_page(
+ struct intel_vgpu *vgpu, unsigned long mfn)
+{
+ struct intel_vgpu_shadow_page *p;
+
+ hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
+ p, node, mfn) {
+ if (p->mfn == mfn)
+ return p;
+ }
+ return NULL;
+}
+
+#define guest_page_to_ppgtt_spt(ptr) \
+ container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
+
+#define shadow_page_to_ppgtt_spt(ptr) \
+ container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
+
+static void *alloc_spt(gfp_t gfp_mask)
+{
+ struct intel_vgpu_ppgtt_spt *spt;
+
+ spt = kzalloc(sizeof(*spt), gfp_mask);
+ if (!spt)
+ return NULL;
+
+ spt->shadow_page.page = alloc_page(gfp_mask);
+ if (!spt->shadow_page.page) {
+ kfree(spt);
+ return NULL;
+ }
+ return spt;
+}
+
+static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
+{
+ __free_page(spt->shadow_page.page);
+ kfree(spt);
+}
+
+static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
+
+ clean_shadow_page(&spt->shadow_page);
+ intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
+ list_del_init(&spt->post_shadow_list);
+
+ free_spt(spt);
+}
+
+static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
+{
+ struct hlist_node *n;
+ struct intel_vgpu_shadow_page *sp;
+ int i;
+
+ hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
+ ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
+}
+
+static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+ u64 pa, void *p_data, int bytes);
+
+static int ppgtt_write_protection_handler(void *gp, u64 pa,
+ void *p_data, int bytes)
+{
+ struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ if (!gpt->writeprotection)
+ return -EINVAL;
+
+	return ppgtt_handle_guest_write_page_table_bytes(gp,
+			pa, p_data, bytes);
+}
+
+static int reclaim_one_mm(struct intel_gvt *gvt);
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
+ struct intel_vgpu *vgpu, int type, unsigned long gfn)
+{
+ struct intel_vgpu_ppgtt_spt *spt = NULL;
+ int ret;
+
+retry:
+ spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
+ if (!spt) {
+ if (reclaim_one_mm(vgpu->gvt))
+ goto retry;
+
+ gvt_err("fail to allocate ppgtt shadow page\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spt->vgpu = vgpu;
+ spt->guest_page_type = type;
+ atomic_set(&spt->refcount, 1);
+ INIT_LIST_HEAD(&spt->post_shadow_list);
+
+ /*
+	 * TODO: the guest page type may differ from the shadow page type
+	 * once PSE pages are supported in the future.
+ */
+ ret = init_shadow_page(vgpu, &spt->shadow_page, type);
+ if (ret) {
+ gvt_err("fail to initialize shadow page for spt\n");
+ goto err;
+ }
+
+ ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
+ gfn, ppgtt_write_protection_handler, NULL);
+ if (ret) {
+ gvt_err("fail to initialize guest page for spt\n");
+ goto err;
+ }
+
+ trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+ return spt;
+err:
+ ppgtt_free_shadow_page(spt);
+ return ERR_PTR(ret);
+}
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
+ struct intel_vgpu *vgpu, unsigned long mfn)
+{
+ struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);
+
+ if (p)
+ return shadow_page_to_ppgtt_spt(p);
+
+ gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
+ vgpu->id, mfn);
+ return NULL;
+}
+
+#define pt_entry_size_shift(spt) \
+ ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
+
+#define pt_entries(spt) \
+ (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
+
+#define for_each_present_guest_entry(spt, e, i) \
+ for (i = 0; i < pt_entries(spt); i++) \
+ if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
+ ppgtt_get_guest_entry(spt, e, i)))
+
+#define for_each_present_shadow_entry(spt, e, i) \
+ for (i = 0; i < pt_entries(spt); i++) \
+ if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
+ ppgtt_get_shadow_entry(spt, e, i)))
+
+static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ int v = atomic_read(&spt->refcount);
+
+ trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
+
+ atomic_inc(&spt->refcount);
+}
+
+static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+
+static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
+ struct intel_gvt_gtt_entry *e)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *s;
+
+ if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
+ return -EINVAL;
+
+ if (ops->get_pfn(e) == vgpu->gtt.scratch_page_mfn)
+ return 0;
+
+ s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+ if (!s) {
+ gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
+ vgpu->id, ops->get_pfn(e));
+ return -ENXIO;
+ }
+ return ppgtt_invalidate_shadow_page(s);
+}
+
+static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ struct intel_gvt_gtt_entry e;
+ unsigned long index;
+ int ret;
+ int v = atomic_read(&spt->refcount);
+
+ trace_spt_change(spt->vgpu->id, "die", spt,
+ spt->guest_page.gfn, spt->shadow_page.type);
+
+ trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
+
+ if (atomic_dec_return(&spt->refcount) > 0)
+ return 0;
+
+ if (gtt_type_is_pte_pt(spt->shadow_page.type))
+ goto release;
+
+ for_each_present_shadow_entry(spt, &e, index) {
+ if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
+ gvt_err("GVT doesn't support pse bit for now\n");
+ return -EINVAL;
+ }
+ ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
+ spt->vgpu, &e);
+ if (ret)
+ goto fail;
+ }
+release:
+ trace_spt_change(spt->vgpu->id, "release", spt,
+ spt->guest_page.gfn, spt->shadow_page.type);
+ ppgtt_free_shadow_page(spt);
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
+ spt->vgpu->id, spt, e.val64, e.type);
+ return ret;
+}
+
+static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
+ struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
+{
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *s = NULL;
+ struct intel_vgpu_guest_page *g;
+ int ret;
+
+ if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
+ if (g) {
+ s = guest_page_to_ppgtt_spt(g);
+ ppgtt_get_shadow_page(s);
+ } else {
+ int type = get_next_pt_type(we->type);
+
+ s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+
+ ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
+ if (ret)
+ goto fail;
+
+ ret = ppgtt_populate_shadow_page(s);
+ if (ret)
+ goto fail;
+
+ trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
+ s->shadow_page.type);
+ }
+ return s;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+ vgpu->id, s, we->val64, we->type);
+ return ERR_PTR(ret);
+}
+
+static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
+ struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
+{
+ struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
+
+ se->type = ge->type;
+ se->val64 = ge->val64;
+
+ ops->set_pfn(se, s->shadow_page.mfn);
+}
+
+static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+{
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_vgpu_ppgtt_spt *s;
+ struct intel_gvt_gtt_entry se, ge;
+ unsigned long i;
+ int ret;
+
+ trace_spt_change(spt->vgpu->id, "born", spt,
+ spt->guest_page.gfn, spt->shadow_page.type);
+
+ if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
+ for_each_present_guest_entry(spt, &ge, i) {
+ ret = gtt_entry_p2m(vgpu, &ge, &se);
+ if (ret)
+ goto fail;
+ ppgtt_set_shadow_entry(spt, &se, i);
+ }
+ return 0;
+ }
+
+ for_each_present_guest_entry(spt, &ge, i) {
+ if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
+ gvt_err("GVT doesn't support pse bit now\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+ ppgtt_get_shadow_entry(spt, &se, i);
+ ppgtt_generate_shadow_entry(&se, s, &ge);
+ ppgtt_set_shadow_entry(spt, &se, i);
+ }
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+ vgpu->id, spt, ge.val64, ge.type);
+ return ret;
+}
+
+static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
+ struct intel_gvt_gtt_entry *we, unsigned long index)
+{
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry e;
+ int ret;
+
+ trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type,
+ we->val64, index);
+
+ ppgtt_get_shadow_entry(spt, &e, index);
+ if (!ops->test_present(&e))
+ return 0;
+
+ if (ops->get_pfn(&e) == vgpu->gtt.scratch_page_mfn)
+ return 0;
+
+ if (gtt_type_is_pt(get_next_pt_type(we->type))) {
+ struct intel_vgpu_guest_page *g =
+ intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
+ if (!g) {
+ gvt_err("fail to find guest page\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+ ret = ppgtt_invalidate_shadow_page(guest_page_to_ppgtt_spt(g));
+ if (ret)
+ goto fail;
+ }
+ ops->set_pfn(&e, vgpu->gtt.scratch_page_mfn);
+ ppgtt_set_shadow_entry(spt, &e, index);
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
+ vgpu->id, spt, we->val64, we->type);
+ return ret;
+}
+
+static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
+ struct intel_gvt_gtt_entry *we, unsigned long index)
+{
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_entry m;
+ struct intel_vgpu_ppgtt_spt *s;
+ int ret;
+
+ trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
+ we->val64, index);
+
+ if (gtt_type_is_pt(get_next_pt_type(we->type))) {
+ s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto fail;
+ }
+ ppgtt_get_shadow_entry(spt, &m, index);
+ ppgtt_generate_shadow_entry(&m, s, we);
+ ppgtt_set_shadow_entry(spt, &m, index);
+ } else {
+ ret = gtt_entry_p2m(vgpu, we, &m);
+ if (ret)
+ goto fail;
+ ppgtt_set_shadow_entry(spt, &m, index);
+ }
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
+ spt, we->val64, we->type);
+ return ret;
+}
+
+static int sync_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *spt =
+ guest_page_to_ppgtt_spt(oos_page->guest_page);
+ struct intel_gvt_gtt_entry old, new, m;
+ int index;
+ int ret;
+
+ trace_oos_change(vgpu->id, "sync", oos_page->id,
+ oos_page->guest_page, spt->guest_page_type);
+
+ old.type = new.type = get_entry_type(spt->guest_page_type);
+ old.val64 = new.val64 = 0;
+
+ for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
+ index++) {
+ ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
+ ops->get_entry(NULL, &new, index, true,
+ oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
+
+ if (old.val64 == new.val64
+ && !test_and_clear_bit(index, spt->post_shadow_bitmap))
+ continue;
+
+ trace_oos_sync(vgpu->id, oos_page->id,
+ oos_page->guest_page, spt->guest_page_type,
+ new.val64, index);
+
+ ret = gtt_entry_p2m(vgpu, &new, &m);
+ if (ret)
+ return ret;
+
+ ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
+ ppgtt_set_shadow_entry(spt, &m, index);
+ }
+
+ oos_page->guest_page->write_cnt = 0;
+ list_del_init(&spt->post_shadow_list);
+ return 0;
+}
+
+static int detach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_vgpu_ppgtt_spt *spt =
+ guest_page_to_ppgtt_spt(oos_page->guest_page);
+
+ trace_oos_change(vgpu->id, "detach", oos_page->id,
+ oos_page->guest_page, spt->guest_page_type);
+
+ oos_page->guest_page->write_cnt = 0;
+ oos_page->guest_page->oos_page = NULL;
+ oos_page->guest_page = NULL;
+
+ list_del_init(&oos_page->vm_list);
+ list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
+
+ return 0;
+}
+
+static int attach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page,
+ struct intel_vgpu_guest_page *gpt)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ret;
+
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
+ oos_page->mem, GTT_PAGE_SIZE);
+ if (ret)
+ return ret;
+
+ oos_page->guest_page = gpt;
+ gpt->oos_page = oos_page;
+
+ list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
+
+ trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
+ gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+ return 0;
+}
+
+static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *gpt)
+{
+ int ret;
+
+ ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
+ if (ret)
+ return ret;
+
+ trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
+ gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+
+ list_del_init(&gpt->oos_page->vm_list);
+ return sync_oos_page(vgpu, gpt->oos_page);
+}
+
+static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *gpt)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+ int ret;
+
+ WARN(oos_page, "shadow PPGTT page already has an oos page\n");
+
+ if (list_empty(&gtt->oos_page_free_list_head)) {
+ oos_page = container_of(gtt->oos_page_use_list_head.next,
+ struct intel_vgpu_oos_page, list);
+ ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+ if (ret)
+ return ret;
+ ret = detach_oos_page(vgpu, oos_page);
+ if (ret)
+ return ret;
+ } else
+ oos_page = container_of(gtt->oos_page_free_list_head.next,
+ struct intel_vgpu_oos_page, list);
+ return attach_oos_page(vgpu, oos_page, gpt);
+}
+
+static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *gpt)
+{
+ struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+
+ if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
+ return -EINVAL;
+
+ trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
+ gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+
+ list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
+ return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
+}
+
+/**
+ * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called before submitting a guest workload to the host,
+ * to sync all the out-of-sync shadow pages of the vGPU.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_oos_page *oos_page;
+ int ret;
+
+ if (!enable_out_of_sync)
+ return 0;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
+ oos_page = container_of(pos,
+ struct intel_vgpu_oos_page, vm_list);
+ ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * The heart of the PPGTT shadow page table: propagate a guest page table
+ * entry write into both the guest copy and the shadow page table.
+ */
+static int ppgtt_handle_guest_write_page_table(
+ struct intel_vgpu_guest_page *gpt,
+ struct intel_gvt_gtt_entry *we, unsigned long index)
+{
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry ge;
+
+ int old_present, new_present;
+ int ret;
+
+ ppgtt_get_guest_entry(spt, &ge, index);
+
+ old_present = ops->test_present(&ge);
+ new_present = ops->test_present(we);
+
+ ppgtt_set_guest_entry(spt, we, index);
+
+ if (old_present) {
+ ret = ppgtt_handle_guest_entry_removal(gpt, &ge, index);
+ if (ret)
+ goto fail;
+ }
+ if (new_present) {
+ ret = ppgtt_handle_guest_entry_add(gpt, we, index);
+ if (ret)
+ goto fail;
+ }
+ return 0;
+fail:
+ gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
+ vgpu->id, spt, we->val64, we->type);
+ return ret;
+}
+
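+/*
+ * Heuristic: a PTE-level page table page that the guest has written at
+ * least twice is probably being rewritten wholesale, so trapping every
+ * write is wasteful. Such a page is taken out of sync and replayed in
+ * one pass by sync_oos_page() before the next workload submission.
+ */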
+static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
+{
+ return enable_out_of_sync
+ && gtt_type_is_pte_pt(
+ guest_page_to_ppgtt_spt(gpt)->guest_page_type)
+ && gpt->write_cnt >= 2;
+}
+
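+/*
+ * Defer shadowing of a partially written entry: park its index in
+ * post_shadow_bitmap and queue the page so that
+ * intel_vgpu_flush_post_shadow() replays the completed entries before
+ * the next workload is submitted.
+ */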
+static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
+ unsigned long index)
+{
+ set_bit(index, spt->post_shadow_bitmap);
+ if (!list_empty(&spt->post_shadow_list))
+ return;
+
+ list_add_tail(&spt->post_shadow_list,
+ &spt->vgpu->gtt.post_shadow_list_head);
+}
+
+/**
+ * intel_vgpu_flush_post_shadow - flush the post shadow transactions
+ * @vgpu: a vGPU
+ *
+ * This function is called before submitting a guest workload to the host,
+ * to flush all the post shadows for a vGPU.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_ppgtt_spt *spt;
+ struct intel_gvt_gtt_entry ge, e;
+ unsigned long index;
+ int ret;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
+ spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
+ post_shadow_list);
+
+ for_each_set_bit(index, spt->post_shadow_bitmap,
+ GTT_ENTRY_NUM_IN_ONE_PAGE) {
+ ppgtt_get_guest_entry(spt, &ge, index);
+ e = ge;
+ e.val64 = 0;
+ ppgtt_set_guest_entry(spt, &e, index);
+
+ ret = ppgtt_handle_guest_write_page_table(
+ &spt->guest_page, &ge, index);
+ if (ret)
+ return ret;
+ clear_bit(index, spt->post_shadow_bitmap);
+ }
+ list_del_init(&spt->post_shadow_list);
+ }
+ return 0;
+}
+
+static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+ u64 pa, void *p_data, int bytes)
+{
+ struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+ struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ struct intel_gvt_gtt_entry we;
+ unsigned long index;
+ int ret;
+
+ index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
+
+ ppgtt_get_guest_entry(spt, &we, index);
+ memcpy((void *)&we.val64 + (pa & (info->gtt_entry_size - 1)),
+ p_data, bytes);
+
+ ops->test_pse(&we);
+
+ if (bytes == info->gtt_entry_size) {
+ ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
+ if (ret)
+ return ret;
+ } else {
+ struct intel_gvt_gtt_entry ge;
+
+ ppgtt_get_guest_entry(spt, &ge, index);
+
+ if (!test_bit(index, spt->post_shadow_bitmap)) {
+ ret = ppgtt_handle_guest_entry_removal(gpt,
+ &ge, index);
+ if (ret)
+ return ret;
+ }
+
+ ppgtt_set_post_shadow(spt, index);
+ ppgtt_set_guest_entry(spt, &we, index);
+ }
+
+ if (!enable_out_of_sync)
+ return 0;
+
+ gpt->write_cnt++;
+
+ if (gpt->oos_page)
+ ops->set_entry(gpt->oos_page->mem, &we, index,
+ false, 0, vgpu);
+
+ if (can_do_out_of_sync(gpt)) {
+ if (!gpt->oos_page) {
+ ret = ppgtt_allocate_oos_page(vgpu, gpt);
+ if (ret)
+ return ret;
+ }
+
+ ret = ppgtt_set_guest_page_oos(vgpu, gpt);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * mm page table allocation policy for BDW+
+ * - for GGTT, only a virtual page table is allocated.
+ * - for PPGTT, dedicated virtual and shadow page tables are allocated.
+ */
+static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ void *mem;
+
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ mm->page_table_entry_cnt = 4;
+ mm->page_table_entry_size = mm->page_table_entry_cnt *
+ info->gtt_entry_size;
+ mem = kzalloc(mm->has_shadow_page_table ?
+ mm->page_table_entry_size * 2
+ : mm->page_table_entry_size,
+ GFP_ATOMIC);
+ if (!mem)
+ return -ENOMEM;
+ mm->virtual_page_table = mem;
+ if (!mm->has_shadow_page_table)
+ return 0;
+ mm->shadow_page_table = mem + mm->page_table_entry_size;
+ } else if (mm->type == INTEL_GVT_MM_GGTT) {
+ mm->page_table_entry_cnt =
+ (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
+ mm->page_table_entry_size = mm->page_table_entry_cnt *
+ info->gtt_entry_size;
+ mem = vzalloc(mm->page_table_entry_size);
+ if (!mem)
+ return -ENOMEM;
+ mm->virtual_page_table = mem;
+ }
+ return 0;
+}
+
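+/*
+ * Worked example, assuming 8-byte entries: a PPGTT mm needs only
+ * 4 * 8 = 32 bytes of virtual page table (64 bytes with the shadow
+ * copy), while a GGTT mm over a 4GB GM space needs
+ * (4GB >> GTT_PAGE_SHIFT) * 8 = 8MB, hence the vzalloc() above.
+ */
+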
+static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
+{
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ kfree(mm->virtual_page_table);
+ } else if (mm->type == INTEL_GVT_MM_GGTT) {
+ if (mm->virtual_page_table)
+ vfree(mm->virtual_page_table);
+ }
+ mm->virtual_page_table = mm->shadow_page_table = NULL;
+}
+
+static void invalidate_mm(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+ struct intel_gvt_gtt_entry se;
+ int i;
+
+ if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
+ return;
+
+ for (i = 0; i < mm->page_table_entry_cnt; i++) {
+ ppgtt_get_shadow_root_entry(mm, &se, i);
+ if (!ops->test_present(&se))
+ continue;
+ ppgtt_invalidate_shadow_page_by_shadow_entry(
+ vgpu, &se);
+ se.val64 = 0;
+ ppgtt_set_shadow_root_entry(mm, &se, i);
+
+ trace_gpt_change(vgpu->id, "destroy root pointer",
+ NULL, se.type, se.val64, i);
+ }
+ mm->shadowed = false;
+}
+
+/**
+ * intel_vgpu_destroy_mm - destroy a mm object
+ * @mm_ref: the kref embedded in a vGPU mm object
+ *
+ * This function is used to destroy a mm object for a vGPU
+ *
+ */
+void intel_vgpu_destroy_mm(struct kref *mm_ref)
+{
+ struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+
+ if (!mm->initialized)
+ goto out;
+
+ list_del(&mm->list);
+ list_del(&mm->lru_list);
+
+ if (mm->has_shadow_page_table)
+ invalidate_mm(mm);
+
+ gtt->mm_free_page_table(mm);
+out:
+ kfree(mm);
+}
+
+static int shadow_mm(struct intel_vgpu_mm *mm)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
+ struct intel_vgpu_ppgtt_spt *spt;
+ struct intel_gvt_gtt_entry ge, se;
+ int i;
+ int ret;
+
+ if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
+ return 0;
+
+ mm->shadowed = true;
+
+ for (i = 0; i < mm->page_table_entry_cnt; i++) {
+ ppgtt_get_guest_root_entry(mm, &ge, i);
+ if (!ops->test_present(&ge))
+ continue;
+
+ trace_gpt_change(vgpu->id, __func__, NULL,
+ ge.type, ge.val64, i);
+
+ spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+ if (IS_ERR(spt)) {
+ gvt_err("fail to populate guest root pointer\n");
+ ret = PTR_ERR(spt);
+ goto fail;
+ }
+ ppgtt_generate_shadow_entry(&se, spt, &ge);
+ ppgtt_set_shadow_root_entry(mm, &se, i);
+
+ trace_gpt_change(vgpu->id, "populate root pointer",
+ NULL, se.type, se.val64, i);
+ }
+ return 0;
+fail:
+ invalidate_mm(mm);
+ return ret;
+}
+
+/**
+ * intel_vgpu_create_mm - create a mm object for a vGPU
+ * @vgpu: a vGPU
+ * @mm_type: mm object type, should be PPGTT or GGTT
+ * @virtual_page_table: page table root pointers. Could be NULL if the user
+ * wants to populate the shadow later.
+ * @page_table_level: describe the page table level of the mm object
+ * @pde_base_index: pde root pointer base in GGTT MMIO.
+ *
+ * This function is used to create a mm object for a vGPU.
+ *
+ * Returns:
+ * The mm object on success, an ERR_PTR-encoded negative error code if failed.
+ */
+struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
+ int mm_type, void *virtual_page_table, int page_table_level,
+ u32 pde_base_index)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_vgpu_mm *mm;
+ int ret;
+
+ mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+ if (!mm) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mm->type = mm_type;
+
+ if (page_table_level == 1)
+ mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
+ else if (page_table_level == 3)
+ mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+ else if (page_table_level == 4)
+ mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ else {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mm->page_table_level = page_table_level;
+ mm->pde_base_index = pde_base_index;
+
+ mm->vgpu = vgpu;
+ mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
+
+ kref_init(&mm->ref);
+ atomic_set(&mm->pincount, 0);
+ INIT_LIST_HEAD(&mm->list);
+ INIT_LIST_HEAD(&mm->lru_list);
+ list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
+
+ ret = gtt->mm_alloc_page_table(mm);
+ if (ret) {
+ gvt_err("fail to allocate page table for mm\n");
+ goto fail;
+ }
+
+ mm->initialized = true;
+
+ if (virtual_page_table)
+ memcpy(mm->virtual_page_table, virtual_page_table,
+ mm->page_table_entry_size);
+
+ if (mm->has_shadow_page_table) {
+ ret = shadow_mm(mm);
+ if (ret)
+ goto fail;
+ list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
+ }
+ return mm;
+fail:
+ gvt_err("fail to create mm\n");
+ if (mm)
+ intel_gvt_mm_unreference(mm);
+ return ERR_PTR(ret);
+}
+
+/**
+ * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
+ * @mm: a vGPU mm object
+ *
+ * This function is called when a user is done with a vGPU mm object
+ */
+void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
+{
+ if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
+ return;
+
+ atomic_dec(&mm->pincount);
+}
+
+/**
+ * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
+ * @mm: a vGPU mm object
+ *
+ * This function is called when a user wants to use a vGPU mm object. If this
+ * mm object hasn't been shadowed yet, the shadow will be populated at this
+ * time.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
+{
+ int ret;
+
+ if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
+ return 0;
+
+ atomic_inc(&mm->pincount);
+
+ if (!mm->shadowed) {
+ ret = shadow_mm(mm);
+ if (ret)
+ return ret;
+ }
+
+ list_del_init(&mm->lru_list);
+ list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
+ return 0;
+}
+
+static int reclaim_one_mm(struct intel_gvt *gvt)
+{
+ struct intel_vgpu_mm *mm;
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, lru_list);
+
+ if (mm->type != INTEL_GVT_MM_PPGTT)
+ continue;
+ if (atomic_read(&mm->pincount))
+ continue;
+
+ list_del_init(&mm->lru_list);
+ invalidate_mm(mm);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * GMA translation APIs.
+ */
+static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_vgpu_ppgtt_spt *s;
+
+ if (WARN_ON(!mm->has_shadow_page_table))
+ return -EINVAL;
+
+ s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+ if (!s)
+ return -ENXIO;
+
+ if (!guest)
+ ppgtt_get_shadow_entry(s, e, index);
+ else
+ ppgtt_get_guest_entry(s, e, index);
+ return 0;
+}
+
+/**
+ * intel_vgpu_gma_to_gpa - translate a GMA to a GPA
+ * @mm: a mm object, either a PPGTT or a GGTT mm object
+ * @gma: graphics memory address in this mm object
+ *
+ * This function is used to translate a graphics memory address in a specific
+ * graphics memory space into a guest physical address.
+ *
+ * Returns:
+ * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
+{
+ struct intel_vgpu *vgpu = mm->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
+ unsigned long gpa = INTEL_GVT_INVALID_ADDR;
+ unsigned long gma_index[4];
+ struct intel_gvt_gtt_entry e;
+ int i, index;
+ int ret;
+
+ if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
+ return INTEL_GVT_INVALID_ADDR;
+
+ if (mm->type == INTEL_GVT_MM_GGTT) {
+ if (!vgpu_gmadr_is_valid(vgpu, gma))
+ goto err;
+
+ ggtt_get_guest_entry(mm, &e,
+ gma_ops->gma_to_ggtt_pte_index(gma));
+ gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ + (gma & ~GTT_PAGE_MASK);
+
+ trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
+ return gpa;
+ }
+
+ switch (mm->page_table_level) {
+ case 4:
+ ppgtt_get_shadow_root_entry(mm, &e, 0);
+ gma_index[0] = gma_ops->gma_to_pml4_index(gma);
+ gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
+ gma_index[2] = gma_ops->gma_to_pde_index(gma);
+ gma_index[3] = gma_ops->gma_to_pte_index(gma);
+ index = 4;
+ break;
+ case 3:
+ ppgtt_get_shadow_root_entry(mm, &e,
+ gma_ops->gma_to_l3_pdp_index(gma));
+ gma_index[0] = gma_ops->gma_to_pde_index(gma);
+ gma_index[1] = gma_ops->gma_to_pte_index(gma);
+ index = 2;
+ break;
+ case 2:
+ ppgtt_get_shadow_root_entry(mm, &e,
+ gma_ops->gma_to_pde_index(gma));
+ gma_index[0] = gma_ops->gma_to_pte_index(gma);
+ index = 1;
+ break;
+ default:
+ WARN_ON(1);
+ goto err;
+ }
+
+ /* walk into the shadow page table and get gpa from guest entry */
+ for (i = 0; i < index; i++) {
+ ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
+ (i == index - 1));
+ if (ret)
+ goto err;
+ }
+
+ gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ + (gma & ~GTT_PAGE_MASK);
+
+ trace_gma_translate(vgpu->id, "ppgtt", 0,
+ mm->page_table_level, gma, gpa);
+ return gpa;
+err:
+ gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
+ return INTEL_GVT_INVALID_ADDR;
+}
+
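+/*
+ * Worked example (assuming the canonical gen8 layout of 9-bit indexes
+ * above a 12-bit page offset): gma 0x12345678 decomposes into pml4
+ * index 0, pdp index 0, pde index 0x91, pte index 0x145 and in-page
+ * offset 0x678.
+ */
+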
+static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ unsigned long index = off >> info->gtt_entry_size_shift;
+ struct intel_gvt_gtt_entry e;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ ggtt_get_guest_entry(ggtt_mm, &e, index);
+ memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
+ bytes);
+ return 0;
+}
+
+/**
+ * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
+ * @vgpu: a vGPU
+ * @off: register offset
+ * @p_data: data will be returned to guest
+ * @bytes: data length
+ *
+ * This function is used to emulate the GTT MMIO register read
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ off -= info->gtt_start_offset;
+ ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
+ return ret;
+}
+
+static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
+ unsigned long gma;
+ struct intel_gvt_gtt_entry e, m;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ gma = g_gtt_index << GTT_PAGE_SHIFT;
+
+ /* the VM may configure the whole GM space when ballooning is used */
+ if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
+ "vgpu%d: found oob ggtt write, offset %x\n",
+ vgpu->id, off)) {
+ return 0;
+ }
+
+ ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+ memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
+ bytes);
+
+ if (ops->test_present(&e)) {
+ ret = gtt_entry_p2m(vgpu, &e, &m);
+ if (ret) {
+ gvt_err("vgpu%d: fail to translate guest gtt entry\n",
+ vgpu->id);
+ return ret;
+ }
+ } else {
+ m = e;
+ m.val64 = 0;
+ }
+
+ ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+ return 0;
+}
+
+/**
+ * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
+ * @vgpu: a vGPU
+ * @off: register offset
+ * @p_data: data from guest write
+ * @bytes: data length
+ *
+ * This function is used to emulate the GTT MMIO register write
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+ int ret;
+
+ if (bytes != 4 && bytes != 8)
+ return -EINVAL;
+
+ off -= info->gtt_start_offset;
+ ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
+ return ret;
+}
+
+static int create_scratch_page(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_gtt *gtt = &vgpu->gtt;
+ void *p;
+ void *vaddr;
+ unsigned long mfn;
+
+ gtt->scratch_page = alloc_page(GFP_KERNEL);
+ if (!gtt->scratch_page) {
+ gvt_err("Failed to allocate scratch page.\n");
+ return -ENOMEM;
+ }
+
+ /* set to zero */
+ p = kmap_atomic(gtt->scratch_page);
+ memset(p, 0, PAGE_SIZE);
+ kunmap_atomic(p);
+
+ /* translate page to mfn */
+ vaddr = page_address(gtt->scratch_page);
+ mfn = intel_gvt_hypervisor_virt_to_mfn(vaddr);
+
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate vaddr:0x%llx\n", (u64)vaddr);
+ __free_page(gtt->scratch_page);
+ gtt->scratch_page = NULL;
+ return -ENXIO;
+ }
+
+ gtt->scratch_page_mfn = mfn;
+ gvt_dbg_core("vgpu%d create scratch page: mfn=0x%lx\n", vgpu->id, mfn);
+ return 0;
+}
+
+static void release_scratch_page(struct intel_vgpu *vgpu)
+{
+ if (vgpu->gtt.scratch_page != NULL) {
+ __free_page(vgpu->gtt.scratch_page);
+ vgpu->gtt.scratch_page = NULL;
+ vgpu->gtt.scratch_page_mfn = 0;
+ }
+}
+
+/**
+ * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
+ * @vgpu: a vGPU
+ *
+ * This function is used to initialize per-vGPU graphics memory virtualization
+ * components.
+ *
+ * Returns:
+ * Zero on success, error code if failed.
+ */
+int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_gtt *gtt = &vgpu->gtt;
+ struct intel_vgpu_mm *ggtt_mm;
+
+ hash_init(gtt->guest_page_hash_table);
+ hash_init(gtt->shadow_page_hash_table);
+
+ INIT_LIST_HEAD(&gtt->mm_list_head);
+ INIT_LIST_HEAD(&gtt->oos_page_list_head);
+ INIT_LIST_HEAD(&gtt->post_shadow_list_head);
+
+ ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
+ NULL, 1, 0);
+ if (IS_ERR(ggtt_mm)) {
+ gvt_err("fail to create mm for ggtt.\n");
+ return PTR_ERR(ggtt_mm);
+ }
+
+ gtt->ggtt_mm = ggtt_mm;
+
+ return create_scratch_page(vgpu);
+}
+
+/**
+ * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
+ * @vgpu: a vGPU
+ *
+ * This function is used to clean up per-vGPU graphics memory virtualization
+ * components.
+ */
+void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_mm *mm;
+
+ ppgtt_free_all_shadow_page(vgpu);
+ release_scratch_page(vgpu);
+
+ list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, list);
+ vgpu->gvt->gtt.mm_free_page_table(mm);
+ list_del(&mm->list);
+ list_del(&mm->lru_list);
+ kfree(mm);
+ }
+}
+
+static void clean_spt_oos(struct intel_gvt *gvt)
+{
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct list_head *pos, *n;
+ struct intel_vgpu_oos_page *oos_page;
+
+ WARN(!list_empty(&gtt->oos_page_use_list_head),
+ "someone is still using oos page\n");
+
+ list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
+ oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
+ list_del(&oos_page->list);
+ kfree(oos_page);
+ }
+}
+
+static int setup_spt_oos(struct intel_gvt *gvt)
+{
+ struct intel_gvt_gtt *gtt = &gvt->gtt;
+ struct intel_vgpu_oos_page *oos_page;
+ int i;
+ int ret;
+
+ INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
+ INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
+
+ for (i = 0; i < preallocated_oos_pages; i++) {
+ oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
+ if (!oos_page) {
+ gvt_err("fail to pre-allocate oos page\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&oos_page->list);
+ INIT_LIST_HEAD(&oos_page->vm_list);
+ oos_page->id = i;
+ list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
+ }
+
+ gvt_dbg_mm("%d oos pages preallocated\n", i);
+
+ return 0;
+fail:
+ clean_spt_oos(gvt);
+ return ret;
+}
+
+/**
+ * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ * @root_entry: PPGTT page table root pointers
+ *
+ * This function is used to find a PPGTT mm object from mm object pool
+ *
+ * Returns:
+ * pointer to mm object on success, NULL if failed.
+ */
+struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level, void *root_entry)
+{
+ struct list_head *pos;
+ struct intel_vgpu_mm *mm;
+ u64 *src, *dst;
+
+ list_for_each(pos, &vgpu->gtt.mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, list);
+ if (mm->type != INTEL_GVT_MM_PPGTT)
+ continue;
+
+ if (mm->page_table_level != page_table_level)
+ continue;
+
+ src = root_entry;
+ dst = mm->virtual_page_table;
+
+ if (page_table_level == 3) {
+ if (src[0] == dst[0]
+ && src[1] == dst[1]
+ && src[2] == dst[2]
+ && src[3] == dst[3])
+ return mm;
+ } else {
+ if (src[0] == dst[0])
+ return mm;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
+ * g2v notification
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ *
+ * This function is used to create a PPGTT mm object from a guest to GVT-g
+ * notification.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level)
+{
+ u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+ struct intel_vgpu_mm *mm;
+
+ if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
+ return -EINVAL;
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+ if (mm) {
+ intel_gvt_mm_reference(mm);
+ } else {
+ mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
+ pdp, page_table_level, 0);
+ if (IS_ERR(mm)) {
+ gvt_err("fail to create mm\n");
+ return PTR_ERR(mm);
+ }
+ }
+ return 0;
+}
+
+/**
+ * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
+ * g2v notification
+ * @vgpu: a vGPU
+ * @page_table_level: PPGTT page table level
+ *
+ * This function is used to destroy a PPGTT mm object from a guest to GVT-g
+ * notification.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level)
+{
+ u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+ struct intel_vgpu_mm *mm;
+
+ if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
+ return -EINVAL;
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+ if (!mm) {
+ gvt_err("fail to find ppgtt instance.\n");
+ return -EINVAL;
+ }
+ intel_gvt_mm_unreference(mm);
+ return 0;
+}
+
+/**
+ * intel_gvt_init_gtt - initialize mm components of a GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the initialization stage, to initialize
+ * the mm components of a GVT device.
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_gvt_init_gtt(struct intel_gvt *gvt)
+{
+ int ret;
+
+ gvt_dbg_core("init gtt\n");
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
+ gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
+ gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
+ gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
+ } else {
+ return -ENODEV;
+ }
+
+ if (enable_out_of_sync) {
+ ret = setup_spt_oos(gvt);
+ if (ret) {
+ gvt_err("fail to initialize SPT oos\n");
+ return ret;
+ }
+ }
+ INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
+ return 0;
+}
+
+/**
+ * intel_gvt_clean_gtt - clean up mm components of a GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the driver unloading stage, to clean up the
+ * mm components of a GVT device.
+ *
+ */
+void intel_gvt_clean_gtt(struct intel_gvt *gvt)
+{
+ if (enable_out_of_sync)
+ clean_spt_oos(gvt);
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
new file mode 100644
index 000000000000..e4dcde78f3f9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Xiao Zheng <xiao.zheng@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#ifndef _GVT_GTT_H_
+#define _GVT_GTT_H_
+
+#define GTT_PAGE_SHIFT 12
+#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT)
+#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1))
+
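+/*
+ * e.g. with GTT_PAGE_SHIFT == 12: GTT_PAGE_SIZE == 0x1000 and
+ * GTT_PAGE_MASK == ~0xfffUL, so (gma & ~GTT_PAGE_MASK) in gtt.c yields
+ * the in-page offset of a graphics memory address.
+ */
+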
+struct intel_vgpu_mm;
+
+#define INTEL_GVT_GTT_HASH_BITS 8
+#define INTEL_GVT_INVALID_ADDR (~0UL)
+
+struct intel_gvt_gtt_entry {
+ u64 val64;
+ int type;
+};
+
+struct intel_gvt_gtt_pte_ops {
+ struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu);
+ struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index, bool hypervisor_access, unsigned long gpa,
+ struct intel_vgpu *vgpu);
+ bool (*test_present)(struct intel_gvt_gtt_entry *e);
+ void (*clear_present)(struct intel_gvt_gtt_entry *e);
+ bool (*test_pse)(struct intel_gvt_gtt_entry *e);
+ void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
+ unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
+};
+
+struct intel_gvt_gtt_gma_ops {
+ unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
+ unsigned long (*gma_to_pte_index)(unsigned long gma);
+ unsigned long (*gma_to_pde_index)(unsigned long gma);
+ unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
+ unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
+ unsigned long (*gma_to_pml4_index)(unsigned long gma);
+};
+
+struct intel_gvt_gtt {
+ struct intel_gvt_gtt_pte_ops *pte_ops;
+ struct intel_gvt_gtt_gma_ops *gma_ops;
+ int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
+ void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
+ struct list_head oos_page_use_list_head;
+ struct list_head oos_page_free_list_head;
+ struct list_head mm_lru_list_head;
+};
+
+enum {
+ INTEL_GVT_MM_GGTT = 0,
+ INTEL_GVT_MM_PPGTT,
+};
+
+struct intel_vgpu_mm {
+ int type;
+ bool initialized;
+ bool shadowed;
+
+ int page_table_entry_type;
+ u32 page_table_entry_size;
+ u32 page_table_entry_cnt;
+ void *virtual_page_table;
+ void *shadow_page_table;
+
+ int page_table_level;
+ bool has_shadow_page_table;
+ u32 pde_base_index;
+
+ struct list_head list;
+ struct kref ref;
+ atomic_t pincount;
+ struct list_head lru_list;
+ struct intel_vgpu *vgpu;
+};
+
+extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
+ struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index);
+
+extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
+ struct intel_vgpu_mm *mm,
+ void *page_table, struct intel_gvt_gtt_entry *e,
+ unsigned long index);
+
+#define ggtt_get_guest_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+
+#define ggtt_set_guest_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+
+#define ggtt_get_shadow_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+
+#define ggtt_set_shadow_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+
+#define ppgtt_get_guest_root_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+
+#define ppgtt_set_guest_root_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+
+#define ppgtt_get_shadow_root_entry(mm, e, index) \
+ intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+
+#define ppgtt_set_shadow_root_entry(mm, e, index) \
+ intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+
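+/*
+ * A minimal usage sketch (illustrative only, not built): the root-entry
+ * macros above merely select which of the two side-by-side tables
+ * intel_vgpu_mm_get_entry()/intel_vgpu_mm_set_entry() operate on.
+ */
+#if 0
+static inline u64 example_read_shadow_root(struct intel_vgpu_mm *mm)
+{
+ struct intel_gvt_gtt_entry e;
+
+ ppgtt_get_shadow_root_entry(mm, &e, 0); /* root pointer 0 */
+ return e.val64; /* raw shadow root entry */
+}
+#endif
+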
+extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
+ int mm_type, void *virtual_page_table, int page_table_level,
+ u32 pde_base_index);
+extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
+
+struct intel_vgpu_guest_page;
+
+struct intel_vgpu_gtt {
+ struct intel_vgpu_mm *ggtt_mm;
+ unsigned long active_ppgtt_mm_bitmap;
+ struct list_head mm_list_head;
+ DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
+ DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
+ atomic_t n_write_protected_guest_page;
+ struct list_head oos_page_list_head;
+ struct list_head post_shadow_list_head;
+ struct page *scratch_page;
+ unsigned long scratch_page_mfn;
+};
+
+extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
+extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+
+extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
+
+extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level, void *root_entry);
+
+struct intel_vgpu_oos_page;
+
+struct intel_vgpu_shadow_page {
+ void *vaddr;
+ struct page *page;
+ int type;
+ struct hlist_node node;
+ unsigned long mfn;
+};
+
+struct intel_vgpu_guest_page {
+ struct hlist_node node;
+ bool writeprotection;
+ unsigned long gfn;
+ int (*handler)(void *, u64, void *, int);
+ void *data;
+ unsigned long write_cnt;
+ struct intel_vgpu_oos_page *oos_page;
+};
+
+struct intel_vgpu_oos_page {
+ struct intel_vgpu_guest_page *guest_page;
+ struct list_head list;
+ struct list_head vm_list;
+ int id;
+ unsigned char mem[GTT_PAGE_SIZE];
+};
+
+#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
+
+struct intel_vgpu_ppgtt_spt {
+ struct intel_vgpu_shadow_page shadow_page;
+ struct intel_vgpu_guest_page guest_page;
+ int guest_page_type;
+ atomic_t refcount;
+ struct intel_vgpu *vgpu;
+ DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
+ struct list_head post_shadow_list;
+};
+
+int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page,
+ unsigned long gfn,
+ int (*handler)(void *gp, u64, void *, int),
+ void *data);
+
+void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page);
+
+int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page);
+
+void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *guest_page);
+
+struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
+ struct intel_vgpu *vgpu, unsigned long gfn);
+
+int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
+
+int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
+
+static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
+{
+ kref_get(&mm->ref);
+}
+
+static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
+{
+ kref_put(&mm->ref, intel_vgpu_destroy_mm);
+}
+
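+/*
+ * Lifecycle sketch (illustrative only, not built): intel_vgpu_create_mm()
+ * returns an mm holding one reference; the final unreference destroys it
+ * through the kref.
+ */
+#if 0
+static inline void example_mm_lifecycle(struct intel_vgpu_mm *mm)
+{
+ intel_gvt_mm_reference(mm); /* ref: 1 -> 2 */
+ intel_gvt_mm_unreference(mm); /* ref: 2 -> 1 */
+ intel_gvt_mm_unreference(mm); /* ref: 0, intel_vgpu_destroy_mm() runs */
+}
+#endif
+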
+int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
+
+void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
+
+unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
+ unsigned long gma);
+
+struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level, void *root_entry);
+
+int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level);
+
+int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level);
+
+int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes);
+
+int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes);
+
+#endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 927f4579f5b6..31b59d40f3fb 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -19,12 +19,23 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ *
+ * Contributors:
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#include <linux/types.h>
#include <xen/xen.h>
+#include <linux/kthread.h>
#include "i915_drv.h"
+#include "gvt.h"
struct intel_gvt_host intel_gvt_host;
@@ -33,6 +44,13 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
+struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = {
+ .emulate_cfg_read = intel_vgpu_emulate_cfg_read,
+ .emulate_cfg_write = intel_vgpu_emulate_cfg_write,
+ .emulate_mmio_read = intel_vgpu_emulate_mmio_read,
+ .emulate_mmio_write = intel_vgpu_emulate_mmio_write,
+};
+
/**
* intel_gvt_init_host - Load MPT modules and detect if we're running in host
* @gvt: intel gvt device
@@ -84,9 +102,66 @@ int intel_gvt_init_host(void)
static void init_device_info(struct intel_gvt *gvt)
{
- if (IS_BROADWELL(gvt->dev_priv))
- gvt->device_info.max_support_vgpus = 8;
- /* This function will grow large in GVT device model patches. */
+ struct intel_gvt_device_info *info = &gvt->device_info;
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ info->max_support_vgpus = 8;
+ info->cfg_space_size = 256;
+ info->mmio_size = 2 * 1024 * 1024;
+ info->mmio_bar = 0;
+ info->msi_cap_offset = IS_SKYLAKE(gvt->dev_priv) ? 0xac : 0x90;
+ info->gtt_start_offset = 8 * 1024 * 1024;
+ info->gtt_entry_size = 8;
+ info->gtt_entry_size_shift = 3;
+ info->gmadr_bytes_in_cmd = 8;
+ info->max_surface_size = 36 * 1024 * 1024;
+ }
+}
+
+static int gvt_service_thread(void *data)
+{
+ struct intel_gvt *gvt = (struct intel_gvt *)data;
+ int ret;
+
+ gvt_dbg_core("service thread start\n");
+
+ while (!kthread_should_stop()) {
+ ret = wait_event_interruptible(gvt->service_thread_wq,
+ kthread_should_stop() || gvt->service_request);
+
+ if (kthread_should_stop())
+ break;
+
+ if (WARN_ONCE(ret, "service thread was woken up by a signal.\n"))
+ continue;
+
+ if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
+ (void *)&gvt->service_request)) {
+ mutex_lock(&gvt->lock);
+ intel_gvt_emulate_vblank(gvt);
+ mutex_unlock(&gvt->lock);
+ }
+ }
+
+ return 0;
+}
+
+static void clean_service_thread(struct intel_gvt *gvt)
+{
+ kthread_stop(gvt->service_thread);
+}
+
+static int init_service_thread(struct intel_gvt *gvt)
+{
+ init_waitqueue_head(&gvt->service_thread_wq);
+
+ gvt->service_thread = kthread_run(gvt_service_thread,
+ gvt, "gvt_service_thread");
+ if (IS_ERR(gvt->service_thread)) {
+ gvt_err("fail to start service thread.\n");
+ return PTR_ERR(gvt->service_thread);
+ }
+ return 0;
}
/**
@@ -99,14 +174,23 @@ static void init_device_info(struct intel_gvt *gvt)
*/
void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
{
- struct intel_gvt *gvt = &dev_priv->gvt;
+ struct intel_gvt *gvt = to_gvt(dev_priv);
- if (WARN_ON(!gvt->initialized))
+ if (WARN_ON(!gvt))
return;
- /* Other de-initialization of GVT components will be introduced. */
+ clean_service_thread(gvt);
+ intel_gvt_clean_cmd_parser(gvt);
+ intel_gvt_clean_sched_policy(gvt);
+ intel_gvt_clean_workload_scheduler(gvt);
+ intel_gvt_clean_opregion(gvt);
+ intel_gvt_clean_gtt(gvt);
+ intel_gvt_clean_irq(gvt);
+ intel_gvt_clean_mmio_info(gvt);
+ intel_gvt_free_firmware(gvt);
- gvt->initialized = false;
+ kfree(dev_priv->gvt);
+ dev_priv->gvt = NULL;
}
/**
@@ -122,7 +206,9 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
*/
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
- struct intel_gvt *gvt = &dev_priv->gvt;
+ struct intel_gvt *gvt;
+ int ret;
+
/*
* Cannot initialize GVT device without intel_gvt_host gets
* initialized first.
@@ -130,16 +216,76 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (WARN_ON(!intel_gvt_host.initialized))
return -EINVAL;
- if (WARN_ON(gvt->initialized))
+ if (WARN_ON(dev_priv->gvt))
return -EEXIST;
+ gvt = kzalloc(sizeof(*gvt), GFP_KERNEL);
+ if (!gvt)
+ return -ENOMEM;
+
gvt_dbg_core("init gvt device\n");
+ mutex_init(&gvt->lock);
+ gvt->dev_priv = dev_priv;
+
init_device_info(gvt);
- /*
- * Other initialization of GVT components will be introduce here.
- */
+
+ ret = intel_gvt_setup_mmio_info(gvt);
+ if (ret)
+ return ret;
+
+ ret = intel_gvt_load_firmware(gvt);
+ if (ret)
+ goto out_clean_mmio_info;
+
+ ret = intel_gvt_init_irq(gvt);
+ if (ret)
+ goto out_free_firmware;
+
+ ret = intel_gvt_init_gtt(gvt);
+ if (ret)
+ goto out_clean_irq;
+
+ ret = intel_gvt_init_opregion(gvt);
+ if (ret)
+ goto out_clean_gtt;
+
+ ret = intel_gvt_init_workload_scheduler(gvt);
+ if (ret)
+ goto out_clean_opregion;
+
+ ret = intel_gvt_init_sched_policy(gvt);
+ if (ret)
+ goto out_clean_workload_scheduler;
+
+ ret = intel_gvt_init_cmd_parser(gvt);
+ if (ret)
+ goto out_clean_sched_policy;
+
+ ret = init_service_thread(gvt);
+ if (ret)
+ goto out_clean_cmd_parser;
+
gvt_dbg_core("gvt device creation is done\n");
- gvt->initialized = true;
+ dev_priv->gvt = gvt;
return 0;
+
+out_clean_cmd_parser:
+ intel_gvt_clean_cmd_parser(gvt);
+out_clean_sched_policy:
+ intel_gvt_clean_sched_policy(gvt);
+out_clean_workload_scheduler:
+ intel_gvt_clean_workload_scheduler(gvt);
+out_clean_opregion:
+ intel_gvt_clean_opregion(gvt);
+out_clean_gtt:
+ intel_gvt_clean_gtt(gvt);
+out_clean_irq:
+ intel_gvt_clean_irq(gvt);
+out_free_firmware:
+ intel_gvt_free_firmware(gvt);
+out_clean_mmio_info:
+ intel_gvt_clean_mmio_info(gvt);
+ kfree(gvt);
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index fb619a6e519d..11df62b542b1 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ *
+ * Contributors:
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#ifndef _GVT_H_
@@ -26,6 +35,17 @@
#include "debug.h"
#include "hypercall.h"
+#include "mmio.h"
+#include "reg.h"
+#include "interrupt.h"
+#include "gtt.h"
+#include "display.h"
+#include "edid.h"
+#include "execlist.h"
+#include "scheduler.h"
+#include "sched_policy.h"
+#include "render.h"
+#include "cmd_parser.h"
#define GVT_MAX_VGPU 8
@@ -45,25 +65,324 @@ extern struct intel_gvt_host intel_gvt_host;
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
u32 max_support_vgpus;
- /* This data structure will grow bigger in GVT device model patches */
+ u32 cfg_space_size;
+ u32 mmio_size;
+ u32 mmio_bar;
+ unsigned long msi_cap_offset;
+ u32 gtt_start_offset;
+ u32 gtt_entry_size;
+ u32 gtt_entry_size_shift;
+ int gmadr_bytes_in_cmd;
+ u32 max_surface_size;
+};
+
+/* GM resources owned by a vGPU */
+struct intel_vgpu_gm {
+ u64 aperture_sz;
+ u64 hidden_sz;
+ struct drm_mm_node low_gm_node;
+ struct drm_mm_node high_gm_node;
+};
+
+#define INTEL_GVT_MAX_NUM_FENCES 32
+
+/* Fences owned by a vGPU */
+struct intel_vgpu_fence {
+ struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
+ u32 base;
+ u32 size;
+};
+
+struct intel_vgpu_mmio {
+ void *vreg;
+ void *sreg;
+ bool disable_warn_untrack;
+};
+
+#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
+#define INTEL_GVT_MAX_BAR_NUM 4
+
+struct intel_vgpu_pci_bar {
+ u64 size;
+ bool tracked;
+};
+
+struct intel_vgpu_cfg_space {
+ unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
+ struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
+};
+
+#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
+
+#define INTEL_GVT_MAX_PIPE 4
+
+struct intel_vgpu_irq {
+ bool irq_warn_once[INTEL_GVT_EVENT_MAX];
+ DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+ INTEL_GVT_EVENT_MAX);
+};
+
+struct intel_vgpu_opregion {
+ void *va;
+ u32 gfn[INTEL_GVT_OPREGION_PAGES];
+ struct page *pages[INTEL_GVT_OPREGION_PAGES];
+};
+
+#define vgpu_opregion(vgpu) (&(vgpu->opregion))
+
+#define INTEL_GVT_MAX_PORT 5
+
+struct intel_vgpu_display {
+ struct intel_vgpu_i2c_edid i2c_edid;
+ struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
+ struct intel_vgpu_sbi sbi;
};
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
+ bool active;
+ bool resetting;
+ void *sched_data;
+
+ struct intel_vgpu_fence fence;
+ struct intel_vgpu_gm gm;
+ struct intel_vgpu_cfg_space cfg_space;
+ struct intel_vgpu_mmio mmio;
+ struct intel_vgpu_irq irq;
+ struct intel_vgpu_gtt gtt;
+ struct intel_vgpu_opregion opregion;
+ struct intel_vgpu_display display;
+ struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
+ struct list_head workload_q_head[I915_NUM_ENGINES];
+ struct kmem_cache *workloads;
+ atomic_t running_workload_num;
+ DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
+ struct i915_gem_context *shadow_ctx;
+ struct notifier_block shadow_ctx_notifier_block;
+};
+
+struct intel_gvt_gm {
+ unsigned long vgpu_allocated_low_gm_size;
+ unsigned long vgpu_allocated_high_gm_size;
+};
+
+struct intel_gvt_fence {
+ unsigned long vgpu_allocated_fence_num;
+};
+
+#define INTEL_GVT_MMIO_HASH_BITS 9
+
+struct intel_gvt_mmio {
+ u32 *mmio_attribute;
+ DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
+};
+
+struct intel_gvt_firmware {
+ void *cfg_space;
+ void *mmio;
+ bool firmware_loaded;
+};
+
+struct intel_gvt_opregion {
+ void __iomem *opregion_va;
+ u32 opregion_pa;
};
struct intel_gvt {
struct mutex lock;
- bool initialized;
-
struct drm_i915_private *dev_priv;
struct idr vgpu_idr; /* vGPU IDR pool */
struct intel_gvt_device_info device_info;
+ struct intel_gvt_gm gm;
+ struct intel_gvt_fence fence;
+ struct intel_gvt_mmio mmio;
+ struct intel_gvt_firmware firmware;
+ struct intel_gvt_irq irq;
+ struct intel_gvt_gtt gtt;
+ struct intel_gvt_opregion opregion;
+ struct intel_gvt_workload_scheduler scheduler;
+ DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
+
+ struct task_struct *service_thread;
+ wait_queue_head_t service_thread_wq;
+ unsigned long service_request;
};
+static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
+{
+ return i915->gvt;
+}
+
+enum {
+ INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
+};
+
+static inline void intel_gvt_request_service(struct intel_gvt *gvt,
+ int service)
+{
+ set_bit(service, (void *)&gvt->service_request);
+ wake_up(&gvt->service_thread_wq);
+}
+
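+/*
+ * Usage sketch (illustrative only, not built): raising a service request
+ * from another context. gvt_service_thread() in gvt.c wakes up,
+ * test-and-clears the bit and handles it under gvt->lock.
+ */
+#if 0
+static inline void example_request_vblank(struct intel_gvt *gvt)
+{
+ intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
+}
+#endif
+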
+void intel_gvt_free_firmware(struct intel_gvt *gvt);
+int intel_gvt_load_firmware(struct intel_gvt *gvt);
+
+/* Aperture/GM space definitions for GVT device */
+#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+
+#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
+#define gvt_ggtt_sz(gvt) \
+ ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+
+#define gvt_aperture_gmadr_base(gvt) (0)
+#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
+ + gvt_aperture_sz(gvt) - 1)
+
+#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
+ + gvt_aperture_sz(gvt))
+#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ + gvt_hidden_sz(gvt) - 1)
+
+#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+
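+/*
+ * Worked example (illustrative figures): with a 512MB mappable aperture
+ * inside a 4GB GGTT space,
+ * aperture gmadr: [0, 512MB - 1]
+ * hidden gmadr: [512MB, 4GB - 1]
+ * so gvt_hidden_sz() == 4GB - 512MB == 3.5GB.
+ */
+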
+/* Aperture/GM space definitions for vGPU */
+#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
+#define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
+#define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
+#define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)
+
+#define vgpu_aperture_pa_base(vgpu) \
+ (gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))
+
+#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)
+
+#define vgpu_aperture_pa_end(vgpu) \
+ (vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
+
+#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
+#define vgpu_aperture_gmadr_end(vgpu) \
+ (vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
+
+#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
+#define vgpu_hidden_gmadr_end(vgpu) \
+ (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
+
+#define vgpu_fence_base(vgpu) (vgpu->fence.base)
+#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
+
+struct intel_vgpu_creation_params {
+ __u64 handle;
+ __u64 low_gm_sz; /* in MB */
+ __u64 high_gm_sz; /* in MB */
+ __u64 fence_sz;
+ __s32 primary;
+ __u64 vgpu_id;
+};
+
+int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param);
+void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
+void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
+ u32 fence, u64 value);
+
+/* Macros for easily accessing vGPU virtual/shadow register */
+#define vgpu_vreg(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg8(vgpu, reg) \
+ (*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg16(vgpu, reg) \
+ (*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_vreg64(vgpu, reg) \
+ (*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg8(vgpu, reg) \
+ (*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg16(vgpu, reg) \
+ (*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+#define vgpu_sreg64(vgpu, reg) \
+ (*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+
+#define for_each_active_vgpu(gvt, vgpu, id) \
+ idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
+ for_each_if(vgpu->active)
+
+static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
+ u32 offset, u32 val, bool low)
+{
+ u32 *pval;
+
+ /* BAR offset should be 32-bit aligned */
+ offset = rounddown(offset, 4);
+ pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
+
+ if (low) {
+ /*
+ * only update bit 31 - bit 4,
+ * leave the bit 3 - bit 0 unchanged.
+ */
+ *pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
+ }
+}
+
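+/*
+ * Worked example: GENMASK(31, 4) == 0xfffffff0 and GENMASK(3, 0) == 0xf,
+ * so a guest write of 0xdeadbeef to the low BAR dword yields
+ * *pval = (0xdeadbeef & 0xfffffff0) | (*pval & 0xf),
+ * keeping the four read-only flag bits intact.
+ */
+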
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_creation_params *
+ param);
+
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+
+/* validating GM functions */
+#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
+ ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
+ (gmadr <= vgpu_aperture_gmadr_end(vgpu)))
+
+#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
+ ((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
+ (gmadr <= vgpu_hidden_gmadr_end(vgpu)))
+
+#define vgpu_gmadr_is_valid(vgpu, gmadr) \
+ ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
+ (vgpu_gmadr_is_hidden(vgpu, gmadr))))
+
+#define gvt_gmadr_is_aperture(gvt, gmadr) \
+ ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
+ (gmadr <= gvt_aperture_gmadr_end(gvt)))
+
+#define gvt_gmadr_is_hidden(gvt, gmadr) \
+ ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
+ (gmadr <= gvt_hidden_gmadr_end(gvt)))
+
+#define gvt_gmadr_is_valid(gvt, gmadr) \
+ (gvt_gmadr_is_aperture(gvt, gmadr) || \
+ gvt_gmadr_is_hidden(gvt, gmadr))
+
+bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
+int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
+int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
+int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
+ unsigned long *h_index);
+int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
+ unsigned long *g_index);
+
+int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+
+int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+
+void intel_gvt_clean_opregion(struct intel_gvt *gvt);
+int intel_gvt_init_opregion(struct intel_gvt *gvt);
+
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
+
+int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
+
#include "mpt.h"
#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
new file mode 100644
index 000000000000..3e74fb3d4aa9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -0,0 +1,2797 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Pei Zhang <pei.zhang@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+/* XXX FIXME i915 has changed PP_XXX definition */
+#define PCH_PP_STATUS _MMIO(0xc7200)
+#define PCH_PP_CONTROL _MMIO(0xc7204)
+#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
+#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
+#define PCH_PP_DIVISOR _MMIO(0xc7210)
+
+/* Register contains RO bits */
+#define F_RO (1 << 0)
+/* Register contains graphics address */
+#define F_GMADR (1 << 1)
+/* Mode mask registers with high 16 bits as the mask bits */
+#define F_MODE_MASK (1 << 2)
+/* This reg can be accessed by GPU commands */
+#define F_CMD_ACCESS (1 << 3)
+/* This reg has been accessed by a VM */
+#define F_ACCESSED (1 << 4)
+/* This reg has been accessed through GPU commands */
+#define F_CMD_ACCESSED (1 << 5)
+/* This reg could be accessed by unaligned address */
+#define F_UNALIGN (1 << 6)
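+
+/*
+ * A minimal sketch of how the attribute bits are stored and tested:
+ * one attribute word per 4-byte register, indexed by offset / 4.
+ * The example_* helper is illustrative only, not part of the table
+ * below.
+ */
+static inline bool example_reg_is_cmd_accessible(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset / 4] & F_CMD_ACCESS;
+}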
+
+unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
+{
+ if (IS_BROADWELL(gvt->dev_priv))
+ return D_BDW;
+ else if (IS_SKYLAKE(gvt->dev_priv))
+ return D_SKL;
+
+ return 0;
+}
+
+bool intel_gvt_match_device(struct intel_gvt *gvt,
+ unsigned long device)
+{
+ return intel_gvt_get_device_type(gvt) & device;
+}
+
+static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
+}
+
+static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
+}
+
+static int new_mmio_info(struct intel_gvt *gvt,
+ u32 offset, u32 flags, u32 size,
+ u32 addr_mask, u32 ro_mask, u32 device,
+ void *read, void *write)
+{
+ struct intel_gvt_mmio_info *info, *p;
+ u32 start, end, i;
+
+ if (!intel_gvt_match_device(gvt, device))
+ return 0;
+
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+
+ start = offset;
+ end = offset + size;
+
+ for (i = start; i < end; i += 4) {
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->offset = i;
+ p = intel_gvt_find_mmio_info(gvt, info->offset);
+ if (p)
+ gvt_err("dup mmio definition offset %x\n",
+ info->offset);
+ info->size = size;
+ info->length = (i + 4) < end ? 4 : (end - i);
+ info->addr_mask = addr_mask;
+ info->device = device;
+ info->read = read ? read : intel_vgpu_default_mmio_read;
+ info->write = write ? write : intel_vgpu_default_mmio_write;
+ gvt->mmio.mmio_attribute[info->offset / 4] = flags;
+ INIT_HLIST_NODE(&info->node);
+ hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
+ }
+ return 0;
+}
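+
+/*
+ * Usage sketch (the example_* helper and the 0x64010 offset are
+ * illustrative only): a 16-byte block registered this way is split
+ * into four 4-byte entries (+0, +4, +8, +0xc) sharing the same
+ * handlers, so a lookup on any aligned offset inside the block hits.
+ */
+static int __maybe_unused example_register_block(struct intel_gvt *gvt)
+{
+ return new_mmio_info(gvt, 0x64010, 0, 16, 0, 0, D_ALL,
+ NULL, NULL);
+}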
+
+static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
+{
+ enum intel_engine_id id;
+ struct intel_engine_cs *engine;
+
+ reg &= ~GENMASK(11, 0);
+ for_each_engine(engine, gvt->dev_priv, id) {
+ if (engine->mmio_base == reg)
+ return id;
+ }
+ return -1;
+}
+
+#define offset_to_fence_num(offset) \
+ ((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)
+
+#define fence_num_to_offset(num) \
+ (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
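+
+/*
+ * Worked example (illustrative helper): GEN6 fence registers are
+ * 64 bits wide, so fence N sits 8 * N bytes past FENCE_REG_GEN6_LO(0)
+ * and the two macros above are inverses of each other for the LO dword.
+ */
+static inline unsigned int example_fence_roundtrip(unsigned int num)
+{
+ return offset_to_fence_num(fence_num_to_offset(num)); /* == num */
+}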
+
+static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
+ unsigned int fence_num, void *p_data, unsigned int bytes)
+{
+ if (fence_num >= vgpu_fence_sz(vgpu)) {
+ gvt_err("vgpu%d: found oob fence register access\n",
+ vgpu->id);
+ gvt_err("vgpu%d: total fence num %d access fence num %d\n",
+ vgpu->id, vgpu_fence_sz(vgpu), fence_num);
+ memset(p_data, 0, bytes);
+ }
+ return 0;
+}
+
+static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ int ret;
+
+ ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
+ p_data, bytes);
+ if (ret)
+ return ret;
+ read_vreg(vgpu, off, p_data, bytes);
+ return 0;
+}
+
+static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int fence_num = offset_to_fence_num(off);
+ int ret;
+
+ ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
+ if (ret)
+ return ret;
+ write_vreg(vgpu, off, p_data, bytes);
+
+ intel_vgpu_write_fence(vgpu, fence_num,
+ vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
+ return 0;
+}
+
+#define CALC_MODE_MASK_REG(old, new) \
+ (((new) & GENMASK(31, 16)) \
+ | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
+ | ((new) & ((new) >> 16))))
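+
+/*
+ * Worked example of the masked-write semantics above: the high 16 bits
+ * of the new value select which low 16 bits take effect.  With
+ * old = 0x0000ffff and new = 0x00010000, only bit 0 is selected and it
+ * is written as 0, so CALC_MODE_MASK_REG(old, new) == 0x0001fffe; the
+ * other low bits keep their old value.
+ */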
+
+static int mul_force_wake_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 old, new;
+ uint32_t ack_reg_offset;
+
+ old = vgpu_vreg(vgpu, offset);
+ new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
+
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ switch (offset) {
+ case FORCEWAKE_RENDER_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
+ break;
+ case FORCEWAKE_BLITTER_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG;
+ break;
+ case FORCEWAKE_MEDIA_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
+ break;
+ default:
+ /* should never happen */
+ gvt_err("invalid forcewake offset 0x%x\n", offset);
+ return 1;
+ }
+ } else {
+ ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
+ }
+
+ vgpu_vreg(vgpu, offset) = new;
+ vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
+ return 0;
+}
+
+static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes, unsigned long bitmap)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &vgpu->gvt->scheduler;
+
+ vgpu->resetting = true;
+
+ intel_vgpu_stop_schedule(vgpu);
+ if (scheduler->current_vgpu == vgpu) {
+ mutex_unlock(&vgpu->gvt->lock);
+ intel_gvt_wait_vgpu_idle(vgpu);
+ mutex_lock(&vgpu->gvt->lock);
+ }
+
+ intel_vgpu_reset_execlist(vgpu, bitmap);
+
+ vgpu->resetting = false;
+
+ return 0;
+}
+
+static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+ u64 bitmap = 0;
+
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & GEN6_GRDOM_FULL) {
+ gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
+ bitmap = 0xff;
+ }
+ if (data & GEN6_GRDOM_RENDER) {
+ gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+ bitmap |= (1 << RCS);
+ }
+ if (data & GEN6_GRDOM_MEDIA) {
+ gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+ bitmap |= (1 << VCS);
+ }
+ if (data & GEN6_GRDOM_BLT) {
+ gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+ bitmap |= (1 << BCS);
+ }
+ if (data & GEN6_GRDOM_VECS) {
+ gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+ bitmap |= (1 << VECS);
+ }
+ if (data & GEN8_GRDOM_MEDIA2) {
+ gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+ if (HAS_BSD2(vgpu->gvt->dev_priv))
+ bitmap |= (1 << VCS2);
+ }
+ return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+}
+
+static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
+}
+
+static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
+}
+
+static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
+ vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON;
+ vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
+ vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
+ vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
+
+ } else
+ vgpu_vreg(vgpu, PCH_PP_STATUS) &=
+ ~(PP_ON | PP_SEQUENCE_POWER_DOWN
+ | PP_CYCLE_DELAY_ACTIVE);
+ return 0;
+}
+
+static int transconf_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
+ vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
+ return 0;
+}
+
+static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
+ vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
+ else
+ vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;
+
+ if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
+ vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;
+
+ return 0;
+}
+
+static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data = (1 << 17);
+ return 0;
+}
+
+static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data = 3;
+ return 0;
+}
+
+static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ *(u32 *)p_data = (0x2f << 16);
+ return 0;
+}
+
+static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & PIPECONF_ENABLE)
+ vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+ intel_gvt_check_vblank_emulation(vgpu->gvt);
+ return 0;
+}
+
+static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
+ vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
+ } else {
+ vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
+ if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
+ vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E))
+ &= ~DP_TP_STATUS_AUTOTRAIN_DONE;
+ }
+ return 0;
+}
+
+static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
+ return 0;
+}
+
+#define FDI_LINK_TRAIN_PATTERN1 0
+#define FDI_LINK_TRAIN_PATTERN2 1
+
+static int fdi_auto_training_started(struct intel_vgpu *vgpu)
+{
+ u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E));
+ u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
+ u32 tx_ctl = vgpu_vreg(vgpu, DP_TP_CTL(PORT_E));
+
+ if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
+ (rx_ctl & FDI_RX_ENABLE) &&
+ (rx_ctl & FDI_AUTO_TRAINING) &&
+ (tx_ctl & DP_TP_CTL_ENABLE) &&
+ (tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
+ return 1;
+ else
+ return 0;
+}
+
+static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
+ enum pipe pipe, unsigned int train_pattern)
+{
+ i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
+ unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
+ unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
+ unsigned int fdi_iir_check_bits;
+
+ fdi_rx_imr = FDI_RX_IMR(pipe);
+ fdi_tx_ctl = FDI_TX_CTL(pipe);
+ fdi_rx_ctl = FDI_RX_CTL(pipe);
+
+ if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
+ fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
+ fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
+ fdi_iir_check_bits = FDI_RX_BIT_LOCK;
+ } else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
+ fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
+ fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
+ fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
+ } else {
+ gvt_err("Invalid train pattern %d\n", train_pattern);
+ return -EINVAL;
+ }
+
+ fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
+ fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
+
+ /* If the IIR status bit is masked in IMR, do not raise it */
+ if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
+ return 0;
+
+ if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
+ == fdi_tx_check_bits)
+ && ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
+ == fdi_rx_check_bits))
+ return 1;
+ else
+ return 0;
+}
+
+#define INVALID_INDEX (~0U)
+
+static unsigned int calc_index(unsigned int offset, unsigned int start,
+ unsigned int next, unsigned int end, i915_reg_t i915_end)
+{
+ unsigned int range = next - start;
+
+ if (!end)
+ end = i915_mmio_reg_offset(i915_end);
+ if (offset < start || offset > end)
+ return INVALID_INDEX;
+ offset -= start;
+ return offset / range;
+}
+
+#define FDI_RX_CTL_TO_PIPE(offset) \
+ calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))
+
+#define FDI_TX_CTL_TO_PIPE(offset) \
+ calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))
+
+#define FDI_RX_IMR_TO_PIPE(offset) \
+ calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
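+
+/*
+ * Sketch of the calc_index() arithmetic with made-up offsets (the
+ * example_* helper is illustrative only): three register instances at
+ * 0x1000/0x1100/0x1200 have stride 0x100, so an offset of 0x1200 maps
+ * to index 2.  The trailing i915 register argument is only consulted
+ * when the end argument is zero.
+ */
+static inline unsigned int example_calc_index(void)
+{
+ return calc_index(0x1200, 0x1000, 0x1100, 0x1200,
+ FDI_RX_CTL(PIPE_C)); /* == 2 */
+}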
+
+static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ i915_reg_t fdi_rx_iir;
+ unsigned int index;
+ int ret;
+
+ if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_RX_CTL_TO_PIPE(offset);
+ else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_TX_CTL_TO_PIPE(offset);
+ else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
+ index = FDI_RX_IMR_TO_PIPE(offset);
+ else {
+ gvt_err("Unsupport registers %x\n", offset);
+ return -EINVAL;
+ }
+
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ fdi_rx_iir = FDI_RX_IIR(index);
+
+ ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
+
+ ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
+
+ if (offset == _FDI_RXA_CTL)
+ if (fdi_auto_training_started(vgpu))
+ vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |=
+ DP_TP_STATUS_AUTOTRAIN_DONE;
+ return 0;
+}
+
+#define DP_TP_CTL_TO_PORT(offset) \
+ calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))
+
+static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ i915_reg_t status_reg;
+ unsigned int index;
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ index = DP_TP_CTL_TO_PORT(offset);
+ data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
+ if (data == 0x2) {
+ status_reg = DP_TP_STATUS(index);
+ vgpu_vreg(vgpu, status_reg) |= (1 << 25);
+ }
+ return 0;
+}
+
+static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 reg_val;
+ u32 sticky_mask;
+
+ reg_val = *((u32 *)p_data);
+ sticky_mask = GENMASK(27, 26) | (1 << 24);
+
+ vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
+ (vgpu_vreg(vgpu, offset) & sticky_mask);
+ vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
+ return 0;
+}
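+
+/*
+ * A self-contained sketch of the write-one-to-clear handling above
+ * (the example_* helper is illustrative only): non-sticky bits are
+ * stored as written, while a sticky bit is only cleared when the
+ * guest writes it as 1.
+ */
+static inline u32 example_w1c(u32 vreg, u32 written)
+{
+ u32 sticky_mask = GENMASK(27, 26) | (1 << 24);
+
+ /* keep current sticky bits, take the rest from the write */
+ vreg = (written & ~sticky_mask) | (vreg & sticky_mask);
+ /* clear any sticky bit written as 1 */
+ vreg &= ~(written & sticky_mask);
+ return vreg;
+}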
+
+static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
+ vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+ return 0;
+}
+
+static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (data & FDI_MPHY_IOSFSB_RESET_CTL)
+ vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
+ else
+ vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
+ return 0;
+}
+
+#define DSPSURF_TO_PIPE(offset) \
+ calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
+
+static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ unsigned int index = DSPSURF_TO_PIPE(offset);
+ i915_reg_t surflive_reg = DSPSURFLIVE(index);
+ int flip_event[] = {
+ [PIPE_A] = PRIMARY_A_FLIP_DONE,
+ [PIPE_B] = PRIMARY_B_FLIP_DONE,
+ [PIPE_C] = PRIMARY_C_FLIP_DONE,
+ };
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+
+ set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
+ return 0;
+}
+
+#define SPRSURF_TO_PIPE(offset) \
+ calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))
+
+static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ unsigned int index = SPRSURF_TO_PIPE(offset);
+ i915_reg_t surflive_reg = SPRSURFLIVE(index);
+ int flip_event[] = {
+ [PIPE_A] = SPRITE_A_FLIP_DONE,
+ [PIPE_B] = SPRITE_B_FLIP_DONE,
+ [PIPE_C] = SPRITE_C_FLIP_DONE,
+ };
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+
+ set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
+ return 0;
+}
+
+static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
+ unsigned int reg)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum intel_gvt_event_type event;
+
+ if (reg == _DPA_AUX_CH_CTL)
+ event = AUX_CHANNEL_A;
+ else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
+ event = AUX_CHANNEL_B;
+ else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
+ event = AUX_CHANNEL_C;
+ else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
+ event = AUX_CHANNEL_D;
+ else {
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ return 0;
+}
+
+static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
+ unsigned int reg, int len, bool data_valid)
+{
+ /* mark transaction done */
+ value |= DP_AUX_CH_CTL_DONE;
+ value &= ~DP_AUX_CH_CTL_SEND_BUSY;
+ value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;
+
+ if (data_valid)
+ value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
+ else
+ value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;
+
+ /* message size */
+ value &= ~(0xf << 20);
+ value |= (len << 20);
+ vgpu_vreg(vgpu, reg) = value;
+
+ if (value & DP_AUX_CH_CTL_INTERRUPT)
+ return trigger_aux_channel_interrupt(vgpu, reg);
+ return 0;
+}
+
+static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
+ uint8_t t)
+{
+ if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
+ /* training pattern 1 for CR */
+ /* set LANE0_CR_DONE, LANE1_CR_DONE */
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
+ /* set LANE2_CR_DONE, LANE3_CR_DONE */
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
+ } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
+ DPCD_TRAINING_PATTERN_2) {
+ /* training pattern 2 for EQ */
+ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
+ dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
+ /* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
+ dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
+ /* set INTERLANE_ALIGN_DONE */
+ dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
+ DPCD_INTERLANE_ALIGN_DONE;
+ } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
+ DPCD_LINK_TRAINING_DISABLED) {
+ /* finish link training */
+ /* set sink status as synchronized */
+ dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
+ }
+}
+
+#define _REG_HSW_DP_AUX_CH_CTL(dp) \
+ ((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)
+
+#define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)
+
+#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)
+
+#define dpy_is_valid_port(port) \
+ (((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
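+
+/*
+ * Worked example: the AUX channel register blocks sit 0x100 apart, so
+ * bits 11:8 of the offset identify the port.  0x64110 & 0xF00 is
+ * 0x100, i.e. port index 1 (PORT_B), and _REG_SKL_DP_AUX_CH_CTL(1)
+ * maps back to 0x64110.
+ */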
+
+static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu_display *display = &vgpu->display;
+ int msg, addr, ctrl, op, len;
+ int port_index = OFFSET_TO_DP_AUX_PORT(offset);
+ struct intel_vgpu_dpcd_data *dpcd = NULL;
+ struct intel_vgpu_port *port = NULL;
+ u32 data;
+
+ if (!dpy_is_valid_port(port_index)) {
+ gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
+ return 0;
+ }
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv) &&
+ offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
+ /* SKL DPB/C/D aux ctl register changed */
+ return 0;
+ } else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
+ offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
+ /* write to the data registers */
+ return 0;
+ }
+
+ if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
+ /* just want to clear the sticky bits */
+ vgpu_vreg(vgpu, offset) = 0;
+ return 0;
+ }
+
+ port = &display->ports[port_index];
+ dpcd = port->dpcd;
+
+ /* read out message from DATA1 register */
+ msg = vgpu_vreg(vgpu, offset + 4);
+ addr = (msg >> 8) & 0xffff;
+ ctrl = (msg >> 24) & 0xff;
+ len = msg & 0xff;
+ op = ctrl >> 4;
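+ /*
+ * The header unpacked above lives in DATA1: bits 31:24 hold the
+ * command byte, bits 23:8 the DPCD address and bits 7:0 the
+ * transfer length; the native/i2c opcode is the high nibble of
+ * the command byte.
+ */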
+
+ if (op == GVT_AUX_NATIVE_WRITE) {
+ int t;
+ uint8_t buf[16];
+
+ if ((addr + len + 1) >= DPCD_SIZE) {
+ /*
+ * Write request exceeds what we support.
+ * DPCD spec: When a Source Device is writing a DPCD
+ * address not supported by the Sink Device, the Sink
+ * Device shall reply with AUX NACK and "M" equal to
+ * zero.
+ */
+
+ /* NAK the write */
+ vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
+ return 0;
+ }
+
+ /*
+ * Write request format: (command + address) occupies
+ * 3 bytes, followed by (len + 1) bytes of data.
+ */
+ if (WARN_ON((len + 4) > AUX_BURST_SIZE))
+ return -EINVAL;
+
+ /* unpack data from vreg to buf */
+ for (t = 0; t < 4; t++) {
+ u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
+
+ buf[t * 4] = (r >> 24) & 0xff;
+ buf[t * 4 + 1] = (r >> 16) & 0xff;
+ buf[t * 4 + 2] = (r >> 8) & 0xff;
+ buf[t * 4 + 3] = r & 0xff;
+ }
+
+ /* write to virtual DPCD */
+ if (dpcd && dpcd->data_valid) {
+ for (t = 0; t <= len; t++) {
+ int p = addr + t;
+
+ dpcd->data[p] = buf[t];
+ /* check for link training */
+ if (p == DPCD_TRAINING_PATTERN_SET)
+ dp_aux_ch_ctl_link_training(dpcd,
+ buf[t]);
+ }
+ }
+
+ /* ACK the write */
+ vgpu_vreg(vgpu, offset + 4) = 0;
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
+ dpcd && dpcd->data_valid);
+ return 0;
+ }
+
+ if (op == GVT_AUX_NATIVE_READ) {
+ int idx, i, ret = 0;
+
+ if ((addr + len + 1) >= DPCD_SIZE) {
+ /*
+ * Read request exceeds what we support.
+ * DPCD spec: A Sink Device receiving a Native AUX CH
+ * read request for an unsupported DPCD address must
+ * reply with an AUX ACK and read data set equal to
+ * zero instead of replying with AUX NACK.
+ */
+
+ /* ACK the read */
+ vgpu_vreg(vgpu, offset + 4) = 0;
+ vgpu_vreg(vgpu, offset + 8) = 0;
+ vgpu_vreg(vgpu, offset + 12) = 0;
+ vgpu_vreg(vgpu, offset + 16) = 0;
+ vgpu_vreg(vgpu, offset + 20) = 0;
+
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
+ true);
+ return 0;
+ }
+
+ for (idx = 1; idx <= 5; idx++) {
+ /* clear the data registers */
+ vgpu_vreg(vgpu, offset + 4 * idx) = 0;
+ }
+
+ /*
+ * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
+ */
+ if (WARN_ON((len + 2) > AUX_BURST_SIZE))
+ return -EINVAL;
+
+ /* read from virtual DPCD to vreg */
+ /* first 4 bytes: [ACK][addr][addr+1][addr+2] */
+ if (dpcd && dpcd->data_valid) {
+ for (i = 1; i <= (len + 1); i++) {
+ int t;
+
+ t = dpcd->data[addr + i - 1];
+ t <<= (24 - 8 * (i % 4));
+ ret |= t;
+
+ if ((i % 4 == 3) || (i == (len + 1))) {
+ vgpu_vreg(vgpu, offset +
+ (i / 4 + 1) * 4) = ret;
+ ret = 0;
+ }
+ }
+ }
+ dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
+ dpcd && dpcd->data_valid);
+ return 0;
+ }
+
+ /* i2c transaction starts */
+ intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);
+
+ if (data & DP_AUX_CH_CTL_INTERRUPT)
+ trigger_aux_channel_interrupt(vgpu, offset);
+ return 0;
+}
+
+static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ bool vga_disable;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
+
+ gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
+ vga_disable ? "Disable" : "Enable");
+ return 0;
+}
+
+static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
+ unsigned int sbi_offset)
+{
+ struct intel_vgpu_display *display = &vgpu->display;
+ int num = display->sbi.number;
+ int i;
+
+ for (i = 0; i < num; ++i)
+ if (display->sbi.registers[i].offset == sbi_offset)
+ break;
+
+ if (i == num)
+ return 0;
+
+ return display->sbi.registers[i].value;
+}
+
+static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
+ unsigned int offset, u32 value)
+{
+ struct intel_vgpu_display *display = &vgpu->display;
+ int num = display->sbi.number;
+ int i;
+
+ for (i = 0; i < num; ++i) {
+ if (display->sbi.registers[i].offset == offset)
+ break;
+ }
+
+ if (i == num) {
+ if (num == SBI_REG_MAX) {
+ gvt_err("vgpu%d: SBI caching meets maximum limits\n",
+ vgpu->id);
+ return;
+ }
+ display->sbi.number++;
+ }
+
+ display->sbi.registers[i].offset = offset;
+ display->sbi.registers[i].value = value;
+}
+
+static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
+ unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+ vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
+ sbi_offset);
+ }
+ read_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
+ data |= SBI_READY;
+
+ data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
+ data |= SBI_RESPONSE_SUCCESS;
+
+ vgpu_vreg(vgpu, offset) = data;
+
+ if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
+ unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
+
+ write_virtual_sbi_register(vgpu, sbi_offset,
+ vgpu_vreg(vgpu, SBI_DATA));
+ }
+ return 0;
+}
+
+#define _vgtif_reg(x) \
+ (VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
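+
+/*
+ * Usage sketch (the example_* helper is illustrative only):
+ * _vgtif_reg() turns a struct vgt_if member into its MMIO offset
+ * inside the shared PVINFO page.
+ */
+static inline u32 example_pvinfo_offset(void)
+{
+ return _vgtif_reg(g2v_notify); /* VGT_PVINFO_PAGE + member offset */
+}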
+
+static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ bool invalid_read = false;
+
+ read_vreg(vgpu, offset, p_data, bytes);
+
+ switch (offset) {
+ case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
+ if (offset + bytes > _vgtif_reg(vgt_id) + 4)
+ invalid_read = true;
+ break;
+ case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
+ _vgtif_reg(avail_rs.fence_num):
+ if (offset + bytes >
+ _vgtif_reg(avail_rs.fence_num) + 4)
+ invalid_read = true;
+ break;
+ case 0x78010: /* vgt_caps */
+ case 0x7881c:
+ break;
+ default:
+ invalid_read = true;
+ break;
+ }
+ if (invalid_read)
+ gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
+ offset, bytes, *(u32 *)p_data);
+ return 0;
+}
+
+static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
+{
+ int ret = 0;
+
+ switch (notification) {
+ case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
+ ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
+ break;
+ case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
+ ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
+ break;
+ case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
+ ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
+ break;
+ case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
+ ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
+ break;
+ case VGT_G2V_EXECLIST_CONTEXT_CREATE:
+ case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
+ case 1: /* Remove this in guest driver. */
+ break;
+ default:
+ gvt_err("Invalid PV notification %d\n", notification);
+ }
+ return ret;
+}
+
+static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
+ char *env[3] = {NULL, NULL, NULL};
+ char vmid_str[20];
+ char display_ready_str[20];
+
+ snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
+ env[0] = display_ready_str;
+
+ snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
+ env[1] = vmid_str;
+
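+ /*
+ * The uevent environment thus carries GVT_DISPLAY_READY=<0|1> and
+ * VMID=<vgpu id>, which userspace listeners can match on.
+ */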
+ return kobject_uevent_env(kobj, KOBJ_ADD, env);
+}
+
+static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data;
+ int ret;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ data = vgpu_vreg(vgpu, offset);
+
+ switch (offset) {
+ case _vgtif_reg(display_ready):
+ send_display_ready_uevent(vgpu, data ? 1 : 0);
+ break;
+ case _vgtif_reg(g2v_notify):
+ ret = handle_g2v_notification(vgpu, data);
+ break;
+ /* add xhot and yhot to the handled list to avoid error logs */
+ case 0x78830:
+ case 0x78834:
+ case _vgtif_reg(pdp[0].lo):
+ case _vgtif_reg(pdp[0].hi):
+ case _vgtif_reg(pdp[1].lo):
+ case _vgtif_reg(pdp[1].hi):
+ case _vgtif_reg(pdp[2].lo):
+ case _vgtif_reg(pdp[2].hi):
+ case _vgtif_reg(pdp[3].lo):
+ case _vgtif_reg(pdp[3].hi):
+ case _vgtif_reg(execlist_context_descriptor_lo):
+ case _vgtif_reg(execlist_context_descriptor_hi):
+ break;
+ default:
+ gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
+ offset, bytes, data);
+ break;
+ }
+ return 0;
+}
+
+static int pf_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 val = *(u32 *)p_data;
+
+ if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
+ offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
+ offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
+ WARN_ONCE(true, "VM(%d): guest is trying to scale a plane\n",
+ vgpu->id);
+ return 0;
+ }
+
+ return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
+}
+
+static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_ENABLE_REQUEST)
+ vgpu_vreg(vgpu, offset) |= HSW_PWR_WELL_STATE_ENABLED;
+ else
+ vgpu_vreg(vgpu, offset) &= ~HSW_PWR_WELL_STATE_ENABLED;
+ return 0;
+}
+
+static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
+ vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
+ return 0;
+}
+
+static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 mode = *(u32 *)p_data;
+
+ if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
+ WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
+ vgpu->id);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 trtte = *(u32 *)p_data;
+
+ if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
+ WARN(1, "VM(%d): guest is using a physical address for TRTT!\n",
+ vgpu->id);
+ return -EINVAL;
+ }
+ write_vreg(vgpu, offset, p_data, bytes);
+ /* TRTTE is not per-context */
+ I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+
+ return 0;
+}
+
+static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 val = *(u32 *)p_data;
+
+ if (val & 1) {
+ /* unblock hw logic */
+ I915_WRITE(_MMIO(offset), val);
+ }
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
+static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 v = 0;
+
+ if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
+ v |= (1 << 0);
+
+ if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
+ v |= (1 << 8);
+
+ if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
+ v |= (1 << 16);
+
+ if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
+ v |= (1 << 24);
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 value = *(u32 *)p_data;
+ u32 cmd = value & 0xff;
+ u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
+
+ switch (cmd) {
+ case 0x6:
+ /*
+ * "Read memory latency" command on gen9.
+ * The memory latency values below were read
+ * from a Skylake platform.
+ */
+ if (!*data0)
+ *data0 = 0x1e1a1100;
+ else
+ *data0 = 0x61514b3d;
+ break;
+ case 0x5:
+ *data0 |= 0x1;
+ break;
+ }
+
+ gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
+ vgpu->id, value, *data0);
+
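+ /*
+ * Completion handshake: the guest issues a command with the busy
+ * bit (31) set and polls for it to clear, so drop the busy bit
+ * before the value is stored back by the default write below.
+ */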
+ value &= ~(1 << 31);
+ return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
+}
+
+static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ v &= (1 << 31) | (1 << 29) | (1 << 9) |
+ (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
+ v |= (v >> 1);
+
+ return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
+}
+
+static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t reg = {.reg = offset};
+
+ switch (offset) {
+ case 0x4ddc:
+ vgpu_vreg(vgpu, offset) = 0x8000003c;
+ break;
+ case 0x42080:
+ vgpu_vreg(vgpu, offset) = 0x8000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * TODO: detect the stepping once GVT carries that information;
+ * 0x4ddc is enabled after stepping C0, 0x42080 after E0.
+ */
+ I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+ return 0;
+}
+
+static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 v = *(u32 *)p_data;
+
+ /* other bits are MBZ. */
+ v &= (1 << 31) | (1 << 30);
+ v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
+
+ vgpu_vreg(vgpu, offset) = v;
+
+ return 0;
+}
+
+static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+ return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ struct intel_vgpu_execlist *execlist;
+ u32 data = *(u32 *)p_data;
+ int ret = 0;
+
+ if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
+ return -EINVAL;
+
+ execlist = &vgpu->execlist[ring_id];
+
+ execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
+ if (execlist->elsp_dwords.index == 3)
+ ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+
+ ++execlist->elsp_dwords.index;
+ execlist->elsp_dwords.index &= 0x3;
+ return ret;
+}
+
+static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 data = *(u32 *)p_data;
+ int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+ bool enable_execlist;
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
+ || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+ enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
+
+ gvt_dbg_core("EXECLIST %s on ring %d\n",
+ (enable_execlist ? "enabling" : "disabling"),
+ ring_id);
+
+ if (enable_execlist)
+ intel_vgpu_start_schedule(vgpu);
+ }
+ return 0;
+}
+
+static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ int rc = 0;
+ unsigned int id = 0;
+
+ switch (offset) {
+ case 0x4260:
+ id = RCS;
+ break;
+ case 0x4264:
+ id = VCS;
+ break;
+ case 0x4268:
+ id = VCS2;
+ break;
+ case 0x426c:
+ id = BCS;
+ break;
+ case 0x4270:
+ id = VECS;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (!rc)
+ set_bit(id, (void *)vgpu->tlb_handle_pending);
+
+ return rc;
+}
+
+#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
+ ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
+ f, s, am, rm, d, r, w); \
+ if (ret) \
+ return ret; \
+} while (0)
+
+#define MMIO_D(reg, d) \
+ MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
+
+#define MMIO_DH(reg, d, r, w) \
+ MMIO_F(reg, 4, 0, 0, 0, d, r, w)
+
+#define MMIO_DFH(reg, d, f, r, w) \
+ MMIO_F(reg, 4, f, 0, 0, d, r, w)
+
+#define MMIO_GM(reg, d, r, w) \
+ MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
+
+#define MMIO_RO(reg, d, f, rm, r, w) \
+ MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
+
+#define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
+ MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
+ MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
+ MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
+ MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
+} while (0)
+
+#define MMIO_RING_D(prefix, d) \
+ MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)
+
+#define MMIO_RING_DFH(prefix, d, f, r, w) \
+ MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
+
+#define MMIO_RING_GM(prefix, d, r, w) \
+ MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
+
+#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
+ MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
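+
+/*
+ * Sketch of a MMIO_RING_* expansion: inside an init function that
+ * provides the gvt and ret variables, MMIO_RING_D(RING_HWSTAM, D_ALL)
+ * is equivalent to registering the register once per engine base,
+ * each as a plain 4-byte entry with default handlers:
+ *
+ * MMIO_F(RING_HWSTAM(RENDER_RING_BASE), 4, 0, 0, 0, D_ALL, NULL, NULL);
+ * MMIO_F(RING_HWSTAM(BLT_RING_BASE), 4, 0, 0, 0, D_ALL, NULL, NULL);
+ * MMIO_F(RING_HWSTAM(GEN6_BSD_RING_BASE), 4, 0, 0, 0, D_ALL, NULL, NULL);
+ * MMIO_F(RING_HWSTAM(VEBOX_RING_BASE), 4, 0, 0, 0, D_ALL, NULL, NULL);
+ */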
+
+static int init_generic_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+
+ MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(SDEISR, D_ALL);
+
+ MMIO_RING_D(RING_HWSTAM, D_ALL);
+
+ MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+
+#define RING_REG(base) (base + 0x28)
+ MMIO_RING_D(RING_REG, D_ALL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x134)
+ MMIO_RING_D(RING_REG, D_ALL);
+#undef RING_REG
+
+ MMIO_GM(0x2148, D_ALL, NULL, NULL);
+ MMIO_GM(CCID, D_ALL, NULL, NULL);
+ MMIO_GM(0x12198, D_ALL, NULL, NULL);
+ MMIO_D(GEN7_CXT_SIZE, D_ALL);
+
+ MMIO_RING_D(RING_TAIL, D_ALL);
+ MMIO_RING_D(RING_HEAD, D_ALL);
+ MMIO_RING_D(RING_CTL, D_ALL);
+ MMIO_RING_D(RING_ACTHD, D_ALL);
+ MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
+
+ /* RING MODE */
+#define RING_REG(base) (base + 0x29c)
+ MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
+#undef RING_REG
+
+ MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
+ ring_timestamp_mmio_read, NULL);
+ MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
+ ring_timestamp_mmio_read, NULL);
+
+ MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK, NULL, NULL);
+
+ MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_D(GAM_ECOCHK, D_ALL);
+ MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_D(0x9030, D_ALL);
+ MMIO_D(0x20a0, D_ALL);
+ MMIO_D(0x2420, D_ALL);
+ MMIO_D(0x2430, D_ALL);
+ MMIO_D(0x2434, D_ALL);
+ MMIO_D(0x2438, D_ALL);
+ MMIO_D(0x243c, D_ALL);
+ MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0xe184, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
+
+ /* display */
+ MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_D(0x602a0, D_ALL);
+
+ MMIO_D(0x65050, D_ALL);
+ MMIO_D(0x650b4, D_ALL);
+
+ MMIO_D(0xc4040, D_ALL);
+ MMIO_D(DERRMR, D_ALL);
+
+ MMIO_D(PIPEDSL(PIPE_A), D_ALL);
+ MMIO_D(PIPEDSL(PIPE_B), D_ALL);
+ MMIO_D(PIPEDSL(PIPE_C), D_ALL);
+ MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
+
+ MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
+
+ MMIO_D(PIPESTAT(PIPE_A), D_ALL);
+ MMIO_D(PIPESTAT(PIPE_B), D_ALL);
+ MMIO_D(PIPESTAT(PIPE_C), D_ALL);
+ MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
+
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
+ MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
+
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
+ MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
+
+ MMIO_D(CURCNTR(PIPE_A), D_ALL);
+ MMIO_D(CURCNTR(PIPE_B), D_ALL);
+ MMIO_D(CURCNTR(PIPE_C), D_ALL);
+
+ MMIO_D(CURPOS(PIPE_A), D_ALL);
+ MMIO_D(CURPOS(PIPE_B), D_ALL);
+ MMIO_D(CURPOS(PIPE_C), D_ALL);
+
+ MMIO_D(CURBASE(PIPE_A), D_ALL);
+ MMIO_D(CURBASE(PIPE_B), D_ALL);
+ MMIO_D(CURBASE(PIPE_C), D_ALL);
+
+ MMIO_D(0x700ac, D_ALL);
+ MMIO_D(0x710ac, D_ALL);
+ MMIO_D(0x720ac, D_ALL);
+
+ MMIO_D(0x70090, D_ALL);
+ MMIO_D(0x70094, D_ALL);
+ MMIO_D(0x70098, D_ALL);
+ MMIO_D(0x7009c, D_ALL);
+
+ MMIO_D(DSPCNTR(PIPE_A), D_ALL);
+ MMIO_D(DSPADDR(PIPE_A), D_ALL);
+ MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
+ MMIO_D(DSPPOS(PIPE_A), D_ALL);
+ MMIO_D(DSPSIZE(PIPE_A), D_ALL);
+ MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
+ MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
+
+ MMIO_D(DSPCNTR(PIPE_B), D_ALL);
+ MMIO_D(DSPADDR(PIPE_B), D_ALL);
+ MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
+ MMIO_D(DSPPOS(PIPE_B), D_ALL);
+ MMIO_D(DSPSIZE(PIPE_B), D_ALL);
+ MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
+ MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
+
+ MMIO_D(DSPCNTR(PIPE_C), D_ALL);
+ MMIO_D(DSPADDR(PIPE_C), D_ALL);
+ MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
+ MMIO_D(DSPPOS(PIPE_C), D_ALL);
+ MMIO_D(DSPSIZE(PIPE_C), D_ALL);
+ MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
+ MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
+
+ MMIO_D(SPRCTL(PIPE_A), D_ALL);
+ MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
+ MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
+ MMIO_D(SPRPOS(PIPE_A), D_ALL);
+ MMIO_D(SPRSIZE(PIPE_A), D_ALL);
+ MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
+ MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
+ MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
+ MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
+ MMIO_D(SPROFFSET(PIPE_A), D_ALL);
+ MMIO_D(SPRSCALE(PIPE_A), D_ALL);
+ MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
+
+ MMIO_D(SPRCTL(PIPE_B), D_ALL);
+ MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
+ MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
+ MMIO_D(SPRPOS(PIPE_B), D_ALL);
+ MMIO_D(SPRSIZE(PIPE_B), D_ALL);
+ MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
+ MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
+ MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
+ MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
+ MMIO_D(SPROFFSET(PIPE_B), D_ALL);
+ MMIO_D(SPRSCALE(PIPE_B), D_ALL);
+ MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
+
+ MMIO_D(SPRCTL(PIPE_C), D_ALL);
+ MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
+ MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
+ MMIO_D(SPRPOS(PIPE_C), D_ALL);
+ MMIO_D(SPRSIZE(PIPE_C), D_ALL);
+ MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
+ MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
+ MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
+ MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
+ MMIO_D(SPROFFSET(PIPE_C), D_ALL);
+ MMIO_D(SPRSCALE(PIPE_C), D_ALL);
+ MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
+
+ MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
+
+ MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
+
+ MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
+
+ MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
+ MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
+ MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
+ MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
+ MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
+
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
+
+ MMIO_D(PF_CTL(PIPE_A), D_ALL);
+ MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
+ MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
+ MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
+ MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
+
+ MMIO_D(PF_CTL(PIPE_B), D_ALL);
+ MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
+ MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
+ MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
+ MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
+
+ MMIO_D(PF_CTL(PIPE_C), D_ALL);
+ MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
+ MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
+ MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
+ MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
+
+ MMIO_D(WM0_PIPEA_ILK, D_ALL);
+ MMIO_D(WM0_PIPEB_ILK, D_ALL);
+ MMIO_D(WM0_PIPEC_IVB, D_ALL);
+ MMIO_D(WM1_LP_ILK, D_ALL);
+ MMIO_D(WM2_LP_ILK, D_ALL);
+ MMIO_D(WM3_LP_ILK, D_ALL);
+ MMIO_D(WM1S_LP_ILK, D_ALL);
+ MMIO_D(WM2S_LP_IVB, D_ALL);
+ MMIO_D(WM3S_LP_IVB, D_ALL);
+
+ MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
+ MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
+ MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
+ MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
+
+ MMIO_D(0x48268, D_ALL);
+
+ MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
+ gmbus_mmio_write);
+ MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0xe4f00, 0x28, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_F(_PCH_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_PCH_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+
+ MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
+
+ MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
+ MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
+
+ MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
+ MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
+ MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
+ MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
+ MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
+
+ MMIO_D(_PCH_TRANS_HTOTAL_A, D_ALL);
+ MMIO_D(_PCH_TRANS_HBLANK_A, D_ALL);
+ MMIO_D(_PCH_TRANS_HSYNC_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VTOTAL_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VBLANK_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNC_A, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNCSHIFT_A, D_ALL);
+
+ MMIO_D(_PCH_TRANS_HTOTAL_B, D_ALL);
+ MMIO_D(_PCH_TRANS_HBLANK_B, D_ALL);
+ MMIO_D(_PCH_TRANS_HSYNC_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VTOTAL_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VBLANK_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNC_B, D_ALL);
+ MMIO_D(_PCH_TRANS_VSYNCSHIFT_B, D_ALL);
+
+ MMIO_D(_PCH_TRANSA_DATA_M1, D_ALL);
+ MMIO_D(_PCH_TRANSA_DATA_N1, D_ALL);
+ MMIO_D(_PCH_TRANSA_DATA_M2, D_ALL);
+ MMIO_D(_PCH_TRANSA_DATA_N2, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_M1, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_N1, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_M2, D_ALL);
+ MMIO_D(_PCH_TRANSA_LINK_N2, D_ALL);
+
+ MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
+ MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
+ MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
+
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
+
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
+
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
+
+ MMIO_D(_FDI_RXA_MISC, D_ALL);
+ MMIO_D(_FDI_RXB_MISC, D_ALL);
+ MMIO_D(_FDI_RXA_TUSIZE1, D_ALL);
+ MMIO_D(_FDI_RXA_TUSIZE2, D_ALL);
+ MMIO_D(_FDI_RXB_TUSIZE1, D_ALL);
+ MMIO_D(_FDI_RXB_TUSIZE2, D_ALL);
+
+ MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
+ MMIO_D(PCH_PP_DIVISOR, D_ALL);
+ MMIO_D(PCH_PP_STATUS, D_ALL);
+ MMIO_D(PCH_LVDS, D_ALL);
+ MMIO_D(_PCH_DPLL_A, D_ALL);
+ MMIO_D(_PCH_DPLL_B, D_ALL);
+ MMIO_D(_PCH_FPA0, D_ALL);
+ MMIO_D(_PCH_FPA1, D_ALL);
+ MMIO_D(_PCH_FPB0, D_ALL);
+ MMIO_D(_PCH_FPB1, D_ALL);
+ MMIO_D(PCH_DREF_CONTROL, D_ALL);
+ MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
+ MMIO_D(PCH_DPLL_SEL, D_ALL);
+
+ MMIO_D(0x61208, D_ALL);
+ MMIO_D(0x6120c, D_ALL);
+ MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
+ MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
+
+ MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
+ MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
+
+ MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
+ PORTA_HOTPLUG_STATUS_MASK
+ | PORTB_HOTPLUG_STATUS_MASK
+ | PORTC_HOTPLUG_STATUS_MASK
+ | PORTD_HOTPLUG_STATUS_MASK,
+ NULL, NULL);
+
+ MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
+ MMIO_D(FUSE_STRAP, D_ALL);
+ MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
+
+ MMIO_D(DISP_ARB_CTL, D_ALL);
+ MMIO_D(DISP_ARB_CTL2, D_ALL);
+
+ MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
+ MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
+ MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
+
+ MMIO_D(SOUTH_CHICKEN1, D_ALL);
+ MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
+ MMIO_D(_TRANSA_CHICKEN1, D_ALL);
+ MMIO_D(_TRANSB_CHICKEN1, D_ALL);
+ MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
+ MMIO_D(_TRANSA_CHICKEN2, D_ALL);
+ MMIO_D(_TRANSB_CHICKEN2, D_ALL);
+
+ MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
+ MMIO_D(ILK_DPFC_CONTROL, D_ALL);
+ MMIO_D(ILK_DPFC_RECOMP_CTL, D_ALL);
+ MMIO_D(ILK_DPFC_STATUS, D_ALL);
+ MMIO_D(ILK_DPFC_FENCE_YOFF, D_ALL);
+ MMIO_D(ILK_DPFC_CHICKEN, D_ALL);
+ MMIO_D(ILK_FBC_RT_BASE, D_ALL);
+
+ MMIO_D(IPS_CTL, D_ALL);
+
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
+
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
+
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
+
+ MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
+ MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
+ MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
+ MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(0x60110, D_ALL);
+ MMIO_D(0x61110, D_ALL);
+ MMIO_F(0x70400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x71400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x72400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x70440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x71440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x72440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x7044c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x7144c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(0x7244c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+
+ MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
+ MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
+ MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
+ MMIO_D(SPLL_CTL, D_ALL);
+ MMIO_D(_WRPLL_CTL1, D_ALL);
+ MMIO_D(_WRPLL_CTL2, D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
+ MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
+
+ MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
+ MMIO_D(0x46508, D_ALL);
+
+ MMIO_D(0x49080, D_ALL);
+ MMIO_D(0x49180, D_ALL);
+ MMIO_D(0x49280, D_ALL);
+
+ MMIO_F(0x49090, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x49190, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x49290, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
+ MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
+ MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
+
+ MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
+ MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
+ MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
+
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
+
+ MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
+ MMIO_D(SBI_ADDR, D_ALL);
+ MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
+ MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
+ MMIO_D(PIXCLK_GATE, D_ALL);
+
+ MMIO_F(_DPA_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_ALL, NULL,
+ dp_aux_ch_ctl_mmio_write);
+
+ MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+ MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
+
+ MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
+ MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
+
+ MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
+ MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
+
+ MMIO_F(_DDI_BUF_TRANS_A, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64e60, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64ec0, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64f20, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x64f80, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
+ MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
+
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_A, D_ALL, NULL, NULL);
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_B, D_ALL, NULL, NULL);
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_C, D_ALL, NULL, NULL);
+ MMIO_DH(_TRANS_DDI_FUNC_CTL_EDP, D_ALL, NULL, NULL);
+
+ MMIO_D(_TRANSA_MSA_MISC, D_ALL);
+ MMIO_D(_TRANSB_MSA_MISC, D_ALL);
+ MMIO_D(_TRANSC_MSA_MISC, D_ALL);
+ MMIO_D(_TRANS_EDP_MSA_MISC, D_ALL);
+
+ MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
+ MMIO_D(FORCEWAKE_ACK, D_ALL);
+ MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
+ MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
+ MMIO_D(GTFIFODBG, D_ALL);
+ MMIO_D(GTFIFOCTL, D_ALL);
+ MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
+ MMIO_D(ECOBUS, D_ALL);
+ MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
+ MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
+ MMIO_D(GEN6_RPNSWREQ, D_ALL);
+ MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
+ MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
+ MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
+ MMIO_D(GEN6_RPSTAT1, D_ALL);
+ MMIO_D(GEN6_RP_CONTROL, D_ALL);
+ MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
+ MMIO_D(GEN6_RP_CUR_UP, D_ALL);
+ MMIO_D(GEN6_RP_PREV_UP, D_ALL);
+ MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
+ MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
+ MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
+ MMIO_D(GEN6_RP_UP_EI, D_ALL);
+ MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
+ MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
+ MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
+ MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
+ MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
+ MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
+ MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
+ MMIO_D(GEN6_RC_SLEEP, D_ALL);
+ MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
+ MMIO_D(GEN6_PMINTRMSK, D_ALL);
+ MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+
+ MMIO_D(RSTDBYCTL, D_ALL);
+
+ MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
+ MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
+ MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
+ MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
+
+ MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(TILECTL, D_ALL);
+
+ MMIO_D(GEN6_UCGCTL1, D_ALL);
+ MMIO_D(GEN6_UCGCTL2, D_ALL);
+
+ MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
+ MMIO_D(GEN6_PCODE_DATA, D_ALL);
+ MMIO_D(0x13812c, D_ALL);
+ MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
+ MMIO_D(HSW_EDRAM_CAP, D_ALL);
+ MMIO_D(HSW_IDICR, D_ALL);
+ MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
+
+ MMIO_D(0x3c, D_ALL);
+ MMIO_D(0x860, D_ALL);
+ MMIO_D(ECOSKPD, D_ALL);
+ MMIO_D(0x121d0, D_ALL);
+ MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
+ MMIO_D(0x41d0, D_ALL);
+ MMIO_D(GAC_ECO_BITS, D_ALL);
+ MMIO_D(0x6200, D_ALL);
+ MMIO_D(0x6204, D_ALL);
+ MMIO_D(0x6208, D_ALL);
+ MMIO_D(0x7118, D_ALL);
+ MMIO_D(0x7180, D_ALL);
+ MMIO_D(0x7408, D_ALL);
+ MMIO_D(0x7c00, D_ALL);
+ MMIO_D(GEN6_MBCTL, D_ALL);
+ MMIO_D(0x911c, D_ALL);
+ MMIO_D(0x9120, D_ALL);
+
+ MMIO_D(GAB_CTL, D_ALL);
+ MMIO_D(0x48800, D_ALL);
+ MMIO_D(0xce044, D_ALL);
+ MMIO_D(0xe6500, D_ALL);
+ MMIO_D(0xe6504, D_ALL);
+ MMIO_D(0xe6600, D_ALL);
+ MMIO_D(0xe6604, D_ALL);
+ MMIO_D(0xe6700, D_ALL);
+ MMIO_D(0xe6704, D_ALL);
+ MMIO_D(0xe6800, D_ALL);
+ MMIO_D(0xe6804, D_ALL);
+ MMIO_D(PCH_GMBUS4, D_ALL);
+ MMIO_D(PCH_GMBUS5, D_ALL);
+
+ MMIO_D(0x902c, D_ALL);
+ MMIO_D(0xec008, D_ALL);
+ MMIO_D(0xec00c, D_ALL);
+ MMIO_D(0xec008 + 0x18, D_ALL);
+ MMIO_D(0xec00c + 0x18, D_ALL);
+ MMIO_D(0xec008 + 0x18 * 2, D_ALL);
+ MMIO_D(0xec00c + 0x18 * 2, D_ALL);
+ MMIO_D(0xec008 + 0x18 * 3, D_ALL);
+ MMIO_D(0xec00c + 0x18 * 3, D_ALL);
+ MMIO_D(0xec408, D_ALL);
+ MMIO_D(0xec40c, D_ALL);
+ MMIO_D(0xec408 + 0x18, D_ALL);
+ MMIO_D(0xec40c + 0x18, D_ALL);
+ MMIO_D(0xec408 + 0x18 * 2, D_ALL);
+ MMIO_D(0xec40c + 0x18 * 2, D_ALL);
+ MMIO_D(0xec408 + 0x18 * 3, D_ALL);
+ MMIO_D(0xec40c + 0x18 * 3, D_ALL);
+ MMIO_D(0xfc810, D_ALL);
+ MMIO_D(0xfc81c, D_ALL);
+ MMIO_D(0xfc828, D_ALL);
+ MMIO_D(0xfc834, D_ALL);
+ MMIO_D(0xfcc00, D_ALL);
+ MMIO_D(0xfcc0c, D_ALL);
+ MMIO_D(0xfcc18, D_ALL);
+ MMIO_D(0xfcc24, D_ALL);
+ MMIO_D(0xfd000, D_ALL);
+ MMIO_D(0xfd00c, D_ALL);
+ MMIO_D(0xfd018, D_ALL);
+ MMIO_D(0xfd024, D_ALL);
+ MMIO_D(0xfd034, D_ALL);
+
+ MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
+ MMIO_D(0x2054, D_ALL);
+ MMIO_D(0x12054, D_ALL);
+ MMIO_D(0x22054, D_ALL);
+ MMIO_D(0x1a054, D_ALL);
+
+ MMIO_D(0x44070, D_ALL);
+
+ MMIO_D(0x215c, D_HSW_PLUS);
+ MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
+ MMIO_D(OACONTROL, D_HSW);
+ MMIO_D(0x2b00, D_BDW_PLUS);
+ MMIO_D(0x2360, D_BDW_PLUS);
+ MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(BCS_SWCTRL, D_ALL);
+
+ MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x426c, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ return 0;
+}
+
+static int init_broadwell_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+
+ MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
+ intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
+
+ MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
+
+ MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
+ MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
+ MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
+
+ MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
+ intel_vgpu_reg_master_irq_handler);
+
+ MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(0x1c134, D_BDW_PLUS);
+
+ MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
+ MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
+ MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
+ NULL, NULL);
+ MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
+ NULL, NULL);
+ MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ ring_timestamp_mmio_read, NULL);
+
+ MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+
+#define RING_REG(base) (base + 0x230)
+ MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
+ MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x234)
+ MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x244)
+ MMIO_RING_D(RING_REG, D_BDW_PLUS);
+ MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x370)
+ MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
+ NULL, NULL);
+#undef RING_REG
+
+#define RING_REG(base) (base + 0x3a0)
+ MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+#undef RING_REG
+
+ MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
+ MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
+ MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
+ MMIO_D(0x1c1d0, D_BDW_PLUS);
+ MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
+ MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
+ MMIO_D(0x1c054, D_BDW_PLUS);
+
+ MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
+ MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
+
+ MMIO_D(GAMTARBMODE, D_BDW_PLUS);
+
+#define RING_REG(base) (base + 0x270)
+ MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+#undef RING_REG
+
+ MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+ MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
+
+ MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
+
+ MMIO_D(WM_MISC, D_BDW);
+ MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
+
+ MMIO_D(0x66c00, D_BDW_PLUS);
+ MMIO_D(0x66c04, D_BDW_PLUS);
+
+ MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
+
+ MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
+ MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
+ MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
+
+ MMIO_D(0xfdc, D_BDW);
+ MMIO_D(GEN8_ROW_CHICKEN, D_BDW_PLUS);
+ MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
+ MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
+
+ MMIO_D(0xb1f0, D_BDW);
+ MMIO_D(0xb1c0, D_BDW);
+ MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(0xb100, D_BDW);
+ MMIO_D(0xb10c, D_BDW);
+ MMIO_D(0xb110, D_BDW);
+
+ MMIO_DH(0x24d0, D_BDW_PLUS, NULL, NULL);
+ MMIO_DH(0x24d4, D_BDW_PLUS, NULL, NULL);
+ MMIO_DH(0x24d8, D_BDW_PLUS, NULL, NULL);
+ MMIO_DH(0x24dc, D_BDW_PLUS, NULL, NULL);
+
+ MMIO_D(0x83a4, D_BDW);
+ MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
+
+ MMIO_D(0x8430, D_BDW);
+
+ MMIO_D(0x110000, D_BDW_PLUS);
+
+ MMIO_D(0x48400, D_BDW_PLUS);
+
+ MMIO_D(0x6e570, D_BDW_PLUS);
+ MMIO_D(0x65f10, D_BDW_PLUS);
+
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0xe180, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+
+ MMIO_D(0x2248, D_BDW);
+
+ return 0;
+}
+
+static int init_skl_mmio_info(struct intel_gvt *gvt)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+
+ MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
+ MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
+
+ MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
+ MMIO_D(0xa210, D_SKL_PLUS);
+ MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
+ MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
+ MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
+ MMIO_D(0x45504, D_SKL);
+ MMIO_D(0x45520, D_SKL);
+ MMIO_D(0x46000, D_SKL);
+ MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write);
+ MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write);
+ MMIO_D(0x6C040, D_SKL);
+ MMIO_D(0x6C048, D_SKL);
+ MMIO_D(0x6C050, D_SKL);
+ MMIO_D(0x6C044, D_SKL);
+ MMIO_D(0x6C04C, D_SKL);
+ MMIO_D(0x6C054, D_SKL);
+ MMIO_D(0x6c058, D_SKL);
+ MMIO_D(0x6c05c, D_SKL);
+ MMIO_DH(0x6c060, D_SKL, dpll_status_read, NULL);
+
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL);
+
+ MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL);
+
+ MMIO_D(0x70380, D_SKL);
+ MMIO_D(0x71380, D_SKL);
+ MMIO_D(0x72380, D_SKL);
+ MMIO_D(0x7039c, D_SKL);
+
+ MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_D(0x8f074, D_SKL);
+ MMIO_D(0x8f004, D_SKL);
+ MMIO_D(0x8f034, D_SKL);
+
+ MMIO_D(0xb11c, D_SKL);
+
+ MMIO_D(0x51000, D_SKL);
+ MMIO_D(0x6c00c, D_SKL);
+
+ MMIO_F(0xc800, 0x7f8, 0, 0, 0, D_SKL, NULL, NULL);
+ MMIO_F(0xb020, 0x80, 0, 0, 0, D_SKL, NULL, NULL);
+
+ MMIO_D(0xd08, D_SKL);
+ MMIO_D(0x20e0, D_SKL);
+ MMIO_D(0x20ec, D_SKL);
+
+ /* TRTT */
+ MMIO_D(0x4de0, D_SKL);
+ MMIO_D(0x4de4, D_SKL);
+ MMIO_D(0x4de8, D_SKL);
+ MMIO_D(0x4dec, D_SKL);
+ MMIO_D(0x4df0, D_SKL);
+ MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
+ MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
+
+ MMIO_D(0x45008, D_SKL);
+
+ MMIO_D(0x46430, D_SKL);
+
+ MMIO_D(0x46520, D_SKL);
+
+ MMIO_D(0xc403c, D_SKL);
+ MMIO_D(0xb004, D_SKL);
+ MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
+
+ MMIO_D(0x65900, D_SKL);
+ MMIO_D(0x1082c0, D_SKL);
+ MMIO_D(0x4068, D_SKL);
+ MMIO_D(0x67054, D_SKL);
+ MMIO_D(0x6e560, D_SKL);
+ MMIO_D(0x6e554, D_SKL);
+ MMIO_D(0x2b20, D_SKL);
+ MMIO_D(0x65f00, D_SKL);
+ MMIO_D(0x65f08, D_SKL);
+ MMIO_D(0x320f0, D_SKL);
+
+ MMIO_D(_REG_VCS2_EXCC, D_SKL);
+ MMIO_D(0x70034, D_SKL);
+ MMIO_D(0x71034, D_SKL);
+ MMIO_D(0x72034, D_SKL);
+
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
+
+ MMIO_D(0x44500, D_SKL);
+ return 0;
+}
+
+/**
+ * intel_gvt_find_mmio_info - find MMIO information entry by aligned offset
+ * @gvt: GVT device
+ * @offset: register offset
+ *
+ * This function is used to find the MMIO information entry in the hash table.
+ *
+ * Returns:
+ * Pointer to the MMIO information entry, or NULL if it does not exist.
+ */
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ struct intel_gvt_mmio_info *e;
+
+ WARN_ON(!IS_ALIGNED(offset, 4));
+
+ hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
+ if (e->offset == offset)
+ return e;
+ }
+ return NULL;
+}
+
+/**
+ * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the driver unloading stage to clean up the MMIO
+ * information table of the GVT device.
+ *
+ */
+void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
+{
+ struct hlist_node *tmp;
+ struct intel_gvt_mmio_info *e;
+ int i;
+
+ hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
+ kfree(e);
+
+ vfree(gvt->mmio.mmio_attribute);
+ gvt->mmio.mmio_attribute = NULL;
+}
+
+/**
+ * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
+ * @gvt: GVT device
+ *
+ * This function is called at the initialization stage to set up the MMIO
+ * information table for the GVT device.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
+{
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ int ret;
+
+ gvt->mmio.mmio_attribute = vzalloc(info->mmio_size);
+ if (!gvt->mmio.mmio_attribute)
+ return -ENOMEM;
+
+ ret = init_generic_mmio_info(gvt);
+ if (ret)
+ goto err;
+
+ if (IS_BROADWELL(dev_priv)) {
+ ret = init_broadwell_mmio_info(gvt);
+ if (ret)
+ goto err;
+ } else if (IS_SKYLAKE(dev_priv)) {
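+ /* Skylake reuses the Broadwell tables, then layers SKL-only entries on top */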
+ ret = init_broadwell_mmio_info(gvt);
+ if (ret)
+ goto err;
+ ret = init_skl_mmio_info(gvt);
+ if (ret)
+ goto err;
+ }
+ return 0;
+err:
+ intel_gvt_clean_mmio_info(gvt);
+ return ret;
+}
+
+/**
+ * intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
+{
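+ /* one attribute entry per 4-byte register, hence the offset >> 2 index */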
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed by GPU commands
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
+}
+
+/**
+ * intel_gvt_mmio_is_unalign - check if an MMIO register can be accessed unaligned
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
+}
+
+/**
+ * intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed by a GPU command
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO register has a mode mask in its upper 16 bits, false otherwise.
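+ *
+ * For registers with a mode mask, the upper 16 bits of each write act as a
+ * per-bit write enable for the lower 16 bits, so only bits whose mask bit
+ * is set are actually updated.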
+ *
+ */
+bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
+}
+
+/**
+ * intel_vgpu_default_mmio_read - default MMIO read handler
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: data return buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ read_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
+
+/**
+ * intel_vgpu_default_mmio_write - default MMIO write handler
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 254df8bf1f35..027ef558d91c 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -19,17 +19,51 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Dexuan Cui
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
+struct intel_gvt_io_emulation_ops {
+ int (*emulate_cfg_read)(void *, unsigned int, void *, unsigned int);
+ int (*emulate_cfg_write)(void *, unsigned int, void *, unsigned int);
+ int (*emulate_mmio_read)(void *, u64, void *, unsigned int);
+ int (*emulate_mmio_write)(void *, u64, void *, unsigned int);
+};
+
+extern struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops;
+
/*
* Specific GVT-g MPT modules function collections. Currently GVT-g supports
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
*/
struct intel_gvt_mpt {
int (*detect_host)(void);
+ int (*attach_vgpu)(void *vgpu, unsigned long *handle);
+ void (*detach_vgpu)(unsigned long handle);
+ int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
+ unsigned long (*from_virt_to_mfn)(void *p);
+ int (*set_wp_page)(unsigned long handle, u64 gfn);
+ int (*unset_wp_page)(unsigned long handle, u64 gfn);
+ int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
+ unsigned long len);
+ int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
+ unsigned long len);
+ unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
+ int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
+ unsigned long mfn, unsigned int nr, bool map,
+ int type);
+ int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
+ bool map);
};
extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
new file mode 100644
index 000000000000..f7be02ac4be1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -0,0 +1,741 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min he <min.he@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+/* common offset among interrupt control registers */
+#define regbase_to_isr(base) (base)
+#define regbase_to_imr(base) ((base) + 0x4)
+#define regbase_to_iir(base) ((base) + 0x8)
+#define regbase_to_ier(base) ((base) + 0xC)
+
+#define iir_to_regbase(iir) ((iir) - 0x8)
+#define ier_to_regbase(ier) ((ier) - 0xC)
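+
+/*
+ * For example, assuming the GEN8 GT0 group is based at 0x44300, the four
+ * control registers sit at ISR 0x44300, IMR 0x44304, IIR 0x44308 and
+ * IER 0x4430c.
+ */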
+
+#define get_event_virt_handler(irq, e) (irq->events[e].v_handler)
+#define get_irq_info(irq, e) (irq->events[e].info)
+
+#define irq_to_gvt(irq) \
+ container_of(irq, struct intel_gvt, irq)
+
+static void update_upstream_irq(struct intel_vgpu *vgpu,
+ struct intel_gvt_irq_info *info);
+
+static const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
+ [RCS_MI_USER_INTERRUPT] = "Render CS MI USER INTERRUPT",
+ [RCS_DEBUG] = "Render EU debug from SVG",
+ [RCS_MMIO_SYNC_FLUSH] = "Render MMIO sync flush status",
+ [RCS_CMD_STREAMER_ERR] = "Render CS error interrupt",
+ [RCS_PIPE_CONTROL] = "Render PIPE CONTROL notify",
+ [RCS_WATCHDOG_EXCEEDED] = "Render CS Watchdog counter exceeded",
+ [RCS_PAGE_DIRECTORY_FAULT] = "Render page directory faults",
+ [RCS_AS_CONTEXT_SWITCH] = "Render AS Context Switch Interrupt",
+
+ [VCS_MI_USER_INTERRUPT] = "Video CS MI USER INTERRUPT",
+ [VCS_MMIO_SYNC_FLUSH] = "Video MMIO sync flush status",
+ [VCS_CMD_STREAMER_ERR] = "Video CS error interrupt",
+ [VCS_MI_FLUSH_DW] = "Video MI FLUSH DW notify",
+ [VCS_WATCHDOG_EXCEEDED] = "Video CS Watchdog counter exceeded",
+ [VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults",
+ [VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt",
+ [VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT",
+ [VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify",
+ [VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt",
+
+ [BCS_MI_USER_INTERRUPT] = "Blitter CS MI USER INTERRUPT",
+ [BCS_MMIO_SYNC_FLUSH] = "Blitter MMIO sync flush status",
+ [BCS_CMD_STREAMER_ERR] = "Blitter CS error interrupt",
+ [BCS_MI_FLUSH_DW] = "Blitter MI FLUSH DW notify",
+ [BCS_PAGE_DIRECTORY_FAULT] = "Blitter page directory faults",
+ [BCS_AS_CONTEXT_SWITCH] = "Blitter AS Context Switch Interrupt",
+
+ [VECS_MI_FLUSH_DW] = "Video Enhanced Streamer MI FLUSH DW notify",
+ [VECS_AS_CONTEXT_SWITCH] = "VECS Context Switch Interrupt",
+
+ [PIPE_A_FIFO_UNDERRUN] = "Pipe A FIFO underrun",
+ [PIPE_A_CRC_ERR] = "Pipe A CRC error",
+ [PIPE_A_CRC_DONE] = "Pipe A CRC done",
+ [PIPE_A_VSYNC] = "Pipe A vsync",
+ [PIPE_A_LINE_COMPARE] = "Pipe A line compare",
+ [PIPE_A_ODD_FIELD] = "Pipe A odd field",
+ [PIPE_A_EVEN_FIELD] = "Pipe A even field",
+ [PIPE_A_VBLANK] = "Pipe A vblank",
+ [PIPE_B_FIFO_UNDERRUN] = "Pipe B FIFO underrun",
+ [PIPE_B_CRC_ERR] = "Pipe B CRC error",
+ [PIPE_B_CRC_DONE] = "Pipe B CRC done",
+ [PIPE_B_VSYNC] = "Pipe B vsync",
+ [PIPE_B_LINE_COMPARE] = "Pipe B line compare",
+ [PIPE_B_ODD_FIELD] = "Pipe B odd field",
+ [PIPE_B_EVEN_FIELD] = "Pipe B even field",
+ [PIPE_B_VBLANK] = "Pipe B vblank",
+ [PIPE_C_VBLANK] = "Pipe C vblank",
+ [DPST_PHASE_IN] = "DPST phase in event",
+ [DPST_HISTOGRAM] = "DPST histogram event",
+ [GSE] = "GSE",
+ [DP_A_HOTPLUG] = "DP A Hotplug",
+ [AUX_CHANNEL_A] = "AUX Channel A",
+ [PERF_COUNTER] = "Performance counter",
+ [POISON] = "Poison",
+ [GTT_FAULT] = "GTT fault",
+ [PRIMARY_A_FLIP_DONE] = "Primary Plane A flip done",
+ [PRIMARY_B_FLIP_DONE] = "Primary Plane B flip done",
+ [PRIMARY_C_FLIP_DONE] = "Primary Plane C flip done",
+ [SPRITE_A_FLIP_DONE] = "Sprite Plane A flip done",
+ [SPRITE_B_FLIP_DONE] = "Sprite Plane B flip done",
+ [SPRITE_C_FLIP_DONE] = "Sprite Plane C flip done",
+
+ [PCU_THERMAL] = "PCU Thermal Event",
+ [PCU_PCODE2DRIVER_MAILBOX] = "PCU pcode2driver mailbox event",
+
+ [FDI_RX_INTERRUPTS_TRANSCODER_A] = "FDI RX Interrupts Combined A",
+ [AUDIO_CP_CHANGE_TRANSCODER_A] = "Audio CP Change Transcoder A",
+ [AUDIO_CP_REQUEST_TRANSCODER_A] = "Audio CP Request Transcoder A",
+ [FDI_RX_INTERRUPTS_TRANSCODER_B] = "FDI RX Interrupts Combined B",
+ [AUDIO_CP_CHANGE_TRANSCODER_B] = "Audio CP Change Transcoder B",
+ [AUDIO_CP_REQUEST_TRANSCODER_B] = "Audio CP Request Transcoder B",
+ [FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
+ [AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
+ [AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
+ [ERR_AND_DBG] = "South Error and Debug Interrupts Combined",
+ [GMBUS] = "Gmbus",
+ [SDVO_B_HOTPLUG] = "SDVO B hotplug",
+ [CRT_HOTPLUG] = "CRT Hotplug",
+ [DP_B_HOTPLUG] = "DisplayPort/HDMI/DVI B Hotplug",
+ [DP_C_HOTPLUG] = "DisplayPort/HDMI/DVI C Hotplug",
+ [DP_D_HOTPLUG] = "DisplayPort/HDMI/DVI D Hotplug",
+ [AUX_CHANNEL_B] = "AUX Channel B",
+ [AUX_CHANNEL_C] = "AUX Channel C",
+ [AUX_CHANNEL_D] = "AUX Channel D",
+ [AUDIO_POWER_STATE_CHANGE_B] = "Audio Power State change Port B",
+ [AUDIO_POWER_STATE_CHANGE_C] = "Audio Power State change Port C",
+ [AUDIO_POWER_STATE_CHANGE_D] = "Audio Power State change Port D",
+
+ [INTEL_GVT_EVENT_RESERVED] = "RESERVED EVENTS!!!",
+};
+
+static inline struct intel_gvt_irq_info *regbase_to_irq_info(
+ struct intel_gvt *gvt,
+ unsigned int reg)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+ int i;
+
+ for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
+ if (i915_mmio_reg_offset(irq->info[i]->reg_base) == reg)
+ return irq->info[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * intel_vgpu_reg_imr_handler - Generic IMR register emulation write handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IMR register bit change
+ * behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ u32 changed, masked, unmasked;
+ u32 imr = *(u32 *)p_data;
+
+ gvt_dbg_irq("write IMR %x with val %x\n",
+ reg, imr);
+
+ gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
+
+ /* figure out newly masked/unmasked bits */
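+ /*
+ * Worked example: old vIMR 0b1100, new IMR 0b1010 -> changed 0b0110;
+ * masked 0b0010 (a 0->1 bit), unmasked 0b0100 (a 1->0 bit).
+ */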
+ changed = vgpu_vreg(vgpu, reg) ^ imr;
+ masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
+ unmasked = masked ^ changed;
+
+ gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
+ changed, masked, unmasked);
+
+ vgpu_vreg(vgpu, reg) = imr;
+
+ ops->check_pending_irq(vgpu);
+ gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
+ return 0;
+}
+
+/**
+ * intel_vgpu_reg_master_irq_handler - master IRQ write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the master IRQ register on gen8+.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ u32 changed, enabled, disabled;
+ u32 ier = *(u32 *)p_data;
+ u32 virtual_ier = vgpu_vreg(vgpu, reg);
+
+ gvt_dbg_irq("write master irq reg %x with val %x\n",
+ reg, ier);
+
+ gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
+
+ /*
+ * GEN8_MASTER_IRQ is a special irq register,
+ * only bit 31 is allowed to be modified
+ * and treated as an IER bit.
+ */
+ ier &= GEN8_MASTER_IRQ_CONTROL;
+ virtual_ier &= GEN8_MASTER_IRQ_CONTROL;
+ vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
+ vgpu_vreg(vgpu, reg) |= ier;
+
+ /* figure out newly enabled/disabled bits */
+ changed = virtual_ier ^ ier;
+ enabled = (virtual_ier & changed) ^ changed;
+ disabled = enabled ^ changed;
+
+ gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
+ changed, enabled, disabled);
+
+ ops->check_pending_irq(vgpu);
+ gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
+ return 0;
+}
+
+/**
+ * intel_vgpu_reg_ier_handler - Generic IER write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IER register behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+ struct intel_gvt_irq_info *info;
+ u32 changed, enabled, disabled;
+ u32 ier = *(u32 *)p_data;
+
+ gvt_dbg_irq("write IER %x with val %x\n",
+ reg, ier);
+
+ gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
+
+ /* figure out newly enabled/disabled bits */
+ changed = vgpu_vreg(vgpu, reg) ^ ier;
+ enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
+ disabled = enabled ^ changed;
+
+ gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
+ changed, enabled, disabled);
+ vgpu_vreg(vgpu, reg) = ier;
+
+ info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
+ if (WARN_ON(!info))
+ return -EINVAL;
+
+ if (info->has_upstream_irq)
+ update_upstream_irq(vgpu, info);
+
+ ops->check_pending_irq(vgpu);
+ gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
+ return 0;
+}
+
+/**
+ * intel_vgpu_reg_iir_handler - Generic IIR write emulation handler
+ * @vgpu: a vGPU
+ * @reg: register offset written by guest
+ * @p_data: register data written by guest
+ * @bytes: register data length
+ *
+ * This function is used to emulate the generic IIR register behavior.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
+ iir_to_regbase(reg));
+ u32 iir = *(u32 *)p_data;
+
+ gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
+
+ if (WARN_ON(!info))
+ return -EINVAL;
+
+ vgpu_vreg(vgpu, reg) &= ~iir;
+
+ if (info->has_upstream_irq)
+ update_upstream_irq(vgpu, info);
+ return 0;
+}
+
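+/*
+ * Routing table from downstream IRQ groups to bits of their upstream
+ * register. For example, { MASTER, 16, DE_PIPE_A, ~0 } below means that
+ * any bit pending in the DE pipe A IIR (and enabled in its IER) is
+ * mirrored into bit 16 of the master IRQ register; see
+ * update_upstream_irq().
+ */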
+static struct intel_gvt_irq_map gen8_irq_map[] = {
+ { INTEL_GVT_IRQ_INFO_MASTER, 0, INTEL_GVT_IRQ_INFO_GT0, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 1, INTEL_GVT_IRQ_INFO_GT0, 0xffff0000 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 2, INTEL_GVT_IRQ_INFO_GT1, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 3, INTEL_GVT_IRQ_INFO_GT1, 0xffff0000 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 4, INTEL_GVT_IRQ_INFO_GT2, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 6, INTEL_GVT_IRQ_INFO_GT3, 0xffff },
+ { INTEL_GVT_IRQ_INFO_MASTER, 16, INTEL_GVT_IRQ_INFO_DE_PIPE_A, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 17, INTEL_GVT_IRQ_INFO_DE_PIPE_B, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 18, INTEL_GVT_IRQ_INFO_DE_PIPE_C, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 20, INTEL_GVT_IRQ_INFO_DE_PORT, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 22, INTEL_GVT_IRQ_INFO_DE_MISC, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 23, INTEL_GVT_IRQ_INFO_PCH, ~0 },
+ { INTEL_GVT_IRQ_INFO_MASTER, 30, INTEL_GVT_IRQ_INFO_PCU, ~0 },
+ { -1, -1, ~0 },
+};
+
+static void update_upstream_irq(struct intel_vgpu *vgpu,
+ struct intel_gvt_irq_info *info)
+{
+ struct intel_gvt_irq *irq = &vgpu->gvt->irq;
+ struct intel_gvt_irq_map *map = irq->irq_map;
+ struct intel_gvt_irq_info *up_irq_info = NULL;
+ u32 set_bits = 0;
+ u32 clear_bits = 0;
+ int bit;
+ u32 val = vgpu_vreg(vgpu,
+ regbase_to_iir(i915_mmio_reg_offset(info->reg_base)))
+ & vgpu_vreg(vgpu,
+ regbase_to_ier(i915_mmio_reg_offset(info->reg_base)));
+
+ if (!info->has_upstream_irq)
+ return;
+
+ for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
+ if (info->group != map->down_irq_group)
+ continue;
+
+ if (!up_irq_info)
+ up_irq_info = irq->info[map->up_irq_group];
+ else
+ WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
+
+ bit = map->up_irq_bit;
+
+ if (val & map->down_irq_bitmask)
+ set_bits |= (1 << bit);
+ else
+ clear_bits |= (1 << bit);
+ }
+
+ if (WARN_ON(!up_irq_info))
+ return;
+
+ if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
+ u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
+
+ vgpu_vreg(vgpu, isr) &= ~clear_bits;
+ vgpu_vreg(vgpu, isr) |= set_bits;
+ } else {
+ u32 iir = regbase_to_iir(
+ i915_mmio_reg_offset(up_irq_info->reg_base));
+ u32 imr = regbase_to_imr(
+ i915_mmio_reg_offset(up_irq_info->reg_base));
+
+ vgpu_vreg(vgpu, iir) |= (set_bits & ~vgpu_vreg(vgpu, imr));
+ }
+
+ if (up_irq_info->has_upstream_irq)
+ update_upstream_irq(vgpu, up_irq_info);
+}
+
+static void init_irq_map(struct intel_gvt_irq *irq)
+{
+ struct intel_gvt_irq_map *map;
+ struct intel_gvt_irq_info *up_info, *down_info;
+ int up_bit;
+
+ for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
+ up_info = irq->info[map->up_irq_group];
+ up_bit = map->up_irq_bit;
+ down_info = irq->info[map->down_irq_group];
+
+ set_bit(up_bit, up_info->downstream_irq_bitmap);
+ down_info->has_upstream_irq = true;
+
+ gvt_dbg_irq("[up] grp %d bit %d -> [down] grp %d bitmask %x\n",
+ up_info->group, up_bit,
+ down_info->group, map->down_irq_bitmask);
+ }
+}
+
+/* =======================vEvent injection===================== */
+static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
+{
+ return intel_gvt_hypervisor_inject_msi(vgpu);
+}
+
+static void propagate_event(struct intel_gvt_irq *irq,
+ enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
+{
+ struct intel_gvt_irq_info *info;
+ unsigned int reg_base;
+ int bit;
+
+ info = get_irq_info(irq, event);
+ if (WARN_ON(!info))
+ return;
+
+ reg_base = i915_mmio_reg_offset(info->reg_base);
+ bit = irq->events[event].bit;
+
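+ /* latch the event into vIIR only when it is not masked by vIMR */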
+ if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
+ regbase_to_imr(reg_base)))) {
+ gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n",
+ bit, irq_name[event], vgpu->id);
+ set_bit(bit, (void *)&vgpu_vreg(vgpu,
+ regbase_to_iir(reg_base)));
+ }
+}
+
+/* =======================vEvent Handlers===================== */
+static void handle_default_event_virt(struct intel_gvt_irq *irq,
+ enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
+{
+ if (!vgpu->irq.irq_warn_once[event]) {
+ gvt_dbg_core("vgpu%d: IRQ receive event %d (%s)\n",
+ vgpu->id, event, irq_name[event]);
+ vgpu->irq.irq_warn_once[event] = true;
+ }
+ propagate_event(irq, event, vgpu);
+}
+
+/* =====================GEN specific logic======================= */
+/* GEN8 interrupt routines. */
+
+#define DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(regname, regbase) \
+static struct intel_gvt_irq_info gen8_##regname##_info = { \
+ .name = #regname"-IRQ", \
+ .reg_base = (regbase), \
+ .bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] = \
+ INTEL_GVT_EVENT_RESERVED}, \
+}
+
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt0, GEN8_GT_ISR(0));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt1, GEN8_GT_ISR(1));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt2, GEN8_GT_ISR(2));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt3, GEN8_GT_ISR(3));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_a, GEN8_DE_PIPE_ISR(PIPE_A));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_b, GEN8_DE_PIPE_ISR(PIPE_B));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_c, GEN8_DE_PIPE_ISR(PIPE_C));
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_port, GEN8_DE_PORT_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_misc, GEN8_DE_MISC_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(pcu, GEN8_PCU_ISR);
+DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(master, GEN8_MASTER_IRQ);
+
+static struct intel_gvt_irq_info gvt_base_pch_info = {
+ .name = "PCH-IRQ",
+ .reg_base = SDEISR,
+ .bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] =
+ INTEL_GVT_EVENT_RESERVED},
+};
+
+static void gen8_check_pending_irq(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt_irq *irq = &vgpu->gvt->irq;
+ int i;
+
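+ /* bit 31 (GEN8_MASTER_IRQ_CONTROL) is the global interrupt enable */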
+ if (!(vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ)) &
+ GEN8_MASTER_IRQ_CONTROL))
+ return;
+
+ for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
+ struct intel_gvt_irq_info *info = irq->info[i];
+ u32 reg_base;
+
+ if (!info->has_upstream_irq)
+ continue;
+
+ reg_base = i915_mmio_reg_offset(info->reg_base);
+ if ((vgpu_vreg(vgpu, regbase_to_iir(reg_base))
+ & vgpu_vreg(vgpu, regbase_to_ier(reg_base))))
+ update_upstream_irq(vgpu, info);
+ }
+
+ if (vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ))
+ & ~GEN8_MASTER_IRQ_CONTROL)
+ inject_virtual_interrupt(vgpu);
+}
+
+static void gen8_init_irq(struct intel_gvt_irq *irq)
+{
+ struct intel_gvt *gvt = irq_to_gvt(irq);
+
+#define SET_BIT_INFO(s, b, e, i) \
+ do { \
+ s->events[e].bit = b; \
+ s->events[e].info = s->info[i]; \
+ s->info[i]->bit_to_event[b] = e;\
+ } while (0)
+
+#define SET_IRQ_GROUP(s, g, i) \
+ do { \
+ s->info[g] = i; \
+ (i)->group = g; \
+ set_bit(g, s->irq_info_bitmap); \
+ } while (0)
+
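+ /*
+ * Bind each register-group descriptor to its slot in irq->info[] first;
+ * the SET_BIT_INFO() calls below then tie individual register bits to
+ * GVT events in both directions (event -> bit, bit -> event).
+ */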
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_MASTER, &gen8_master_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT0, &gen8_gt0_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT1, &gen8_gt1_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT2, &gen8_gt2_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT3, &gen8_gt3_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_A, &gen8_de_pipe_a_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_B, &gen8_de_pipe_b_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_C, &gen8_de_pipe_c_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PORT, &gen8_de_port_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_MISC, &gen8_de_misc_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCU, &gen8_pcu_info);
+ SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCH, &gvt_base_pch_info);
+
+ /* GEN8 level 2 interrupts. */
+
+ /* GEN8 interrupt GT0 events */
+ SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
+
+ SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0);
+ SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
+
+ /* GEN8 interrupt GT1 events */
+ SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
+
+ if (HAS_BSD2(gvt->dev_priv)) {
+ SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
+ INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
+ INTEL_GVT_IRQ_INFO_GT1);
+ SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH,
+ INTEL_GVT_IRQ_INFO_GT1);
+ }
+
+ /* GEN8 interrupt GT3 events */
+ SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3);
+ SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3);
+ SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3);
+
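+ /* GEN8 interrupt DE PIPE events */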
+ SET_BIT_INFO(irq, 0, PIPE_A_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 0, PIPE_B_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 0, PIPE_C_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+
+ /* GEN8 interrupt DE PORT events */
+ SET_BIT_INFO(irq, 0, AUX_CHANNEL_A, INTEL_GVT_IRQ_INFO_DE_PORT);
+ SET_BIT_INFO(irq, 3, DP_A_HOTPLUG, INTEL_GVT_IRQ_INFO_DE_PORT);
+
+ /* GEN8 interrupt DE MISC events */
+ SET_BIT_INFO(irq, 0, GSE, INTEL_GVT_IRQ_INFO_DE_MISC);
+
+ /* PCH events */
+ SET_BIT_INFO(irq, 17, GMBUS, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 19, CRT_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 21, DP_B_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
+
+ if (IS_BROADWELL(gvt->dev_priv)) {
+ SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
+ SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
+
+ SET_BIT_INFO(irq, 4, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 5, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+
+ SET_BIT_INFO(irq, 4, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 5, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+
+ SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+ SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+ } else if (IS_SKYLAKE(gvt->dev_priv)) {
+ SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
+ SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
+ SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
+
+ SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+ }
+
+ /* GEN8 interrupt PCU events */
+ SET_BIT_INFO(irq, 24, PCU_THERMAL, INTEL_GVT_IRQ_INFO_PCU);
+ SET_BIT_INFO(irq, 25, PCU_PCODE2DRIVER_MAILBOX, INTEL_GVT_IRQ_INFO_PCU);
+}
+
+static struct intel_gvt_irq_ops gen8_irq_ops = {
+ .init_irq = gen8_init_irq,
+ .check_pending_irq = gen8_check_pending_irq,
+};
+
+/**
+ * intel_vgpu_trigger_virtual_event - Trigger a virtual event for a vGPU
+ * @vgpu: a vGPU
+ * @event: interrupt event
+ *
+ * This function is used to trigger a virtual interrupt event for vGPU.
+ * The caller provides the event to be triggered, the framework itself
+ * will emulate the IRQ register bit change.
+ *
+ */
+void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
+ enum intel_gvt_event_type event)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_irq *irq = &gvt->irq;
+ gvt_event_virt_handler_t handler;
+ struct intel_gvt_irq_ops *ops = gvt->irq.ops;
+
+ handler = get_event_virt_handler(irq, event);
+ if (WARN_ON(!handler))
+ return;
+
+ handler(irq, event, vgpu);
+
+ ops->check_pending_irq(vgpu);
+}
+
+static void init_events(struct intel_gvt_irq *irq)
+{
+ int i;
+
+ for (i = 0; i < INTEL_GVT_EVENT_MAX; i++) {
+ irq->events[i].info = NULL;
+ irq->events[i].v_handler = handle_default_event_virt;
+ }
+}
+
+static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
+{
+ struct intel_gvt_vblank_timer *vblank_timer;
+ struct intel_gvt_irq *irq;
+ struct intel_gvt *gvt;
+
+ vblank_timer = container_of(data, struct intel_gvt_vblank_timer, timer);
+ irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer);
+ gvt = container_of(irq, struct intel_gvt, irq);
+
+ intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
+ hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
+ return HRTIMER_RESTART;
+}
+
+/**
+ * intel_gvt_clean_irq - clean up GVT-g IRQ emulation subsystem
+ * @gvt: a GVT device
+ *
+ * This function is called at the driver unloading stage to clean up the
+ * GVT-g IRQ emulation subsystem.
+ *
+ */
+void intel_gvt_clean_irq(struct intel_gvt *gvt)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+
+ hrtimer_cancel(&irq->vblank_timer.timer);
+}
+
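+/* emulated vblank period: 16000000 ns = 16 ms, i.e. a ~60 Hz guest vblank */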
+#define VBLANK_TIMER_PERIOD 16000000
+
+/**
+ * intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
+ * @gvt: a GVT device
+ *
+ * This function is called at the driver loading stage to initialize the
+ * GVT-g IRQ emulation subsystem.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_init_irq(struct intel_gvt *gvt)
+{
+ struct intel_gvt_irq *irq = &gvt->irq;
+ struct intel_gvt_vblank_timer *vblank_timer = &irq->vblank_timer;
+
+ gvt_dbg_core("init irq framework\n");
+
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ irq->ops = &gen8_irq_ops;
+ irq->irq_map = gen8_irq_map;
+ } else {
+ WARN_ON(1);
+ return -ENODEV;
+ }
+
+ /* common event initialization */
+ init_events(irq);
+
+ /* gen specific initialization */
+ irq->ops->init_irq(irq);
+
+ init_irq_map(irq);
+
+ hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ vblank_timer->timer.function = vblank_timer_fn;
+ vblank_timer->period = VBLANK_TIMER_PERIOD;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
new file mode 100644
index 000000000000..5313fb1b33e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Min he <min.he@intel.com>
+ *
+ */
+
+#ifndef _GVT_INTERRUPT_H_
+#define _GVT_INTERRUPT_H_
+
+enum intel_gvt_event_type {
+ RCS_MI_USER_INTERRUPT = 0,
+ RCS_DEBUG,
+ RCS_MMIO_SYNC_FLUSH,
+ RCS_CMD_STREAMER_ERR,
+ RCS_PIPE_CONTROL,
+ RCS_L3_PARITY_ERR,
+ RCS_WATCHDOG_EXCEEDED,
+ RCS_PAGE_DIRECTORY_FAULT,
+ RCS_AS_CONTEXT_SWITCH,
+ RCS_MONITOR_BUFF_HALF_FULL,
+
+ VCS_MI_USER_INTERRUPT,
+ VCS_MMIO_SYNC_FLUSH,
+ VCS_CMD_STREAMER_ERR,
+ VCS_MI_FLUSH_DW,
+ VCS_WATCHDOG_EXCEEDED,
+ VCS_PAGE_DIRECTORY_FAULT,
+ VCS_AS_CONTEXT_SWITCH,
+
+ VCS2_MI_USER_INTERRUPT,
+ VCS2_MI_FLUSH_DW,
+ VCS2_AS_CONTEXT_SWITCH,
+
+ BCS_MI_USER_INTERRUPT,
+ BCS_MMIO_SYNC_FLUSH,
+ BCS_CMD_STREAMER_ERR,
+ BCS_MI_FLUSH_DW,
+ BCS_PAGE_DIRECTORY_FAULT,
+ BCS_AS_CONTEXT_SWITCH,
+
+ VECS_MI_USER_INTERRUPT,
+ VECS_MI_FLUSH_DW,
+ VECS_AS_CONTEXT_SWITCH,
+
+ PIPE_A_FIFO_UNDERRUN,
+ PIPE_B_FIFO_UNDERRUN,
+ PIPE_A_CRC_ERR,
+ PIPE_B_CRC_ERR,
+ PIPE_A_CRC_DONE,
+ PIPE_B_CRC_DONE,
+ PIPE_A_ODD_FIELD,
+ PIPE_B_ODD_FIELD,
+ PIPE_A_EVEN_FIELD,
+ PIPE_B_EVEN_FIELD,
+ PIPE_A_LINE_COMPARE,
+ PIPE_B_LINE_COMPARE,
+ PIPE_C_LINE_COMPARE,
+ PIPE_A_VBLANK,
+ PIPE_B_VBLANK,
+ PIPE_C_VBLANK,
+ PIPE_A_VSYNC,
+ PIPE_B_VSYNC,
+ PIPE_C_VSYNC,
+ PRIMARY_A_FLIP_DONE,
+ PRIMARY_B_FLIP_DONE,
+ PRIMARY_C_FLIP_DONE,
+ SPRITE_A_FLIP_DONE,
+ SPRITE_B_FLIP_DONE,
+ SPRITE_C_FLIP_DONE,
+
+ PCU_THERMAL,
+ PCU_PCODE2DRIVER_MAILBOX,
+
+ DPST_PHASE_IN,
+ DPST_HISTOGRAM,
+ GSE,
+ DP_A_HOTPLUG,
+ AUX_CHANNEL_A,
+ PERF_COUNTER,
+ POISON,
+ GTT_FAULT,
+ ERROR_INTERRUPT_COMBINED,
+
+ FDI_RX_INTERRUPTS_TRANSCODER_A,
+ AUDIO_CP_CHANGE_TRANSCODER_A,
+ AUDIO_CP_REQUEST_TRANSCODER_A,
+ FDI_RX_INTERRUPTS_TRANSCODER_B,
+ AUDIO_CP_CHANGE_TRANSCODER_B,
+ AUDIO_CP_REQUEST_TRANSCODER_B,
+ FDI_RX_INTERRUPTS_TRANSCODER_C,
+ AUDIO_CP_CHANGE_TRANSCODER_C,
+ AUDIO_CP_REQUEST_TRANSCODER_C,
+ ERR_AND_DBG,
+ GMBUS,
+ SDVO_B_HOTPLUG,
+ CRT_HOTPLUG,
+ DP_B_HOTPLUG,
+ DP_C_HOTPLUG,
+ DP_D_HOTPLUG,
+ AUX_CHANNEL_B,
+ AUX_CHANNEL_C,
+ AUX_CHANNEL_D,
+ AUDIO_POWER_STATE_CHANGE_B,
+ AUDIO_POWER_STATE_CHANGE_C,
+ AUDIO_POWER_STATE_CHANGE_D,
+
+ INTEL_GVT_EVENT_RESERVED,
+ INTEL_GVT_EVENT_MAX,
+};
+
+struct intel_gvt_irq;
+struct intel_gvt;
+
+typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
+ enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
+
+struct intel_gvt_irq_ops {
+ void (*init_irq)(struct intel_gvt_irq *irq);
+ void (*check_pending_irq)(struct intel_vgpu *vgpu);
+};
+
+/* the list of physical interrupt control register groups */
+enum intel_gvt_irq_type {
+ INTEL_GVT_IRQ_INFO_GT,
+ INTEL_GVT_IRQ_INFO_DPY,
+ INTEL_GVT_IRQ_INFO_PCH,
+ INTEL_GVT_IRQ_INFO_PM,
+
+ INTEL_GVT_IRQ_INFO_MASTER,
+ INTEL_GVT_IRQ_INFO_GT0,
+ INTEL_GVT_IRQ_INFO_GT1,
+ INTEL_GVT_IRQ_INFO_GT2,
+ INTEL_GVT_IRQ_INFO_GT3,
+ INTEL_GVT_IRQ_INFO_DE_PIPE_A,
+ INTEL_GVT_IRQ_INFO_DE_PIPE_B,
+ INTEL_GVT_IRQ_INFO_DE_PIPE_C,
+ INTEL_GVT_IRQ_INFO_DE_PORT,
+ INTEL_GVT_IRQ_INFO_DE_MISC,
+ INTEL_GVT_IRQ_INFO_AUD,
+ INTEL_GVT_IRQ_INFO_PCU,
+
+ INTEL_GVT_IRQ_INFO_MAX,
+};
+
+#define INTEL_GVT_IRQ_BITWIDTH 32
+
+/* device specific interrupt bit definitions */
+struct intel_gvt_irq_info {
+ char *name;
+ i915_reg_t reg_base;
+ enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH];
+ unsigned long warned;
+ int group;
+ DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH);
+ bool has_upstream_irq;
+};
+
+/* per-event information */
+struct intel_gvt_event_info {
+ int bit; /* map to register bit */
+ int policy; /* forwarding policy */
+ struct intel_gvt_irq_info *info; /* register info */
+ gvt_event_virt_handler_t v_handler; /* for v_event */
+};
+
+struct intel_gvt_irq_map {
+ int up_irq_group;
+ int up_irq_bit;
+ int down_irq_group;
+ u32 down_irq_bitmask;
+};
+
+struct intel_gvt_vblank_timer {
+ struct hrtimer timer;
+ u64 period;
+};
+
+/* structure containing device specific IRQ state */
+struct intel_gvt_irq {
+ struct intel_gvt_irq_ops *ops;
+ struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX];
+ DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX);
+ struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
+ DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
+ struct intel_gvt_irq_map *irq_map;
+ struct intel_gvt_vblank_timer vblank_timer;
+};
+
+int intel_gvt_init_irq(struct intel_gvt *gvt);
+void intel_gvt_clean_irq(struct intel_gvt *gvt);
+
+void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
+ enum intel_gvt_event_type event);
+
+int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes);
+int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes);
+int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
+ unsigned int reg, void *p_data, unsigned int bytes);
+
+int gvt_ring_id_to_pipe_control_notify_event(int ring_id);
+int gvt_ring_id_to_mi_flush_dw_event(int ring_id);
+int gvt_ring_id_to_mi_user_interrupt_event(int ring_id);
+
+#endif /* _GVT_INTERRUPT_H_ */
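Editor's note: intel_gvt_irq_map describes how a second-level (downstream) interrupt group cascades into one bit of an upstream register. A hedged sketch of how such a table could be walked; propagate_pending() and the negative-bit sentinel are assumptions for illustration — in the real code the gen-specific check_pending_irq() op plays this role:

#include <linux/bits.h>

/* Hypothetical walker: raise the upstream bit whenever any pending bit
 * of the mapped downstream group is set. group_pending[] is an assumed
 * per-group pending-bits array indexed by enum intel_gvt_irq_type. */
static void propagate_pending(const struct intel_gvt_irq_map *map,
			      u32 *group_pending)
{
	for (; map->up_irq_bit >= 0; map++) {	/* assumed end sentinel */
		if (group_pending[map->down_irq_group] & map->down_irq_bitmask)
			group_pending[map->up_irq_group] |=
				BIT(map->up_irq_bit);
	}
}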
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
new file mode 100644
index 000000000000..585b01f63254
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Dexuan Cui
+ *
+ * Contributors:
+ * Tina Zhang <tina.zhang@intel.com>
+ * Min He <min.he@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+/**
+ * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ *
+ * Returns:
+ * The MMIO offset of the given GPA
+ */
+int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
+{
+ u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
+ ~GENMASK(3, 0);
+ return gpa - gttmmio_gpa;
+}
+
+#define reg_is_mmio(gvt, reg) \
+ (reg >= 0 && reg < gvt->device_info.mmio_size)
+
+#define reg_is_gtt(gvt, reg) \
+ (reg >= gvt->device_info.gtt_start_offset \
+ && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
+
+/**
+ * intel_vgpu_emulate_mmio_read - emulate MMIO read
+ * @vgpu: a vGPU
+ * @pa: guest physical address
+ * @p_data: data return buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu *vgpu = __vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_mmio_info *mmio;
+ unsigned int offset = 0;
+ int ret = -EINVAL;
+
+ mutex_lock(&gvt->lock);
+
+ if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+ struct intel_vgpu_guest_page *gp;
+
+ gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+ if (gp) {
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
+ p_data, bytes);
+ if (ret) {
+ gvt_err("vgpu%d: guest page read error %d, "
+ "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
+ vgpu->id, ret,
+ gp->gfn, pa, *(u32 *)p_data, bytes);
+ }
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+ }
+
+ offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+
+ if (WARN_ON(bytes > 8))
+ goto err;
+
+ if (reg_is_gtt(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ goto err;
+ if (WARN_ON(bytes != 4 && bytes != 8))
+ goto err;
+ if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ goto err;
+
+ ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
+ p_data, bytes);
+ if (ret)
+ goto err;
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
+ goto err;
+
+ mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
+ if (!mmio && !vgpu->mmio.disable_warn_untrack) {
+ gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
+ vgpu->id, offset, bytes, *(u32 *)p_data);
+
+ if (offset == 0x206c) {
+ gvt_err("------------------------------------------\n");
+ gvt_err("vgpu%d: likely triggers a gfx reset\n",
+ vgpu->id);
+ gvt_err("------------------------------------------\n");
+ vgpu->mmio.disable_warn_untrack = true;
+ }
+ }
+
+ if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+ goto err;
+ }
+
+ if (mmio) {
+ if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
+ if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
+ goto err;
+ if (WARN_ON(mmio->offset != offset))
+ goto err;
+ }
+ ret = mmio->read(vgpu, offset, p_data, bytes);
+ } else
+ ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+
+ if (ret)
+ goto err;
+
+ intel_gvt_mmio_set_accessed(gvt, offset);
+ mutex_unlock(&gvt->lock);
+ return 0;
+err:
+ gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
+ vgpu->id, offset, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+}
+
+/**
+ * intel_vgpu_emulate_mmio_write - emulate MMIO write
+ * @vgpu: a vGPU
+ * @pa: guest physical address
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_mmio_write(void *__vgpu, uint64_t pa,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_vgpu *vgpu = __vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_mmio_info *mmio;
+ unsigned int offset = 0;
+ u32 old_vreg = 0, old_sreg = 0;
+ int ret = -EINVAL;
+
+ mutex_lock(&gvt->lock);
+
+ if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+ struct intel_vgpu_guest_page *gp;
+
+ gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+ if (gp) {
+ ret = gp->handler(gp, pa, p_data, bytes);
+ if (ret) {
+ gvt_err("vgpu%d: guest page write error %d, "
+ "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
+ vgpu->id, ret,
+ gp->gfn, pa, *(u32 *)p_data, bytes);
+ }
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+ }
+
+ offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+
+ if (WARN_ON(bytes > 8))
+ goto err;
+
+ if (reg_is_gtt(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ goto err;
+ if (WARN_ON(bytes != 4 && bytes != 8))
+ goto err;
+ if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ goto err;
+
+ ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
+ p_data, bytes);
+ if (ret)
+ goto err;
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
+ mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
+ if (!mmio && !vgpu->mmio.disable_warn_untrack)
+ gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
+ vgpu->id, offset, bytes, *(u32 *)p_data);
+
+ if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
+ if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+ goto err;
+ }
+
+ if (mmio) {
+ u64 ro_mask = mmio->ro_mask;
+
+ if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
+ if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
+ goto err;
+ if (WARN_ON(mmio->offset != offset))
+ goto err;
+ }
+
+ if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
+ old_vreg = vgpu_vreg(vgpu, offset);
+ old_sreg = vgpu_sreg(vgpu, offset);
+ }
+
+ if (!ro_mask) {
+ ret = mmio->write(vgpu, offset, p_data, bytes);
+ } else {
+ /* Protect RO bits like HW */
+ u64 data = 0;
+
+ /* all register bits are RO. */
+ if (ro_mask == ~(u64)0) {
+ gvt_err("vgpu%d: try to write RO reg %x\n",
+ vgpu->id, offset);
+ ret = 0;
+ goto out;
+ }
+ /* keep the RO bits in the virtual register */
+ memcpy(&data, p_data, bytes);
+ data &= ~mmio->ro_mask;
+ data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
+ ret = mmio->write(vgpu, offset, &data, bytes);
+ }
+
+ /* higher 16bits of mode ctl regs are mask bits for change */
+ if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
+ u32 mask = vgpu_vreg(vgpu, offset) >> 16;
+
+ vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
+ | (vgpu_vreg(vgpu, offset) & mask);
+ vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
+ | (vgpu_sreg(vgpu, offset) & mask);
+ }
+ } else
+ ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+ bytes);
+ if (ret)
+ goto err;
+out:
+ intel_gvt_mmio_set_accessed(gvt, offset);
+ mutex_unlock(&gvt->lock);
+ return 0;
+err:
+ gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
+ vgpu->id, offset, bytes);
+ mutex_unlock(&gvt->lock);
+ return ret;
+}
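Editor's note: the read-only-bit handling in intel_vgpu_emulate_mmio_write() merges the guest's value with the bits the hardware preserves. A standalone restatement of that merge with a worked example; merge_ro_bits() is an illustrative name, not part of the patch:

/* Same merge as the emulate-write path above: keep RO bits from the
 * current virtual register, take everything else from the guest.
 *
 *   guest_val = 0xdeadbeef, vreg_val = 0x12345678, ro_mask = 0xff
 *   -> (0xdeadbeef & ~0xff) | (0x12345678 & 0xff) = 0xdeadbe78
 */
static inline u64 merge_ro_bits(u64 guest_val, u64 vreg_val, u64 ro_mask)
{
	return (guest_val & ~ro_mask) | (vreg_val & ro_mask);
}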
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
new file mode 100644
index 000000000000..9dc739a01892
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Ke Yu
+ * Kevin Tian <kevin.tian@intel.com>
+ * Dexuan Cui
+ *
+ * Contributors:
+ * Tina Zhang <tina.zhang@intel.com>
+ * Min He <min.he@intel.com>
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef _GVT_MMIO_H_
+#define _GVT_MMIO_H_
+
+struct intel_gvt;
+struct intel_vgpu;
+
+#define D_SNB (1 << 0)
+#define D_IVB (1 << 1)
+#define D_HSW (1 << 2)
+#define D_BDW (1 << 3)
+#define D_SKL (1 << 4)
+
+#define D_GEN9PLUS (D_SKL)
+#define D_GEN8PLUS (D_BDW | D_SKL)
+#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL)
+#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+
+#define D_SKL_PLUS (D_SKL)
+#define D_BDW_PLUS (D_BDW | D_SKL)
+#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL)
+#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+
+#define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
+#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
+#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
+
+struct intel_gvt_mmio_info {
+ u32 offset;
+ u32 size;
+ u32 length;
+ u32 addr_mask;
+ u64 ro_mask;
+ u32 device;
+ int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int);
+ int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int);
+ u32 addr_range;
+ struct hlist_node node;
+};
+
+unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
+bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
+
+int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
+void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
+
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+ unsigned int offset);
+#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
+ typeof(reg) __reg = reg; \
+ u32 *offset = (u32 *)&__reg; \
+ *offset; \
+})
+
+int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
+int intel_vgpu_emulate_mmio_read(void *__vgpu, u64 pa, void *p_data,
+ unsigned int bytes);
+int intel_vgpu_emulate_mmio_write(void *__vgpu, u64 pa, void *p_data,
+ unsigned int bytes);
+bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
+ unsigned int offset);
+bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
+void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
+void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
+ unsigned int offset);
+bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
+int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
+#endif
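Editor's note: the D_* macros form a simple bitset over supported platforms, so an MMIO table entry applies to the running device when its mask intersects the value from intel_gvt_get_device_type(). A one-line restatement under that assumption — mmio_entry_applies() is illustrative; intel_gvt_match_device() above presumably performs this exact test:

static inline bool mmio_entry_applies(u32 entry_device,
				      unsigned long device_type)
{
	return (entry_device & device_type) != 0;
}
/* e.g. mmio_entry_applies(D_BDW_PLUS, D_SKL) -> true
 *      mmio_entry_applies(D_PRE_BDW, D_SKL)  -> false */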
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 03601e3ffa7c..67858782d327 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -19,6 +19,15 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Dexuan Cui
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
*/
#ifndef _GVT_MPT_H_
@@ -46,4 +55,215 @@ static inline int intel_gvt_hypervisor_detect_host(void)
return intel_gvt_host.mpt->detect_host();
}
+/**
+ * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
+ * related state inside the hypervisor.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
+{
+ return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
+}
+
+/**
+ * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
+ * related state inside the hypervisor.
+ */
+static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
+{
+ intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
+}
+
+#define MSI_CAP_CONTROL(offset) (offset + 2)
+#define MSI_CAP_ADDRESS(offset) (offset + 4)
+#define MSI_CAP_DATA(offset) (offset + 8)
+#define MSI_CAP_EN 0x1
+
+/**
+ * intel_gvt_hypervisor_inject_msi - inject an MSI interrupt into a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
+{
+ unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
+ u16 control, data;
+ u32 addr;
+ int ret;
+
+ control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
+ addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
+ data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
+
+ /* Do not generate MSI if MSIEN is disabled */
+ if (!(control & MSI_CAP_EN))
+ return 0;
+
+ if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
+ return -EINVAL;
+
+ gvt_dbg_irq("vgpu%d: inject msi address %x data %x\n", vgpu->id, addr,
+ data);
+
+ ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
+ * @p: host kernel virtual address
+ *
+ * Returns:
+ * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
+{
+ return intel_gvt_host.mpt->from_virt_to_mfn(p);
+}
+
+/**
+ * intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
+ * @vgpu: a vGPU
+ * @p: intel_vgpu_guest_page
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ int ret;
+
+ if (p->writeprotection)
+ return 0;
+
+ ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
+ if (ret)
+ return ret;
+ p->writeprotection = true;
+ atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
+ return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
+ * guest page
+ * @vgpu: a vGPU
+ * @p: intel_vgpu_guest_page
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_guest_page *p)
+{
+ int ret;
+
+ if (!p->writeprotection)
+ return 0;
+
+ ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
+ if (ret)
+ return ret;
+ p->writeprotection = false;
+ atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
+ return 0;
+}
+
+/**
+ * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
+ unsigned long gpa, void *buf, unsigned long len)
+{
+ return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
+}
+
+/**
+ * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
+ unsigned long gpa, void *buf, unsigned long len)
+{
+ return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
+}
+
+/**
+ * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ *
+ * Returns:
+ * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
+ */
+static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
+}
+
+enum {
+ GVT_MAP_APERTURE = 0,
+ GVT_MAP_OPREGION,
+};
+
+/**
+ * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ * @mfn: host PFN
+ * @nr: number of PFNs
+ * @map: map or unmap
+ * @type: map type
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
+ struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long mfn, unsigned int nr,
+ bool map, int type)
+{
+ return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
+ map, type);
+}
+
+/**
+ * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
+ * @vgpu: a vGPU
+ * @start: the beginning of the guest physical address region
+ * @end: the end of the guest physical address region
+ * @map: map or unmap
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_trap_area(
+ struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
+{
+ return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
+}
+
#endif /* _GVT_MPT_H_ */
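Editor's note: the write-protection helpers above pair set/unset with the n_write_protected_guest_page counter that the MMIO emulation paths consult before trying a guest-page lookup. A hedged usage sketch; shadow_guest_pt()/unshadow_guest_pt() are hypothetical callers and the shadow-building work is elided:

/* Hypothetical caller pairing the wp helpers above. */
static int shadow_guest_pt(struct intel_vgpu *vgpu,
			   struct intel_vgpu_guest_page *gp)
{
	int ret;

	ret = intel_gvt_hypervisor_set_wp_page(vgpu, gp); /* bumps counter */
	if (ret)
		return ret;

	/* ... populate shadow entries via intel_gvt_hypervisor_read_gpa() ... */

	return 0;
}

static void unshadow_guest_pt(struct intel_vgpu *vgpu,
			      struct intel_vgpu_guest_page *gp)
{
	intel_gvt_hypervisor_unset_wp_page(vgpu, gp);	/* drops counter */
}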
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
new file mode 100644
index 000000000000..973c8a9d0b15
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/acpi.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
+static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
+{
+ void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
+ u8 *buf;
+ int i;
+
+ if (WARN((vgpu_opregion(vgpu)->va),
+ "vgpu%d: opregion has been initialized already.\n",
+ vgpu->id))
+ return -EINVAL;
+
+ vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
+ GFP_DMA32 | __GFP_ZERO,
+ INTEL_GVT_OPREGION_PORDER);
+
+ if (!vgpu_opregion(vgpu)->va)
+ return -ENOMEM;
+
+ memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
+ INTEL_GVT_OPREGION_SIZE);
+
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+ vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+
+ /* for an unknown reason, the value in the LID field is incorrect,
+ * which blocks the Windows guest, so work around it by forcing
+ * it to "OPEN"
+ */
+ buf = (u8 *)vgpu_opregion(vgpu)->va;
+ buf[INTEL_GVT_OPREGION_CLID] = 0x3;
+
+ return 0;
+}
+
+static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
+{
+ u64 mfn;
+ int i, ret;
+
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
+ mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
+ + i * PAGE_SIZE);
+ if (mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to get MFN from VA\n");
+ return -EINVAL;
+ }
+ ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
+ vgpu_opregion(vgpu)->gfn[i],
+ mfn, 1, map, GVT_MAP_OPREGION);
+ if (ret) {
+ gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * intel_vgpu_clean_opregion - clean up the resources used to emulate the OpRegion
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
+{
+ int i;
+
+ gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
+
+ if (!vgpu_opregion(vgpu)->va)
+ return;
+
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
+ vunmap(vgpu_opregion(vgpu)->va);
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
+ if (vgpu_opregion(vgpu)->pages[i]) {
+ put_page(vgpu_opregion(vgpu)->pages[i]);
+ vgpu_opregion(vgpu)->pages[i] = NULL;
+ }
+ }
+ } else {
+ map_vgpu_opregion(vgpu, false);
+ free_pages((unsigned long)vgpu_opregion(vgpu)->va,
+ INTEL_GVT_OPREGION_PORDER);
+ }
+
+ vgpu_opregion(vgpu)->va = NULL;
+}
+
+/**
+ * intel_vgpu_init_opregion - initialize the resources used to emulate the OpRegion
+ * @vgpu: a vGPU
+ * @gpa: guest physical address of opregion
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
+{
+ int ret;
+
+ gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
+
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
+ gvt_dbg_core("emulate opregion from kernel\n");
+
+ ret = init_vgpu_opregion(vgpu, gpa);
+ if (ret)
+ return ret;
+
+ ret = map_vgpu_opregion(vgpu, true);
+ if (ret)
+ return ret;
+ } else {
+ gvt_dbg_core("emulate opregion from userspace\n");
+
+ /*
+ * If the opregion pages are not allocated from the host kernel,
+ * most of the parameters are meaningless
+ */
+ ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
+ 0, /* not used */
+ 0, /* not used */
+ 2, /* not used */
+ 1,
+ GVT_MAP_OPREGION);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * intel_gvt_clean_opregion - clean up host OpRegion resources
+ * @gvt: a GVT device
+ *
+ */
+void intel_gvt_clean_opregion(struct intel_gvt *gvt)
+{
+ iounmap(gvt->opregion.opregion_va);
+ gvt->opregion.opregion_va = NULL;
+}
+
+/**
+ * intel_gvt_init_opregion - initialize host OpRegion resources
+ * @gvt: a GVT device
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_init_opregion(struct intel_gvt *gvt)
+{
+ gvt_dbg_core("init host opregion\n");
+
+ pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
+ &gvt->opregion.opregion_pa);
+
+ gvt->opregion.opregion_va = acpi_os_ioremap(gvt->opregion.opregion_pa,
+ INTEL_GVT_OPREGION_SIZE);
+ if (!gvt->opregion.opregion_va) {
+ gvt_err("fail to map host opregion\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+#define GVT_OPREGION_FUNC(scic) \
+ ({ \
+ u32 __ret; \
+ __ret = (scic & OPREGION_SCIC_FUNC_MASK) >> \
+ OPREGION_SCIC_FUNC_SHIFT; \
+ __ret; \
+ })
+
+#define GVT_OPREGION_SUBFUNC(scic) \
+ ({ \
+ u32 __ret; \
+ __ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >> \
+ OPREGION_SCIC_SUBFUNC_SHIFT; \
+ __ret; \
+ })
+
+static const char *opregion_func_name(u32 func)
+{
+ const char *name = NULL;
+
+ switch (func) {
+ case 0 ... 3:
+ case 5:
+ case 7 ... 15:
+ name = "Reserved";
+ break;
+
+ case 4:
+ name = "Get BIOS Data";
+ break;
+
+ case 6:
+ name = "System BIOS Callbacks";
+ break;
+
+ default:
+ name = "Unknown";
+ break;
+ }
+ return name;
+}
+
+static const char *opregion_subfunc_name(u32 subfunc)
+{
+ const char *name = NULL;
+
+ switch (subfunc) {
+ case 0:
+ name = "Supported Calls";
+ break;
+
+ case 1:
+ name = "Requested Callbacks";
+ break;
+
+ case 2 ... 3:
+ case 8 ... 9:
+ name = "Reserved";
+ break;
+
+ case 5:
+ name = "Boot Display";
+ break;
+
+ case 6:
+ name = "TV-Standard/Video-Connector";
+ break;
+
+ case 7:
+ name = "Internal Graphics";
+ break;
+
+ case 10:
+ name = "Spread Spectrum Clocks";
+ break;
+
+ case 11:
+ name = "Get AKSV";
+ break;
+
+ default:
+ name = "Unknown";
+ break;
+ }
+ return name;
+}
+
+static bool querying_capabilities(u32 scic)
+{
+ u32 func, subfunc;
+
+ func = GVT_OPREGION_FUNC(scic);
+ subfunc = GVT_OPREGION_SUBFUNC(scic);
+
+ if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
+ subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)
+ || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
+ subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS)
+ || (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
+ subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
+ return true;
+ }
+ return false;
+}
+
+/**
+ * intel_vgpu_emulate_opregion_request - emulate an OpRegion request
+ * @vgpu: a vGPU
+ * @swsci: SWSCI request
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
+{
+ u32 *scic, *parm;
+ u32 func, subfunc;
+
+ scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
+ parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+
+ if (!(swsci & SWSCI_SCI_SELECT)) {
+ gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
+ return 0;
+ }
+ /* ignore non 0->1 transitions */
+ if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
+ & SWSCI_SCI_TRIGGER) ||
+ !(swsci & SWSCI_SCI_TRIGGER)) {
+ return 0;
+ }
+
+ func = GVT_OPREGION_FUNC(*scic);
+ subfunc = GVT_OPREGION_SUBFUNC(*scic);
+ if (!querying_capabilities(*scic)) {
+ gvt_err("vgpu%d: requesting runtime service: func \"%s\","
+ " subfunc \"%s\"\n",
+ vgpu->id,
+ opregion_func_name(func),
+ opregion_subfunc_name(subfunc));
+ /*
+ * emulate exit status of function call, '0' means
+ * "failure, generic, unsupported or unknown cause"
+ */
+ *scic &= ~OPREGION_SCIC_EXIT_MASK;
+ return 0;
+ }
+
+ *scic = 0;
+ *parm = 0;
+ return 0;
+}
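Editor's note: a worked decode of the SCIC field using the masks defined in reg.h, matching the capability-query path above:

/*
 * scic = 0x00000008:
 *   func    = (scic & OPREGION_SCIC_FUNC_MASK) >> 1    = 4  "Get BIOS Data"
 *   subfunc = (scic & OPREGION_SCIC_SUBFUNC_MASK) >> 8 = 0  "Supported Calls"
 *
 * querying_capabilities() returns true for this pair, so SCIC and PARM
 * are zeroed rather than the request being reported as unsupported.
 */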
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
new file mode 100644
index 000000000000..0dfe789d8f02
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_REG_H
+#define _GVT_REG_H
+
+#define INTEL_GVT_PCI_CLASS_VGA_OTHER 0x80
+
+#define INTEL_GVT_PCI_GMCH_CONTROL 0x50
+#define BDW_GMCH_GMS_SHIFT 8
+#define BDW_GMCH_GMS_MASK 0xff
+
+#define INTEL_GVT_PCI_SWSCI 0xe8
+#define SWSCI_SCI_SELECT (1 << 15)
+#define SWSCI_SCI_TRIGGER 1
+
+#define INTEL_GVT_PCI_OPREGION 0xfc
+
+#define INTEL_GVT_OPREGION_CLID 0x1AC
+#define INTEL_GVT_OPREGION_SCIC 0x200
+#define OPREGION_SCIC_FUNC_MASK 0x1E
+#define OPREGION_SCIC_FUNC_SHIFT 1
+#define OPREGION_SCIC_SUBFUNC_MASK 0xFF00
+#define OPREGION_SCIC_SUBFUNC_SHIFT 8
+#define OPREGION_SCIC_EXIT_MASK 0xE0
+#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA 4
+#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS 6
+#define INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS 0
+#define INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS 1
+#define INTEL_GVT_OPREGION_PARM 0x204
+
+#define INTEL_GVT_OPREGION_PAGES 2
+#define INTEL_GVT_OPREGION_PORDER 1
+#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
+
+#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
+
+#define _REG_VECS_EXCC 0x1A028
+#define _REG_VCS2_EXCC 0x1c028
+
+#define _REG_701C0(pipe, plane) (0x701c0 + (pipe) * 0x1000 + ((plane) - 1) * 0x100)
+#define _REG_701C4(pipe, plane) (0x701c4 + (pipe) * 0x1000 + ((plane) - 1) * 0x100)
+
+#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
+ ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
+
+#define FORCEWAKE_RENDER_GEN9_REG 0xa278
+#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
+#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
+#define FORCEWAKE_ACK_BLITTER_GEN9_REG 0x130044
+#define FORCEWAKE_MEDIA_GEN9_REG 0xa270
+#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
+#define FORCEWAKE_ACK_HSW_REG 0x130044
+
+#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
+#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
+#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
+#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
+
+#endif
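Editor's note: a worked decode of the ring-buffer control masks above; ring_ctl_buf_size() is an illustrative restatement of _RING_CTL_BUF_SIZE with GTT_PAGE_SIZE assumed to be 4096:

static inline u32 ring_ctl_buf_size(u32 ctl)
{
	return (ctl & RB_TAIL_SIZE_MASK) + 4096;	/* GTT_PAGE_SIZE */
}
/* e.g. ctl = 0x0001f001: size field 0x1f000, +4096 -> 0x20000 bytes
 * (32 pages, 128KB); the low enable bit falls outside the mask. */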
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
new file mode 100644
index 000000000000..feebb65ba641
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+struct render_mmio {
+ int ring_id;
+ i915_reg_t reg;
+ u32 mask;
+ bool in_context;
+ u32 value;
+};
+
+static struct render_mmio gen8_render_mmio_list[] = {
+ {RCS, _MMIO(0x229c), 0xffff, false},
+ {RCS, _MMIO(0x2248), 0x0, false},
+ {RCS, _MMIO(0x2098), 0x0, false},
+ {RCS, _MMIO(0x20c0), 0xffff, true},
+ {RCS, _MMIO(0x24d0), 0, false},
+ {RCS, _MMIO(0x24d4), 0, false},
+ {RCS, _MMIO(0x24d8), 0, false},
+ {RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x7004), 0xffff, true},
+ {RCS, _MMIO(0x7008), 0xffff, true},
+ {RCS, _MMIO(0x7000), 0xffff, true},
+ {RCS, _MMIO(0x7010), 0xffff, true},
+ {RCS, _MMIO(0x7300), 0xffff, true},
+ {RCS, _MMIO(0x83a4), 0xffff, true},
+
+ {BCS, _MMIO(0x2229c), 0xffff, false},
+ {BCS, _MMIO(0x2209c), 0xffff, false},
+ {BCS, _MMIO(0x220c0), 0xffff, false},
+ {BCS, _MMIO(0x22098), 0x0, false},
+ {BCS, _MMIO(0x22028), 0x0, false},
+};
+
+static struct render_mmio gen9_render_mmio_list[] = {
+ {RCS, _MMIO(0x229c), 0xffff, false},
+ {RCS, _MMIO(0x2248), 0x0, false},
+ {RCS, _MMIO(0x2098), 0x0, false},
+ {RCS, _MMIO(0x20c0), 0xffff, true},
+ {RCS, _MMIO(0x24d0), 0, false},
+ {RCS, _MMIO(0x24d4), 0, false},
+ {RCS, _MMIO(0x24d8), 0, false},
+ {RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x7004), 0xffff, true},
+ {RCS, _MMIO(0x7008), 0xffff, true},
+ {RCS, _MMIO(0x7000), 0xffff, true},
+ {RCS, _MMIO(0x7010), 0xffff, true},
+ {RCS, _MMIO(0x7300), 0xffff, true},
+ {RCS, _MMIO(0x83a4), 0xffff, true},
+
+ {RCS, _MMIO(0x40e0), 0, false},
+ {RCS, _MMIO(0x40e4), 0, false},
+ {RCS, _MMIO(0x2580), 0xffff, true},
+ {RCS, _MMIO(0x7014), 0xffff, true},
+ {RCS, _MMIO(0x20ec), 0xffff, false},
+ {RCS, _MMIO(0xb118), 0, false},
+ {RCS, _MMIO(0xe100), 0xffff, true},
+ {RCS, _MMIO(0xe180), 0xffff, true},
+ {RCS, _MMIO(0xe184), 0xffff, true},
+ {RCS, _MMIO(0xe188), 0xffff, true},
+ {RCS, _MMIO(0xe194), 0xffff, true},
+ {RCS, _MMIO(0x4de0), 0, false},
+ {RCS, _MMIO(0x4de4), 0, false},
+ {RCS, _MMIO(0x4de8), 0, false},
+ {RCS, _MMIO(0x4dec), 0, false},
+ {RCS, _MMIO(0x4df0), 0, false},
+ {RCS, _MMIO(0x4df4), 0, false},
+
+ {BCS, _MMIO(0x2229c), 0xffff, false},
+ {BCS, _MMIO(0x2209c), 0xffff, false},
+ {BCS, _MMIO(0x220c0), 0xffff, false},
+ {BCS, _MMIO(0x22098), 0x0, false},
+ {BCS, _MMIO(0x22028), 0x0, false},
+
+ {VCS2, _MMIO(0x1c028), 0xffff, false},
+
+ {VECS, _MMIO(0x1a028), 0xffff, false},
+};
+
+static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
+static u32 gen9_render_mocs_L3[32];
+
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t reg;
+ u32 regs[] = {
+ [RCS] = 0x4260,
+ [VCS] = 0x4264,
+ [VCS2] = 0x4268,
+ [BCS] = 0x426c,
+ [VECS] = 0x4270,
+ };
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
+ return;
+
+ reg = _MMIO(regs[ring_id]);
+
+ I915_WRITE(reg, 0x1);
+
+ if (wait_for_atomic((I915_READ(reg) == 0), 50))
+ gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+
+ gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+}
+
+static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t offset, l3_offset;
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int i;
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!IS_SKYLAKE(dev_priv))
+ return;
+
+ offset.reg = regs[ring_id];
+ for (i = 0; i < 64; i++) {
+ gen9_render_mocs[ring_id][i] = I915_READ(offset);
+ I915_WRITE(offset, vgpu_vreg(vgpu, offset));
+ POSTING_READ(offset);
+ offset.reg += 4;
+ }
+
+ if (ring_id == RCS) {
+ l3_offset.reg = 0xb020;
+ for (i = 0; i < 32; i++) {
+ gen9_render_mocs_L3[i] = I915_READ(l3_offset);
+ I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
+ POSTING_READ(l3_offset);
+ l3_offset.reg += 4;
+ }
+ }
+}
+
+static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ i915_reg_t offset, l3_offset;
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int i;
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!IS_SKYLAKE(dev_priv))
+ return;
+
+ offset.reg = regs[ring_id];
+ for (i = 0; i < 64; i++) {
+ vgpu_vreg(vgpu, offset) = I915_READ(offset);
+ I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
+ POSTING_READ(offset);
+ offset.reg += 4;
+ }
+
+ if (ring_id == RCS) {
+ l3_offset.reg = 0xb020;
+ for (i = 0; i < 32; i++) {
+ vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
+ I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
+ POSTING_READ(l3_offset);
+ l3_offset.reg += 4;
+ }
+ }
+}
+
+void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct render_mmio *mmio;
+ u32 v;
+ int i, array_size;
+
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ mmio = gen9_render_mmio_list;
+ array_size = ARRAY_SIZE(gen9_render_mmio_list);
+ load_mocs(vgpu, ring_id);
+ } else {
+ mmio = gen8_render_mmio_list;
+ array_size = ARRAY_SIZE(gen8_render_mmio_list);
+ }
+
+ for (i = 0; i < array_size; i++, mmio++) {
+ if (mmio->ring_id != ring_id)
+ continue;
+
+ mmio->value = I915_READ(mmio->reg);
+ if (mmio->mask)
+ v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
+ else
+ v = vgpu_vreg(vgpu, mmio->reg);
+
+ I915_WRITE(mmio->reg, v);
+ POSTING_READ(mmio->reg);
+
+ gvt_dbg_render("load reg %x old %x new %x\n",
+ i915_mmio_reg_offset(mmio->reg),
+ mmio->value, v);
+ }
+ handle_tlb_pending_event(vgpu, ring_id);
+}
+
+void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct render_mmio *mmio;
+ u32 v;
+ int i, array_size;
+
+ if (IS_SKYLAKE(dev_priv)) {
+ mmio = gen9_render_mmio_list;
+ array_size = ARRAY_SIZE(gen9_render_mmio_list);
+ restore_mocs(vgpu, ring_id);
+ } else {
+ mmio = gen8_render_mmio_list;
+ array_size = ARRAY_SIZE(gen8_render_mmio_list);
+ }
+
+ for (i = 0; i < array_size; i++, mmio++) {
+ if (mmio->ring_id != ring_id)
+ continue;
+
+ vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
+
+ if (mmio->mask) {
+ vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
+ v = mmio->value | (mmio->mask << 16);
+ } else
+ v = mmio->value;
+
+ I915_WRITE(mmio->reg, v);
+ POSTING_READ(mmio->reg);
+
+ gvt_dbg_render("restore reg %x old %x new %x\n",
+ i915_mmio_reg_offset(mmio->reg),
+ mmio->value, v);
+ }
+}
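Editor's note: the mmio->mask handling above relies on the hardware's masked-register convention — the high 16 bits of the written value select which of the low 16 bits take effect. A one-line restatement; masked_reg_value() is an illustrative name:

/* Assert every bit covered by 'mask' so the low half of 'value' is
 * applied; bits not covered by the mask are left untouched by hardware.
 * Mirrors: v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16); */
static inline u32 masked_reg_value(u32 value, u32 mask)
{
	return value | (mask << 16);
}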
diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/render.h
new file mode 100644
index 000000000000..dac1a3cc458b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/render.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#ifndef __GVT_RENDER_H__
+#define __GVT_RENDER_H__
+
+void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id);
+
+void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
new file mode 100644
index 000000000000..1df6a5460f3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Anhua Xu
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
+{
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ if (!list_empty(workload_q_head(vgpu, i)))
+ return true;
+ }
+
+ return false;
+}
+
+static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
+ /* no target to schedule */
+ if (!scheduler->next_vgpu)
+ return;
+
+ gvt_dbg_sched("try to schedule next vgpu %d\n",
+ scheduler->next_vgpu->id);
+
+ /*
+ * after the flag is set, the workload dispatch thread will
+ * stop dispatching workloads for the current vgpu
+ */
+ scheduler->need_reschedule = true;
+
+ /* still have uncompleted workloads? */
+ for_each_engine(engine, gvt->dev_priv, i) {
+ if (scheduler->current_workload[i]) {
+ gvt_dbg_sched("still have running workload\n");
+ return;
+ }
+ }
+
+ gvt_dbg_sched("switch to next vgpu %d\n",
+ scheduler->next_vgpu->id);
+
+ /* switch current vgpu */
+ scheduler->current_vgpu = scheduler->next_vgpu;
+ scheduler->next_vgpu = NULL;
+
+ scheduler->need_reschedule = false;
+
+ /* wake up workload dispatch thread */
+ for_each_engine(engine, gvt->dev_priv, i)
+ wake_up(&scheduler->waitq[i]);
+}
+
+struct tbs_vgpu_data {
+ struct list_head list;
+ struct intel_vgpu *vgpu;
+ /* put some per-vgpu sched stats here */
+};
+
+struct tbs_sched_data {
+ struct intel_gvt *gvt;
+ struct delayed_work work;
+ unsigned long period;
+ struct list_head runq_head;
+};
+
+#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
+
+static void tbs_sched_func(struct work_struct *work)
+{
+ struct tbs_sched_data *sched_data = container_of(work,
+ struct tbs_sched_data, work.work);
+ struct tbs_vgpu_data *vgpu_data;
+
+ struct intel_gvt *gvt = sched_data->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+ struct intel_vgpu *vgpu = NULL;
+ struct list_head *pos, *head;
+
+ mutex_lock(&gvt->lock);
+
+ /* no vgpu in the run queue, or a next vgpu has already been chosen */
+ if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
+ goto out;
+
+ if (scheduler->current_vgpu) {
+ vgpu_data = scheduler->current_vgpu->sched_data;
+ head = &vgpu_data->list;
+ } else {
+ gvt_dbg_sched("no current vgpu, search from queue head\n");
+ head = &sched_data->runq_head;
+ }
+
+ /* search a vgpu with pending workload */
+ list_for_each(pos, head) {
+ if (pos == &sched_data->runq_head)
+ continue;
+
+ vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+ if (!vgpu_has_pending_workload(vgpu_data->vgpu))
+ continue;
+
+ vgpu = vgpu_data->vgpu;
+ break;
+ }
+
+ if (vgpu) {
+ scheduler->next_vgpu = vgpu;
+ gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
+ }
+out:
+ if (scheduler->next_vgpu) {
+ gvt_dbg_sched("try to schedule next vgpu %d\n",
+ scheduler->next_vgpu->id);
+ try_to_schedule_next_vgpu(gvt);
+ }
+
+ /*
+ * still have vgpu on runq
+ * or the last schedule hasn't finished due to a running workload
+ */
+ if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
+ schedule_delayed_work(&sched_data->work, sched_data->period);
+
+ mutex_unlock(&gvt->lock);
+}
+
+static int tbs_sched_init(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &gvt->scheduler;
+
+ struct tbs_sched_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&data->runq_head);
+ INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+ data->period = GVT_DEFAULT_TIME_SLICE;
+ data->gvt = gvt;
+
+ scheduler->sched_data = data;
+ return 0;
+}
+
+static void tbs_sched_clean(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &gvt->scheduler;
+ struct tbs_sched_data *data = scheduler->sched_data;
+
+ cancel_delayed_work(&data->work);
+ kfree(data);
+ scheduler->sched_data = NULL;
+}
+
+static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
+{
+ struct tbs_vgpu_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->vgpu = vgpu;
+ INIT_LIST_HEAD(&data->list);
+
+ vgpu->sched_data = data;
+ return 0;
+}
+
+static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
+{
+ kfree(vgpu->sched_data);
+ vgpu->sched_data = NULL;
+}
+
+static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
+{
+ struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
+ struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+
+ if (!list_empty(&vgpu_data->list))
+ return;
+
+ list_add_tail(&vgpu_data->list, &sched_data->runq_head);
+ schedule_delayed_work(&sched_data->work, sched_data->period);
+}
+
+static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
+{
+ struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+
+ list_del_init(&vgpu_data->list);
+}
+
+static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
+ .init = tbs_sched_init,
+ .clean = tbs_sched_clean,
+ .init_vgpu = tbs_sched_init_vgpu,
+ .clean_vgpu = tbs_sched_clean_vgpu,
+ .start_schedule = tbs_sched_start_schedule,
+ .stop_schedule = tbs_sched_stop_schedule,
+};
+
+int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
+{
+ gvt->scheduler.sched_ops = &tbs_schedule_ops;
+
+ return gvt->scheduler.sched_ops->init(gvt);
+}
+
+void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
+{
+ gvt->scheduler.sched_ops->clean(gvt);
+}
+
+int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
+{
+ return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+}
+
+void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
+{
+ vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+}
+
+void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
+{
+ gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+
+ vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+}
+
+void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt_workload_scheduler *scheduler =
+ &vgpu->gvt->scheduler;
+
+ gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
+
+ scheduler->sched_ops->stop_schedule(vgpu);
+
+ if (scheduler->next_vgpu == vgpu)
+ scheduler->next_vgpu = NULL;
+
+ if (scheduler->current_vgpu == vgpu) {
+ /* stop workload dispatching */
+ scheduler->need_reschedule = true;
+ scheduler->current_vgpu = NULL;
+ }
+}
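Editor's note: one caveat on GVT_DEFAULT_TIME_SLICE above. `1 * HZ / 1000` is one millisecond in jiffies only when HZ >= 1000; with HZ=250 the integer division truncates to 0 and the delayed work would be scheduled immediately. A hedged alternative spelling (GVT_DEFAULT_TIME_SLICE_SAFE is an illustrative name):

#include <linux/jiffies.h>

/* msecs_to_jiffies() rounds up, so 1ms never truncates to zero. */
#define GVT_DEFAULT_TIME_SLICE_SAFE	msecs_to_jiffies(1)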
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
new file mode 100644
index 000000000000..bb8b9097e41a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Anhua Xu
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#ifndef __GVT_SCHED_POLICY__
+#define __GVT_SCHED_POLICY__
+
+struct intel_gvt_sched_policy_ops {
+ int (*init)(struct intel_gvt *gvt);
+ void (*clean)(struct intel_gvt *gvt);
+ int (*init_vgpu)(struct intel_vgpu *vgpu);
+ void (*clean_vgpu)(struct intel_vgpu *vgpu);
+ void (*start_schedule)(struct intel_vgpu *vgpu);
+ void (*stop_schedule)(struct intel_vgpu *vgpu);
+};
+
+int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
+
+void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
+
+int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu);
+
+void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu);
+
+void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
+
+void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
+
+#endif
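Editor's note: every hook in intel_gvt_sched_policy_ops is invoked unconditionally by the wrappers in sched_policy.c, so an alternative policy must provide them all. A minimal no-op skeleton under that assumption; all noop_* names are hypothetical:

static int noop_sched_init(struct intel_gvt *gvt) { return 0; }
static void noop_sched_clean(struct intel_gvt *gvt) { }
static int noop_sched_init_vgpu(struct intel_vgpu *vgpu) { return 0; }
static void noop_sched_clean_vgpu(struct intel_vgpu *vgpu) { }
static void noop_sched_start(struct intel_vgpu *vgpu) { }
static void noop_sched_stop(struct intel_vgpu *vgpu) { }

static struct intel_gvt_sched_policy_ops noop_schedule_ops = {
	.init		= noop_sched_init,
	.clean		= noop_sched_clean,
	.init_vgpu	= noop_sched_init_vgpu,
	.clean_vgpu	= noop_sched_clean_vgpu,
	.start_schedule	= noop_sched_start,
	.stop_schedule	= noop_sched_stop,
};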
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
new file mode 100644
index 000000000000..e96eaeebeb0a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -0,0 +1,578 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ *
+ */
+
+#include <linux/kthread.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define RING_CTX_OFF(x) \
+ offsetof(struct execlist_ring_context, x)
+
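+/*
+ * The execlist ring context keeps the PDP entries in descending order
+ * (pdp3_UDW down to pdp0_LDW), so the guest-supplied pdp[8] array is
+ * copied in reverse below (editor's note).
+ */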
+static void set_context_pdp_root_pointer(
+ struct execlist_ring_context *ring_context,
+ u32 pdp[8])
+{
+ struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ pdp_pair[i].val = pdp[7 - i];
+}
+
+static int populate_shadow_context(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+ void *dst;
+ unsigned long context_gpa, context_page_num;
+ int i;
+
+ gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+ workload->ctx_desc.lrca);
+
+ context_page_num = intel_lr_context_size(
+ gvt->dev_priv->engine[ring_id]);
+
+ context_page_num = context_page_num >> PAGE_SHIFT;
+
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ context_page_num = 19;
+
+ i = 2;
+
+ while (i < context_page_num) {
+ context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((workload->ctx_desc.lrca + i) <<
+ GTT_PAGE_SHIFT));
+ if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("Invalid guest context descriptor\n");
+ return -EINVAL;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+ dst = kmap_atomic(page);
+ intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+ GTT_PAGE_SIZE);
+ kunmap_atomic(dst);
+ i++;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap_atomic(page);
+
+#define COPY_REG(name) \
+ intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+
+ COPY_REG(ctx_ctrl);
+ COPY_REG(ctx_timestamp);
+
+ if (ring_id == RCS) {
+ COPY_REG(bb_per_ctx_ptr);
+ COPY_REG(rcs_indirect_ctx);
+ COPY_REG(rcs_indirect_ctx_offset);
+ }
+#undef COPY_REG
+
+ set_context_pdp_root_pointer(shadow_ring_context,
+ workload->shadow_mm->shadow_page_table);
+
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ workload->ring_context_gpa +
+ sizeof(*shadow_ring_context),
+ (void *)shadow_ring_context +
+ sizeof(*shadow_ring_context),
+ GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+
+ kunmap_atomic(shadow_ring_context);
+ return 0;
+}
+
+static int shadow_context_status_change(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_vgpu *vgpu = container_of(nb,
+ struct intel_vgpu, shadow_ctx_notifier_block);
+ struct drm_i915_gem_request *req =
+ (struct drm_i915_gem_request *)data;
+ struct intel_gvt_workload_scheduler *scheduler =
+ &vgpu->gvt->scheduler;
+ struct intel_vgpu_workload *workload =
+ scheduler->current_workload[req->engine->id];
+
+ switch (action) {
+ case INTEL_CONTEXT_SCHEDULE_IN:
+ intel_gvt_load_render_mmio(workload->vgpu,
+ workload->ring_id);
+ atomic_set(&workload->shadow_ctx_active, 1);
+ break;
+ case INTEL_CONTEXT_SCHEDULE_OUT:
+ intel_gvt_restore_render_mmio(workload->vgpu,
+ workload->ring_id);
+ atomic_set(&workload->shadow_ctx_active, 0);
+ break;
+ default:
+ WARN_ON(1);
+ return NOTIFY_OK;
+ }
+ wake_up(&workload->shadow_ctx_status_wq);
+ return NOTIFY_OK;
+}
+
+static int dispatch_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct drm_i915_gem_request *rq;
+ int ret;
+
+ gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
+ ring_id, workload);
+
+ shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
+ GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+ rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_err("fail to allocate gem request\n");
+ workload->status = PTR_ERR(rq);
+ return workload->status;
+ }
+
+ gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
+
+ workload->req = i915_gem_request_get(rq);
+
+ mutex_lock(&gvt->lock);
+
+ ret = intel_gvt_scan_and_shadow_workload(workload);
+ if (ret)
+ goto err;
+
+ ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
+ if (ret)
+ goto err;
+
+ ret = populate_shadow_context(workload);
+ if (ret)
+ goto err;
+
+ if (workload->prepare) {
+ ret = workload->prepare(workload);
+ if (ret)
+ goto err;
+ }
+
+ mutex_unlock(&gvt->lock);
+
+ gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
+ ring_id, workload->req);
+
+ i915_add_request_no_flush(rq);
+ workload->dispatched = true;
+ return 0;
+err:
+ workload->status = ret;
+
+ mutex_unlock(&gvt->lock);
+
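+	/*
+	 * An allocated request reserves ring space and must still be
+	 * committed, so the request is added even on failure (editor's
+	 * note).
+	 */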
+ i915_add_request_no_flush(rq);
+ return ret;
+}
+
+static struct intel_vgpu_workload *pick_next_workload(
+ struct intel_gvt *gvt, int ring_id)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct intel_vgpu_workload *workload = NULL;
+
+ mutex_lock(&gvt->lock);
+
+	/*
+	 * bail out if there is no current vgpu, a reschedule is
+	 * pending, or the current vgpu has no queued workload
+	 */
+ if (!scheduler->current_vgpu) {
+ gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
+ goto out;
+ }
+
+ if (scheduler->need_reschedule) {
+ gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
+ goto out;
+ }
+
+ if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
+ gvt_dbg_sched("ring id %d stop - no available workload\n",
+ ring_id);
+ goto out;
+ }
+
+	/*
+	 * still have a current workload; maybe the workload dispatcher
+	 * failed to submit it for some reason, so resubmit it.
+	 */
+ if (scheduler->current_workload[ring_id]) {
+ workload = scheduler->current_workload[ring_id];
+ gvt_dbg_sched("ring id %d still have current workload %p\n",
+ ring_id, workload);
+ goto out;
+ }
+
+	/*
+	 * pick a workload as the current workload.
+	 * once the current workload is set, schedule policy routines
+	 * will wait until it is finished when trying to schedule out
+	 * a vgpu.
+	 */
+ scheduler->current_workload[ring_id] = container_of(
+ workload_q_head(scheduler->current_vgpu, ring_id)->next,
+ struct intel_vgpu_workload, list);
+
+ workload = scheduler->current_workload[ring_id];
+
+ gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
+
+ atomic_inc(&workload->vgpu->running_workload_num);
+out:
+ mutex_unlock(&gvt->lock);
+ return workload;
+}
+
+static void update_guest_context(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+ void *src;
+ unsigned long context_gpa, context_page_num;
+ int i;
+
+ gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
+ workload->ctx_desc.lrca);
+
+ context_page_num = intel_lr_context_size(
+ gvt->dev_priv->engine[ring_id]);
+
+ context_page_num = context_page_num >> PAGE_SHIFT;
+
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ context_page_num = 19;
+
+ i = 2;
+
+ while (i < context_page_num) {
+ context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((workload->ctx_desc.lrca + i) <<
+ GTT_PAGE_SHIFT));
+ if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("invalid guest context descriptor\n");
+ return;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+ src = kmap_atomic(page);
+ intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
+ GTT_PAGE_SIZE);
+ kunmap_atomic(src);
+ i++;
+ }
+
+ intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
+ RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap_atomic(page);
+
+#define COPY_REG(name) \
+ intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+
+ COPY_REG(ctx_ctrl);
+ COPY_REG(ctx_timestamp);
+
+#undef COPY_REG
+
+ intel_gvt_hypervisor_write_gpa(vgpu,
+ workload->ring_context_gpa +
+ sizeof(*shadow_ring_context),
+ (void *)shadow_ring_context +
+ sizeof(*shadow_ring_context),
+ GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+
+ kunmap_atomic(shadow_ring_context);
+}
+
+static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct intel_vgpu_workload *workload;
+ int event;
+
+ mutex_lock(&gvt->lock);
+
+ workload = scheduler->current_workload[ring_id];
+
+ if (!workload->status && !workload->vgpu->resetting) {
+ wait_event(workload->shadow_ctx_status_wq,
+ !atomic_read(&workload->shadow_ctx_active));
+
+ update_guest_context(workload);
+
+ for_each_set_bit(event, workload->pending_events,
+ INTEL_GVT_EVENT_MAX)
+ intel_vgpu_trigger_virtual_event(workload->vgpu,
+ event);
+ }
+
+ gvt_dbg_sched("ring id %d complete workload %p status %d\n",
+ ring_id, workload, workload->status);
+
+ scheduler->current_workload[ring_id] = NULL;
+
+ atomic_dec(&workload->vgpu->running_workload_num);
+
+ list_del_init(&workload->list);
+ workload->complete(workload);
+
+ wake_up(&scheduler->workload_complete_wq);
+ mutex_unlock(&gvt->lock);
+}
+
+struct workload_thread_param {
+ struct intel_gvt *gvt;
+ int ring_id;
+};
+
+static DEFINE_MUTEX(scheduler_mutex);
+
+static int workload_thread(void *priv)
+{
+ struct workload_thread_param *p = (struct workload_thread_param *)priv;
+ struct intel_gvt *gvt = p->gvt;
+ int ring_id = p->ring_id;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct intel_vgpu_workload *workload = NULL;
+ int ret;
+ bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+
+ kfree(p);
+
+ gvt_dbg_core("workload thread for ring %d started\n", ring_id);
+
+ while (!kthread_should_stop()) {
+ ret = wait_event_interruptible(scheduler->waitq[ring_id],
+ kthread_should_stop() ||
+ (workload = pick_next_workload(gvt, ring_id)));
+
+ WARN_ON_ONCE(ret);
+
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&scheduler_mutex);
+
+ gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
+ workload->ring_id, workload,
+ workload->vgpu->id);
+
+ intel_runtime_pm_get(gvt->dev_priv);
+
+ gvt_dbg_sched("ring id %d will dispatch workload %p\n",
+ workload->ring_id, workload);
+
+ if (need_force_wake)
+ intel_uncore_forcewake_get(gvt->dev_priv,
+ FORCEWAKE_ALL);
+
+ mutex_lock(&gvt->dev_priv->drm.struct_mutex);
+ ret = dispatch_workload(workload);
+ mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
+
+ if (ret) {
+ gvt_err("fail to dispatch workload, skip\n");
+ goto complete;
+ }
+
+ gvt_dbg_sched("ring id %d wait workload %p\n",
+ workload->ring_id, workload);
+
+ workload->status = i915_wait_request(workload->req,
+ 0, NULL, NULL);
+ if (workload->status != 0)
+ gvt_err("fail to wait workload, skip\n");
+
+complete:
+ gvt_dbg_sched("will complete workload %p\n, status: %d\n",
+ workload, workload->status);
+
+ mutex_lock(&gvt->dev_priv->drm.struct_mutex);
+ complete_current_workload(gvt, ring_id);
+ mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
+
+ i915_gem_request_put(fetch_and_zero(&workload->req));
+
+ if (need_force_wake)
+ intel_uncore_forcewake_put(gvt->dev_priv,
+ FORCEWAKE_ALL);
+
+ intel_runtime_pm_put(gvt->dev_priv);
+
+ mutex_unlock(&scheduler_mutex);
+	}
+ return 0;
+}
+
+void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+ if (atomic_read(&vgpu->running_workload_num)) {
+ gvt_dbg_sched("wait vgpu idle\n");
+
+ wait_event(scheduler->workload_complete_wq,
+ !atomic_read(&vgpu->running_workload_num));
+ }
+}
+
+void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ int i;
+
+ gvt_dbg_core("clean workload scheduler\n");
+
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ if (scheduler->thread[i]) {
+ kthread_stop(scheduler->thread[i]);
+ scheduler->thread[i] = NULL;
+ }
+ }
+}
+
+int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
+{
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct workload_thread_param *param = NULL;
+ int ret;
+ int i;
+
+ gvt_dbg_core("init workload scheduler\n");
+
+ init_waitqueue_head(&scheduler->workload_complete_wq);
+
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ /* check ring mask at init time */
+ if (!HAS_ENGINE(gvt->dev_priv, i))
+ continue;
+
+ init_waitqueue_head(&scheduler->waitq[i]);
+
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ param->gvt = gvt;
+ param->ring_id = i;
+
+ scheduler->thread[i] = kthread_run(workload_thread, param,
+ "gvt workload %d", i);
+ if (IS_ERR(scheduler->thread[i])) {
+ gvt_err("fail to create workload thread\n");
+ ret = PTR_ERR(scheduler->thread[i]);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ intel_gvt_clean_workload_scheduler(gvt);
+ kfree(param);
+ param = NULL;
+ return ret;
+}
+
+void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
+ &vgpu->shadow_ctx_notifier_block);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+	/* a little hacky to mark the context as closed */
+ vgpu->shadow_ctx->closed = true;
+ i915_gem_context_put(vgpu->shadow_ctx);
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
+{
+ atomic_set(&vgpu->running_workload_num, 0);
+
+ vgpu->shadow_ctx = i915_gem_context_create_gvt(
+ &vgpu->gvt->dev_priv->drm);
+ if (IS_ERR(vgpu->shadow_ctx))
+ return PTR_ERR(vgpu->shadow_ctx);
+
+ vgpu->shadow_ctx->engine[RCS].initialised = true;
+
+ vgpu->shadow_ctx_notifier_block.notifier_call =
+ shadow_context_status_change;
+
+ atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
+ &vgpu->shadow_ctx_notifier_block);
+ return 0;
+}
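+
+/*
+ * Lifecycle sketch (editor's illustration): the device-level scheduler
+ * is set up once per GVT instance, then each vGPU gets its own shadow
+ * context and scheduling state:
+ *
+ *	ret = intel_gvt_init_workload_scheduler(gvt);
+ *	...
+ *	ret = intel_vgpu_init_gvt_context(vgpu);
+ *	...
+ *	intel_vgpu_clean_gvt_context(vgpu);
+ *	intel_gvt_clean_workload_scheduler(gvt);
+ */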
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
new file mode 100644
index 000000000000..3b30c28bff51
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ * Contributors:
+ * Ping Gao <ping.a.gao@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Min He <min.he@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ *
+ */
+
+#ifndef _GVT_SCHEDULER_H_
+#define _GVT_SCHEDULER_H_
+
+struct intel_gvt_workload_scheduler {
+ struct intel_vgpu *current_vgpu;
+ struct intel_vgpu *next_vgpu;
+ struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
+ bool need_reschedule;
+
+ wait_queue_head_t workload_complete_wq;
+ struct task_struct *thread[I915_NUM_ENGINES];
+ wait_queue_head_t waitq[I915_NUM_ENGINES];
+
+ void *sched_data;
+ struct intel_gvt_sched_policy_ops *sched_ops;
+};
+
+#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
+#define INDIRECT_CTX_SIZE_MASK 0x3f
+struct shadow_indirect_ctx {
+ struct drm_i915_gem_object *obj;
+ unsigned long guest_gma;
+ unsigned long shadow_gma;
+ void *shadow_va;
+ uint32_t size;
+};
+
+#define PER_CTX_ADDR_MASK 0xfffff000
+struct shadow_per_ctx {
+ unsigned long guest_gma;
+ unsigned long shadow_gma;
+};
+
+struct intel_shadow_wa_ctx {
+ struct intel_vgpu_workload *workload;
+ struct shadow_indirect_ctx indirect_ctx;
+ struct shadow_per_ctx per_ctx;
+};
+
+struct intel_vgpu_workload {
+ struct intel_vgpu *vgpu;
+ int ring_id;
+ struct drm_i915_gem_request *req;
+	/* has this workload been dispatched to i915? */
+ bool dispatched;
+ int status;
+
+ struct intel_vgpu_mm *shadow_mm;
+
+	/* different submission models may need different handlers */
+ int (*prepare)(struct intel_vgpu_workload *);
+ int (*complete)(struct intel_vgpu_workload *);
+ struct list_head list;
+
+ DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
+ void *shadow_ring_buffer_va;
+
+ /* execlist context information */
+ struct execlist_ctx_descriptor_format ctx_desc;
+ struct execlist_ring_context *ring_context;
+ unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+ bool restore_inhibit;
+ struct intel_vgpu_elsp_dwords elsp_dwords;
+ bool emulate_schedule_in;
+ atomic_t shadow_ctx_active;
+ wait_queue_head_t shadow_ctx_status_wq;
+ u64 ring_context_gpa;
+
+ /* shadow batch buffer */
+ struct list_head shadow_bb;
+ struct intel_shadow_wa_ctx wa_ctx;
+};
+
+/* An Intel shadow batch buffer is an i915 gem object */
+struct intel_shadow_bb_entry {
+ struct list_head list;
+ struct drm_i915_gem_object *obj;
+ void *va;
+ unsigned long len;
+ void *bb_start_cmd_va;
+};
+
+#define workload_q_head(vgpu, ring_id) \
+ (&(vgpu->workload_q_head[ring_id]))
+
+#define queue_workload(workload) do { \
+ list_add_tail(&workload->list, \
+ workload_q_head(workload->vgpu, workload->ring_id)); \
+ wake_up(&workload->vgpu->gvt-> \
+ scheduler.waitq[workload->ring_id]); \
+} while (0)
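+
+/*
+ * Usage sketch (editor's illustration): a submission backend fills in
+ * a workload and hands it to the per-ring queue, waking the workload
+ * thread:
+ *
+ *	workload->vgpu = vgpu;
+ *	workload->ring_id = ring_id;
+ *	INIT_LIST_HEAD(&workload->list);
+ *	queue_workload(workload);
+ */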
+
+int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
+
+void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
+
+void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
+
+int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
+
+void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
new file mode 100644
index 000000000000..53a2d10cf3f1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright © 2011-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#if !defined(_GVT_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _GVT_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+#include <asm/tsc.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gvt
+
+TRACE_EVENT(spt_alloc,
+ TP_PROTO(int id, void *spt, int type, unsigned long mfn,
+ unsigned long gpt_gfn),
+
+ TP_ARGS(id, spt, type, mfn, gpt_gfn),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(void *, spt)
+ __field(int, type)
+ __field(unsigned long, mfn)
+ __field(unsigned long, gpt_gfn)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->spt = spt;
+ __entry->type = type;
+ __entry->mfn = mfn;
+ __entry->gpt_gfn = gpt_gfn;
+ ),
+
+ TP_printk("VM%d [alloc] spt %p type %d mfn 0x%lx gfn 0x%lx\n",
+ __entry->id,
+ __entry->spt,
+ __entry->type,
+ __entry->mfn,
+ __entry->gpt_gfn)
+);
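+
+/*
+ * Editor's note: each TRACE_EVENT in this file generates a
+ * trace_<name>() helper; e.g. the shadow page table code would emit
+ * this event with trace_spt_alloc(vgpu->id, spt, type, mfn, gfn).
+ */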
+
+TRACE_EVENT(spt_free,
+ TP_PROTO(int id, void *spt, int type),
+
+ TP_ARGS(id, spt, type),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __field(void *, spt)
+ __field(int, type)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->spt = spt;
+ __entry->type = type;
+ ),
+
+ TP_printk("VM%u [free] spt %p type %d\n",
+ __entry->id,
+ __entry->spt,
+ __entry->type)
+);
+
+#define MAX_BUF_LEN 256
+
+TRACE_EVENT(gma_index,
+ TP_PROTO(const char *prefix, unsigned long gma,
+ unsigned long index),
+
+ TP_ARGS(prefix, gma, index),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "%s gma 0x%lx index 0x%lx\n", prefix, gma, index);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(gma_translate,
+ TP_PROTO(int id, char *type, int ring_id, int pt_level,
+ unsigned long gma, unsigned long gpa),
+
+ TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
+ id, type, ring_id, pt_level, gma, gpa);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(spt_refcount,
+ TP_PROTO(int id, char *action, void *spt, int before, int after),
+
+ TP_ARGS(id, action, spt, before, after),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [%s] spt %p before %d -> after %d\n",
+ id, action, spt, before, after);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(spt_change,
+ TP_PROTO(int id, char *action, void *spt, unsigned long gfn,
+ int type),
+
+ TP_ARGS(id, action, spt, gfn, type),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [%s] spt %p gfn 0x%lx type %d\n",
+ id, action, spt, gfn, type);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(gpt_change,
+ TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
+ unsigned long index),
+
+ TP_ARGS(id, tag, spt, type, v, index),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [%s] spt %p type %d entry 0x%llx index 0x%lx\n",
+ id, tag, spt, type, v, index);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(oos_change,
+ TP_PROTO(int id, const char *tag, int page_id, void *gpt, int type),
+
+ TP_ARGS(id, tag, page_id, gpt, type),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [oos %s] page id %d gpt %p type %d\n",
+ id, tag, page_id, gpt, type);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+TRACE_EVENT(oos_sync,
+ TP_PROTO(int id, int page_id, void *gpt, int type, u64 v,
+ unsigned long index),
+
+ TP_ARGS(id, page_id, gpt, type, v, index),
+
+ TP_STRUCT__entry(
+ __array(char, buf, MAX_BUF_LEN)
+ ),
+
+ TP_fast_assign(
+ snprintf(__entry->buf, MAX_BUF_LEN,
+ "VM%d [oos sync] page id %d gpt %p type %d entry 0x%llx index 0x%lx\n",
+ id, page_id, gpt, type, v, index);
+ ),
+
+ TP_printk("%s", __entry->buf)
+);
+
+#define MAX_CMD_STR_LEN 256
+TRACE_EVENT(gvt_command,
+ TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, bool ring_buffer_cmd, cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler),
+
+ TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len, ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler),
+
+ TP_STRUCT__entry(
+ __field(u8, vm_id)
+ __field(u8, ring_id)
+ __field(int, i)
+ __array(char, tmp_buf, MAX_CMD_STR_LEN)
+ __array(char, cmd_str, MAX_CMD_STR_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vm_id = vm_id;
+ __entry->ring_id = ring_id;
+ __entry->cmd_str[0] = '\0';
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ entry->i = 0;
+ while (cmd_len > 0) {
+ if (cmd_len >= 8) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
+ cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
+ cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
+ __entry->i += 8;
+ cmd_len -= 8;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ } else if (cmd_len >= 4) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
+ cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
+ __entry->i += 4;
+ cmd_len -= 4;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ } else if (cmd_len >= 2) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]);
+ __entry->i += 2;
+ cmd_len -= 2;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ } else if (cmd_len == 1) {
+ snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
+ __entry->i += 1;
+ cmd_len -= 1;
+ strcat(__entry->cmd_str, __entry->tmp_buf);
+ }
+ }
+ strcat(__entry->cmd_str, "\n");
+ ),
+
+ TP_printk("%s", __entry->cmd_str)
+);
+#endif /* _GVT_TRACE_H_ */
+
+/* This part must be out of protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/gvt/trace_points.c b/drivers/gpu/drm/i915/gvt/trace_points.c
new file mode 100644
index 000000000000..a3deed692b9c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/trace_points.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jike Song <jike.song@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ *
+ */
+
+#include "trace.h"
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
new file mode 100644
index 000000000000..9401436d721f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Ping Gao <ping.a.gao@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
+
+static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
+{
+ vfree(vgpu->mmio.vreg);
+ vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
+
+static int setup_vgpu_mmio(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+
+ vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+ if (!vgpu->mmio.vreg)
+ return -ENOMEM;
+
+ vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+ memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+ memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+ vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+	/* set bits 0:2 (Core C-State) to C0 */
+ vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+ return 0;
+}
+
+static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
+ struct intel_vgpu_creation_params *param)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ u16 *gmch_ctl;
+ int i;
+
+ memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+ info->cfg_space_size);
+
+ if (!param->primary) {
+ vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ }
+
+	/* Show the guest that there isn't any stolen memory. */
+ gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+ *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+ intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+ gvt_aperture_pa_base(gvt), true);
+
+ vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+ | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_MASTER);
+	/*
+	 * Clear the upper 32 bits of the BARs and let the guest assign
+	 * the new values
+	 */
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+
+ for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+ vgpu->cfg_space.bar[i].size = pci_resource_len(
+ gvt->dev_priv->drm.pdev, i * 2);
+ vgpu->cfg_space.bar[i].tracked = false;
+ }
+}
+
+static void populate_pvinfo_page(struct intel_vgpu *vgpu)
+{
+ /* setup the ballooning information */
+ vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
+ vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
+ vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
+ vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
+ vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
+ vgpu_aperture_gmadr_base(vgpu);
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
+ vgpu_aperture_sz(vgpu);
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
+ vgpu_hidden_gmadr_base(vgpu);
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
+ vgpu_hidden_sz(vgpu);
+
+ vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+
+ gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
+ gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
+ vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
+ gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
+ vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
+ gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
+
+ WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when a user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ mutex_lock(&gvt->lock);
+
+ vgpu->active = false;
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
+
+ if (atomic_read(&vgpu->running_workload_num)) {
+ mutex_unlock(&gvt->lock);
+ intel_gvt_wait_vgpu_idle(vgpu);
+ mutex_lock(&gvt->lock);
+ }
+
+ intel_vgpu_stop_schedule(vgpu);
+ intel_vgpu_clean_sched_policy(vgpu);
+ intel_vgpu_clean_gvt_context(vgpu);
+ intel_vgpu_clean_execlist(vgpu);
+ intel_vgpu_clean_display(vgpu);
+ intel_vgpu_clean_opregion(vgpu);
+ intel_vgpu_clean_gtt(vgpu);
+ intel_gvt_hypervisor_detach_vgpu(vgpu);
+ intel_vgpu_free_resource(vgpu);
+ clean_vgpu_mmio(vgpu);
+ vfree(vgpu);
+
+ mutex_unlock(&gvt->lock);
+}
+
+/**
+ * intel_gvt_create_vgpu - create a virtual GPU
+ * @gvt: GVT device
+ * @param: vGPU creation parameters
+ *
+ * This function is called when a user wants to create a virtual GPU.
+ *
+ * Returns:
+ * pointer to intel_vgpu, error pointer if failed.
+ */
+struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
+ struct intel_vgpu_creation_params *param)
+{
+ struct intel_vgpu *vgpu;
+ int ret;
+
+ gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
+ param->handle, param->low_gm_sz, param->high_gm_sz,
+ param->fence_sz);
+
+ vgpu = vzalloc(sizeof(*vgpu));
+ if (!vgpu)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&gvt->lock);
+
+ ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
+ if (ret < 0)
+ goto out_free_vgpu;
+
+ vgpu->id = ret;
+ vgpu->handle = param->handle;
+ vgpu->gvt = gvt;
+ bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
+
+ setup_vgpu_cfg_space(vgpu, param);
+
+ ret = setup_vgpu_mmio(vgpu);
+ if (ret)
+ goto out_free_vgpu;
+
+ ret = intel_vgpu_alloc_resource(vgpu, param);
+ if (ret)
+ goto out_clean_vgpu_mmio;
+
+ populate_pvinfo_page(vgpu);
+
+ ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
+ if (ret)
+ goto out_clean_vgpu_resource;
+
+ ret = intel_vgpu_init_gtt(vgpu);
+ if (ret)
+ goto out_detach_hypervisor_vgpu;
+
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
+ ret = intel_vgpu_init_opregion(vgpu, 0);
+ if (ret)
+ goto out_clean_gtt;
+ }
+
+ ret = intel_vgpu_init_display(vgpu);
+ if (ret)
+ goto out_clean_opregion;
+
+ ret = intel_vgpu_init_execlist(vgpu);
+ if (ret)
+ goto out_clean_display;
+
+ ret = intel_vgpu_init_gvt_context(vgpu);
+ if (ret)
+ goto out_clean_execlist;
+
+ ret = intel_vgpu_init_sched_policy(vgpu);
+ if (ret)
+ goto out_clean_shadow_ctx;
+
+ vgpu->active = true;
+ mutex_unlock(&gvt->lock);
+
+ return vgpu;
+
+out_clean_shadow_ctx:
+ intel_vgpu_clean_gvt_context(vgpu);
+out_clean_execlist:
+ intel_vgpu_clean_execlist(vgpu);
+out_clean_display:
+ intel_vgpu_clean_display(vgpu);
+out_clean_opregion:
+ intel_vgpu_clean_opregion(vgpu);
+out_clean_gtt:
+ intel_vgpu_clean_gtt(vgpu);
+out_detach_hypervisor_vgpu:
+ intel_gvt_hypervisor_detach_vgpu(vgpu);
+out_clean_vgpu_resource:
+ intel_vgpu_free_resource(vgpu);
+out_clean_vgpu_mmio:
+ clean_vgpu_mmio(vgpu);
+out_free_vgpu:
+ vfree(vgpu);
+ mutex_unlock(&gvt->lock);
+ return ERR_PTR(ret);
+}
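+
+/*
+ * Usage sketch (editor's illustration; the field values below are
+ * hypothetical, with sizes in MB as in the debug message above):
+ *
+ *	struct intel_vgpu_creation_params param = {
+ *		.handle = 1,
+ *		.primary = 1,
+ *		.low_gm_sz = 64,
+ *		.high_gm_sz = 448,
+ *		.fence_sz = 4,
+ *	};
+ *	struct intel_vgpu *vgpu = intel_gvt_create_vgpu(gvt, &param);
+ *	if (IS_ERR(vgpu))
+ *		return PTR_ERR(vgpu);
+ */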
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 70980f82a15b..f191d7b66b1d 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1308,10 +1308,11 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool active = false;
/* If the command parser is not enabled, report 0 - unsupported */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (intel_engine_needs_cmd_parser(engine)) {
active = true;
break;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 27b0e34dadec..20638d22bbad 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -79,10 +79,8 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
-#define SEP_SEMICOLON ;
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#undef SEP_SEMICOLON
return 0;
}
@@ -109,7 +107,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return i915_gem_object_to_ggtt(obj, NULL) ? 'g' : ' ';
+ return obj->fault_mappable ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
@@ -152,7 +150,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
- for_each_engine_id(engine, dev_priv, id)
+ for_each_engine(engine, dev_priv, id)
seq_printf(m, "%x ",
i915_gem_active_get_seqno(&obj->last_read[id],
&obj->base.dev->struct_mutex));
@@ -188,15 +186,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
- if (obj->pin_display || obj->fault_mappable) {
- char s[3], *t = s;
- if (obj->pin_display)
- *t++ = 'p';
- if (obj->fault_mappable)
- *t++ = 'f';
- *t = '\0';
- seq_printf(m, " (%s mappable)", s);
- }
engine = i915_gem_active_get_engine(&obj->last_write,
&dev_priv->drm.struct_mutex);
@@ -334,11 +323,12 @@ static void print_batch_pool_stats(struct seq_file *m,
struct drm_i915_gem_object *obj;
struct file_stats stats;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int j;
memset(&stats, 0, sizeof(stats));
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
&engine->batch_pool.cache_list[j],
@@ -402,7 +392,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
if (ret)
return ret;
- seq_printf(m, "%u objects, %zu bytes\n",
+ seq_printf(m, "%u objects, %llu bytes\n",
dev_priv->mm.object_count,
dev_priv->mm.object_memory);
@@ -607,6 +597,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int total = 0;
int ret, j;
@@ -614,7 +605,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
if (ret)
return ret;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
int count;
@@ -645,12 +636,30 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
return 0;
}
+static void print_request(struct seq_file *m,
+ struct drm_i915_gem_request *rq,
+ const char *prefix)
+{
+ struct pid *pid = rq->ctx->pid;
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
+ seq_printf(m, "%s%x [%x:%x] @ %d: %s [%d]\n", prefix,
+ rq->fence.seqno, rq->ctx->hw_id, rq->fence.seqno,
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+ task ? task->comm : "<unknown>",
+ task ? task->pid : -1);
+ rcu_read_unlock();
+}
+
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret, any;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -658,7 +667,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
return ret;
any = 0;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
int count;
count = 0;
@@ -668,19 +677,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
continue;
seq_printf(m, "%s requests: %d\n", engine->name, count);
- list_for_each_entry(req, &engine->request_list, link) {
- struct pid *pid = req->ctx->pid;
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
- seq_printf(m, " %x @ %d: %s [%d]\n",
- req->fence.seqno,
- (int) (jiffies - req->emitted_jiffies),
- task ? task->comm : "<unknown>",
- task ? task->pid : -1);
- rcu_read_unlock();
- }
+ list_for_each_entry(req, &engine->request_list, link)
+ print_request(m, req, " ");
any++;
}
@@ -715,8 +713,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_ring_seqno_info(m, engine);
return 0;
@@ -727,6 +726,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int i, pipe;
intel_runtime_pm_get(dev_priv);
@@ -895,7 +895,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
@@ -943,7 +943,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
const u32 *hws;
int i;
- engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
+ engine = dev_priv->engine[(uintptr_t)node->info_ent->data];
hws = engine->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -956,6 +956,8 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
static ssize_t
i915_error_state_write(struct file *filp,
const char __user *ubuf,
@@ -1038,6 +1040,8 @@ static const struct file_operations i915_error_state_fops = {
.release = i915_error_state_release,
};
+#endif
+
static int
i915_next_seqno_get(void *data, u64 *val)
{
@@ -1277,15 +1281,42 @@ out:
return ret;
}
+static void i915_instdone_info(struct drm_i915_private *dev_priv,
+ struct seq_file *m,
+ struct intel_instdone *instdone)
+{
+ int slice;
+ int subslice;
+
+ seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
+ instdone->instdone);
+
+ if (INTEL_GEN(dev_priv) <= 3)
+ return;
+
+ seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
+ instdone->slice_common);
+
+ if (INTEL_GEN(dev_priv) <= 6)
+ return;
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->sampler[slice][subslice]);
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->row[slice][subslice]);
+}
+
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
- u32 instdone[I915_NUM_INSTDONE_REG];
+ struct intel_instdone instdone;
enum intel_engine_id id;
- int j;
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
seq_printf(m, "Wedged\n");
@@ -1303,12 +1334,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
acthd[id] = intel_engine_get_active_head(engine);
seqno[id] = intel_engine_get_seqno(engine);
}
- i915_get_extra_instdone(dev_priv, instdone);
+ intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
intel_runtime_pm_put(dev_priv);
@@ -1319,7 +1350,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
} else
seq_printf(m, "Hangcheck inactive\n");
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct rb_node *rb;
+
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno,
@@ -1329,6 +1363,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)));
+ spin_lock(&b->lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+ seq_printf(m, "\t%s [%d] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid, w->seqno);
+ }
+ spin_unlock(&b->lock);
+
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
@@ -1336,18 +1379,14 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
if (engine->id == RCS) {
- seq_puts(m, "\tinstdone read =");
+ seq_puts(m, "\tinstdone read =\n");
- for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
- seq_printf(m, " 0x%08x", instdone[j]);
+ i915_instdone_info(dev_priv, m, &instdone);
- seq_puts(m, "\n\tinstdone accu =");
+ seq_puts(m, "\tinstdone accu =\n");
- for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
- seq_printf(m, " 0x%08x",
- engine->hangcheck.instdone[j]);
-
- seq_puts(m, "\n");
+ i915_instdone_info(dev_priv, m,
+ &engine->hangcheck.instdone);
}
}
@@ -1635,7 +1674,8 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "FBC disabled: %s\n",
dev_priv->fbc.no_fbc_reason);
- if (INTEL_GEN(dev_priv) >= 7)
+ if (intel_fbc_is_active(dev_priv) &&
+ INTEL_GEN(dev_priv) >= 7)
seq_printf(m, "Compressing: %s\n",
yesno(I915_READ(FBC_STATUS2) &
FBC_COMPRESSION_MASK));
@@ -1909,6 +1949,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ enum intel_engine_id id;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1935,7 +1976,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, '\n');
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_context *ce = &ctx->engine[engine->id];
seq_printf(m, "%s: ", engine->name);
@@ -2002,6 +2043,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ enum intel_engine_id id;
int ret;
if (!i915.enable_execlists) {
@@ -2014,7 +2056,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link)
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
@@ -2022,84 +2064,6 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return 0;
}
-static int i915_execlists(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
- u32 status_pointer;
- u8 read_pointer;
- u8 write_pointer;
- u32 status;
- u32 ctx_id;
- struct list_head *cursor;
- int i, ret;
-
- if (!i915.enable_execlists) {
- seq_puts(m, "Logical Ring Contexts are disabled\n");
- return 0;
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- intel_runtime_pm_get(dev_priv);
-
- for_each_engine(engine, dev_priv) {
- struct drm_i915_gem_request *head_req = NULL;
- int count = 0;
-
- seq_printf(m, "%s\n", engine->name);
-
- status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
- ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
- seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
- status, ctx_id);
-
- status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
- seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
-
- read_pointer = GEN8_CSB_READ_PTR(status_pointer);
- write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
- if (read_pointer > write_pointer)
- write_pointer += GEN8_CSB_ENTRIES;
- seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
- read_pointer, write_pointer);
-
- for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
- status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
- ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
-
- seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
- i, status, ctx_id);
- }
-
- spin_lock_bh(&engine->execlist_lock);
- list_for_each(cursor, &engine->execlist_queue)
- count++;
- head_req = list_first_entry_or_null(&engine->execlist_queue,
- struct drm_i915_gem_request,
- execlist_link);
- spin_unlock_bh(&engine->execlist_lock);
-
- seq_printf(m, "\t%d requests in queue\n", count);
- if (head_req) {
- seq_printf(m, "\tHead request context: %u\n",
- head_req->ctx->hw_id);
- seq_printf(m, "\tHead request tail: %u\n",
- head_req->tail);
- }
-
- seq_putc(m, '\n');
- }
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@@ -2201,14 +2165,15 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int i;
if (!ppgtt)
return;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s\n", engine->name);
for (i = 0; i < 4; i++) {
u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -2223,11 +2188,12 @@ static void gen6_ppgtt_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (IS_GEN6(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s\n", engine->name);
if (IS_GEN7(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n",
@@ -2296,9 +2262,10 @@ out_unlock:
static int count_irq_waiters(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int count = 0;
- for_each_engine(engine, i915)
+ for_each_engine(engine, i915, id)
count += intel_engine_has_waiter(engine);
return count;
@@ -2461,7 +2428,7 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
u64 submissions = client->submissions[id];
tot += submissions;
seq_printf(m, "\tSubmissions: %llu %s\n",
@@ -2504,7 +2471,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
seq_printf(m, "\nGuC submissions:\n");
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
u64 submissions = guc.submissions[id];
total += submissions;
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
@@ -3121,6 +3088,134 @@ static int i915_display_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_engine_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct drm_i915_gem_request *rq;
+ struct rb_node *rb;
+ u64 addr;
+
+ seq_printf(m, "%s\n", engine->name);
+ seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
+ intel_engine_get_seqno(engine),
+ engine->last_submitted_seqno,
+ engine->hangcheck.seqno,
+ engine->hangcheck.score);
+
+ rcu_read_lock();
+
+ seq_printf(m, "\tRequests:\n");
+
+ rq = list_first_entry(&engine->request_list,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->request_list)
+ print_request(m, rq, "\t\tfirst ");
+
+ rq = list_last_entry(&engine->request_list,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->request_list)
+ print_request(m, rq, "\t\tlast ");
+
+ rq = i915_gem_find_active_request(engine);
+ if (rq) {
+ print_request(m, rq, "\t\tactive ");
+ seq_printf(m,
+ "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+ rq->head, rq->postfix, rq->tail,
+ rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+ rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+ }
+
+ seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
+ I915_READ(RING_START(engine->mmio_base)),
+ rq ? i915_ggtt_offset(rq->ring->vma) : 0);
+ seq_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
+ I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
+ rq ? rq->ring->head : 0);
+ seq_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
+ I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
+ rq ? rq->ring->tail : 0);
+ seq_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
+ I915_READ(RING_CTL(engine->mmio_base)),
+ I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+
+ rcu_read_unlock();
+
+ addr = intel_engine_get_active_head(engine);
+ seq_printf(m, "\tACTHD: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ addr = intel_engine_get_last_batch_head(engine);
+ seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+
+ if (i915.enable_execlists) {
+ u32 ptr, read, write;
+
+ seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
+ I915_READ(RING_EXECLIST_STATUS_LO(engine)),
+ I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+
+ ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
+ read = GEN8_CSB_READ_PTR(ptr);
+ write = GEN8_CSB_WRITE_PTR(ptr);
+ seq_printf(m, "\tExeclist CSB read %d, write %d\n",
+ read, write);
+ if (read >= GEN8_CSB_ENTRIES)
+ read = 0;
+ if (write >= GEN8_CSB_ENTRIES)
+ write = 0;
+ if (read > write)
+ write += GEN8_CSB_ENTRIES;
+ while (read < write) {
+ unsigned int idx = ++read % GEN8_CSB_ENTRIES;
+
+ seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
+ idx,
+ I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+ I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
+ }
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->execlist_port[0].request);
+ if (rq)
+ print_request(m, rq, "\t\tELSP[0] ");
+ else
+ seq_printf(m, "\t\tELSP[0] idle\n");
+ rq = READ_ONCE(engine->execlist_port[1].request);
+ if (rq)
+ print_request(m, rq, "\t\tELSP[1] ");
+ else
+ seq_printf(m, "\t\tELSP[1] idle\n");
+ rcu_read_unlock();
+ } else if (INTEL_GEN(dev_priv) > 6) {
+ seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE(engine)));
+ seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
+ I915_READ(RING_PP_DIR_DCLV(engine)));
+ }
+
+ spin_lock(&b->lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+ seq_printf(m, "\t%s [%d] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid, w->seqno);
+ }
+ spin_unlock(&b->lock);
+
+ seq_puts(m, "\n");
+ }
+
+ return 0;
+}
+
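Note: the CSB walk above first clamps the hardware read/write pointers into range, unwraps the write pointer when it has lapped the read pointer, and then visits each pending entry modulo GEN8_CSB_ENTRIES. A minimal standalone sketch of the same wraparound walk, assuming a six-entry buffer as on gen8:

	/* Sketch of the CSB wraparound walk; the entry count is assumed. */
	#include <stdio.h>

	#define CSB_ENTRIES 6

	static void walk_csb(unsigned int read, unsigned int write)
	{
		/* Clamp out-of-range pointers, as the debugfs code does. */
		if (read >= CSB_ENTRIES)
			read = 0;
		if (write >= CSB_ENTRIES)
			write = 0;
		/* Unwrap the write pointer if it has lapped read. */
		if (read > write)
			write += CSB_ENTRIES;

		while (read < write) {
			unsigned int idx = ++read % CSB_ENTRIES;

			printf("visit CSB[%u]\n", idx);
		}
	}

With read = 4 and write = 1 the walk visits entries 5, 0 and 1, i.e. everything the hardware wrote since the last read.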
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3147,7 +3242,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id) {
uint64_t offset;
seq_printf(m, "%s\n", engine->name);
@@ -3172,7 +3267,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
kunmap_atomic(seqno);
} else {
seq_puts(m, " Last signal:");
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
for (j = 0; j < num_rings; j++)
seq_printf(m, "0x%08x\n",
I915_READ(engine->semaphore.mbox.signal[j]));
@@ -3180,7 +3275,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
}
seq_puts(m, "\nSync seqno:\n");
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
for (j = 0; j < num_rings; j++)
seq_printf(m, " 0x%08x ",
engine->semaphore.sync_seqno[j]);
@@ -3236,7 +3331,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for_each_engine_id(engine, dev_priv, id)
+ for_each_engine(engine, dev_priv, id)
seq_printf(m, "HW whitelist count for %s: %d\n",
engine->name, workarounds->hw_whitelist_count[id]);
for (i = 0; i < workarounds->count; ++i) {
@@ -3941,10 +4036,9 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
ret = drm_atomic_commit(state);
out:
- drm_modeset_unlock_all(dev);
WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
- if (ret)
- drm_atomic_state_free(state);
+ drm_modeset_unlock_all(dev);
+ drm_atomic_state_put(state);
}
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
@@ -4463,7 +4557,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
- num_levels = ilk_wm_max_level(dev) + 1;
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
drm_modeset_lock_all(dev);
@@ -4579,7 +4673,7 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
- num_levels = ilk_wm_max_level(dev) + 1;
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
if (len >= sizeof(tmp))
return -EINVAL;
@@ -5275,7 +5369,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
{"i915_dump_lrc", i915_dump_lrc, 0},
- {"i915_execlists", i915_execlists, 0},
{"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -5287,6 +5380,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_dmc_info", i915_dmc_info, 0},
{"i915_display_info", i915_display_info, 0},
+ {"i915_engine_info", i915_engine_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
@@ -5309,7 +5403,9 @@ static const struct i915_debugfs_files {
{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
{"i915_ring_test_irq", &i915_ring_test_irq_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
+#endif
{"i915_next_seqno", &i915_next_seqno_fops},
{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 18dfdd5c1b3b..912d5348e3e7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -114,7 +114,7 @@ static bool i915_error_injected(struct drm_i915_private *dev_priv)
fmt, ##__VA_ARGS__)
-static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
+static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
enum intel_pch ret = PCH_NOP;
@@ -125,16 +125,16 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
* make an educated guess as to which PCH is really there.
*/
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
ret = PCH_IBX;
DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
- } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
ret = PCH_CPT;
DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
ret = PCH_LPT;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = PCH_SPT;
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
}
@@ -174,40 +174,46 @@ static void intel_detect_pch(struct drm_device *dev)
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN5(dev));
+ WARN_ON(!IS_GEN5(dev_priv));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ WARN_ON(!(IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ WARN_ON(!(IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
- WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) ||
+ IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
- WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) &&
+ !IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev) &&
- !IS_KABYLAKE(dev));
+ WARN_ON(!IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev) &&
- !IS_KABYLAKE(dev));
+ WARN_ON(!IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found KabyPoint PCH\n");
- WARN_ON(!IS_KABYLAKE(dev));
+ WARN_ON(!IS_KABYLAKE(dev_priv));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -215,7 +221,8 @@ static void intel_detect_pch(struct drm_device *dev)
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
pch->subsystem_device ==
PCI_SUBDEVICE_ID_QEMU)) {
- dev_priv->pch_type = intel_virt_detect_pch(dev);
+ dev_priv->pch_type =
+ intel_virt_detect_pch(dev_priv);
} else
continue;
@@ -255,16 +262,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev_priv->overlay ? 1 : 0;
break;
case I915_PARAM_HAS_BSD:
- value = intel_engine_initialized(&dev_priv->engine[VCS]);
+ value = !!dev_priv->engine[VCS];
break;
case I915_PARAM_HAS_BLT:
- value = intel_engine_initialized(&dev_priv->engine[BCS]);
+ value = !!dev_priv->engine[BCS];
break;
case I915_PARAM_HAS_VEBOX:
- value = intel_engine_initialized(&dev_priv->engine[VECS]);
+ value = !!dev_priv->engine[VECS];
break;
case I915_PARAM_HAS_BSD2:
- value = intel_engine_initialized(&dev_priv->engine[VCS2]);
+ value = !!dev_priv->engine[VCS2];
break;
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_GEN(dev_priv) >= 4;
@@ -417,12 +424,12 @@ intel_setup_mchbar(struct drm_device *dev)
u32 temp;
bool enabled;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return;
dev_priv->mchbar_need_disable = false;
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
@@ -440,7 +447,7 @@ intel_setup_mchbar(struct drm_device *dev)
dev_priv->mchbar_need_disable = true;
/* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
@@ -456,7 +463,7 @@ intel_teardown_mchbar(struct drm_device *dev)
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
if (dev_priv->mchbar_need_disable) {
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
u32 deven_val;
pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
@@ -532,32 +539,6 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
static void i915_gem_fini(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode. And the only
- * known way to disable logical contexts is through a GPU reset.
- *
- * So in order to leave the system in a known default configuration,
- * always reset the GPU upon unload. Afterwards we then clean up the
- * GEM state tracking, flushing off the requests and leaving the
- * system in a known idle state.
- *
- * Note that is of the upmost importance that the GPU is idle and
- * all stray writes are flushed *before* we dismantle the backing
- * storage for the pinned objects.
- *
- * However, since we are uncertain that reseting the GPU on older
- * machines is a good idea, we don't - just in case it leaves the
- * machine in an unusable condition.
- */
- if (HAS_HW_CONTEXTS(dev)) {
- int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
- WARN_ON(reset && reset != -ENODEV);
- }
-
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
@@ -636,6 +617,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
cleanup_gem:
+ if (i915_gem_suspend(dev))
+ DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev);
cleanup_irq:
intel_guc_fini(dev);
@@ -771,6 +754,19 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
destroy_workqueue(dev_priv->wq);
}
+/*
+ * We don't keep the workarounds for pre-production hardware, so we expect our
+ * driver to fail on these machines in one way or another. A little warning in
+ * dmesg may help both the user and the bug triagers.
+ */
+static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
+{
+ if (IS_HSW_EARLY_SDV(dev_priv) ||
+ IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
+ DRM_ERROR("This is a pre-production stepping. "
+ "It may not be fully functional.\n");
+}
+
/**
* i915_driver_init_early - setup state not requiring device access
* @dev_priv: device private
@@ -838,13 +834,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_device_info_dump(dev_priv);
- /* Not all pre-production machines fall into this category, only the
- * very first ones. Almost everything should work, except for maybe
- * suspend/resume. And we don't implement workarounds that affect only
- * pre-production machines. */
- if (IS_HSW_EARLY_SDV(dev_priv))
- DRM_INFO("This is an early pre-production Haswell machine. "
- "It may not be fully functional.\n");
+ intel_detect_preproduction_hw(dev_priv);
return 0;
@@ -870,7 +860,7 @@ static int i915_mmio_setup(struct drm_device *dev)
int mmio_bar;
int mmio_size;
- mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
@@ -1023,7 +1013,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
/* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
@@ -1070,7 +1060,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* be lost or delayed, but we use them anyway to avoid
* stuck interrupts on some machines.
*/
- if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+ if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
if (pci_enable_msi(pdev) < 0)
DRM_DEBUG_DRIVER("can't enable MSI");
}
@@ -1242,6 +1232,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver.name, driver.major, driver.minor, driver.patchlevel,
driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ DRM_INFO("DRM_I915_DEBUG enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
intel_runtime_pm_put(dev_priv);
@@ -1721,6 +1715,22 @@ int i915_resume_switcheroo(struct drm_device *dev)
return i915_drm_resume(dev);
}
+static void disable_engines_irq(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Ensure the irq handler finishes, and is not run again. */
+ disable_irq(dev_priv->drm.irq);
+ for_each_engine(engine, dev_priv, id)
+ tasklet_kill(&engine->irq_tasklet);
+}
+
+static void enable_engines_irq(struct drm_i915_private *dev_priv)
+{
+ enable_irq(dev_priv->drm.irq);
+}
+
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
@@ -1754,7 +1764,11 @@ void i915_reset(struct drm_i915_private *dev_priv)
error->reset_count++;
pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
+ disable_engines_irq(dev_priv);
ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ enable_engines_irq(dev_priv);
+
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -2282,7 +2296,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
return -ENODEV;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+ if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
DRM_DEBUG_KMS("Suspending device\n");
@@ -2386,7 +2400,7 @@ static int intel_runtime_resume(struct device *kdev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+ if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
DRM_DEBUG_KMS("Resuming device\n");
@@ -2404,7 +2418,7 @@ static int intel_runtime_resume(struct device *kdev)
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 685e9e065287..f022f438e5b9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -70,7 +70,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20160919"
+#define DRIVER_DATE "20161024"
+#define DRIVER_TIMESTAMP 1477290335
#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
@@ -185,6 +186,7 @@ enum plane {
#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
enum port {
+ PORT_NONE = -1,
PORT_A = 0,
PORT_B,
PORT_C,
@@ -581,13 +583,25 @@ struct intel_uncore_funcs {
uint32_t val, bool trace);
};
+struct intel_forcewake_range {
+ u32 start;
+ u32 end;
+
+ enum forcewake_domains domains;
+};
+
struct intel_uncore {
spinlock_t lock; /** lock is also taken in irq contexts. */
+ const struct intel_forcewake_range *fw_domains_table;
+ unsigned int fw_domains_table_entries;
+
struct intel_uncore_funcs funcs;
unsigned fifo_count;
+
enum forcewake_domains fw_domains;
+ enum forcewake_domains fw_domains_active;
struct intel_uncore_forcewake_domain {
struct drm_i915_private *i915;
@@ -633,54 +647,53 @@ struct intel_csr {
uint32_t allowed_dc_mask;
};
-#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
- func(is_mobile) sep \
- func(is_i85x) sep \
- func(is_i915g) sep \
- func(is_i945gm) sep \
- func(is_g33) sep \
- func(hws_needs_physical) sep \
- func(is_g4x) sep \
- func(is_pineview) sep \
- func(is_broadwater) sep \
- func(is_crestline) sep \
- func(is_ivybridge) sep \
- func(is_valleyview) sep \
- func(is_cherryview) sep \
- func(is_haswell) sep \
- func(is_broadwell) sep \
- func(is_skylake) sep \
- func(is_broxton) sep \
- func(is_kabylake) sep \
- func(is_preliminary) sep \
- func(has_fbc) sep \
- func(has_psr) sep \
- func(has_runtime_pm) sep \
- func(has_csr) sep \
- func(has_resource_streamer) sep \
- func(has_rc6) sep \
- func(has_rc6p) sep \
- func(has_dp_mst) sep \
- func(has_gmbus_irq) sep \
- func(has_hw_contexts) sep \
- func(has_logical_ring_contexts) sep \
- func(has_l3_dpf) sep \
- func(has_gmch_display) sep \
- func(has_guc) sep \
- func(has_pipe_cxsr) sep \
- func(has_hotplug) sep \
- func(cursor_needs_physical) sep \
- func(has_overlay) sep \
- func(overlay_needs_physical) sep \
- func(supports_tv) sep \
- func(has_llc) sep \
- func(has_snoop) sep \
- func(has_ddi) sep \
- func(has_fpga_dbg) sep \
- func(has_pooled_eu)
-
-#define DEFINE_FLAG(name) u8 name:1
-#define SEP_SEMICOLON ;
+#define DEV_INFO_FOR_EACH_FLAG(func) \
+ /* Keep is_* in chronological order */ \
+ func(is_mobile); \
+ func(is_i85x); \
+ func(is_i915g); \
+ func(is_i945gm); \
+ func(is_g33); \
+ func(is_g4x); \
+ func(is_pineview); \
+ func(is_broadwater); \
+ func(is_crestline); \
+ func(is_ivybridge); \
+ func(is_valleyview); \
+ func(is_cherryview); \
+ func(is_haswell); \
+ func(is_broadwell); \
+ func(is_skylake); \
+ func(is_broxton); \
+ func(is_kabylake); \
+ func(is_preliminary); \
+ /* Keep has_* in alphabetical order */ \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_fbc); \
+ func(has_fpga_dbg); \
+ func(has_gmbus_irq); \
+ func(has_gmch_display); \
+ func(has_guc); \
+ func(has_hotplug); \
+ func(has_hw_contexts); \
+ func(has_l3_dpf); \
+ func(has_llc); \
+ func(has_logical_ring_contexts); \
+ func(has_overlay); \
+ func(has_pipe_cxsr); \
+ func(has_pooled_eu); \
+ func(has_psr); \
+ func(has_rc6); \
+ func(has_rc6p); \
+ func(has_resource_streamer); \
+ func(has_runtime_pm); \
+ func(has_snoop); \
+ func(cursor_needs_physical); \
+ func(hws_needs_physical); \
+ func(overlay_needs_physical); \
+ func(supports_tv)
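Note: dropping the sep argument works because each func(...) invocation now carries its own semicolon, so callers pass a single per-flag macro. A self-contained sketch of the X-macro pattern, using made-up flag names rather than the driver's list:

	/* One flag list, expanded two ways; has_foo/has_bar are hypothetical. */
	#include <stdio.h>

	#define FOR_EACH_FLAG(func) \
		func(has_foo); \
		func(has_bar)

	struct device_info {
	#define DEFINE_FLAG(name) unsigned char name:1
		FOR_EACH_FLAG(DEFINE_FLAG);
	#undef DEFINE_FLAG
	};

	static void dump_flags(const struct device_info *info)
	{
	#define PRINT_FLAG(name) printf(#name ": %u\n", info->name)
		FOR_EACH_FLAG(PRINT_FLAG);
	#undef PRINT_FLAG
	}

Every consumer expands the same list, so adding one flag keeps the struct layout and any diagnostic dump in sync automatically.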
struct sseu_dev_info {
u8 slice_mask;
@@ -709,7 +722,9 @@ struct intel_device_info {
u16 gen_mask;
u8 ring_mask; /* Rings supported by the HW */
u8 num_rings;
- DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
u16 ddb_size; /* in blocks */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
@@ -726,15 +741,14 @@ struct intel_device_info {
} color;
};
-#undef DEFINE_FLAG
-#undef SEP_SEMICOLON
-
struct intel_display_error_state;
struct drm_i915_error_state {
struct kref ref;
struct timeval time;
+ struct drm_i915_private *i915;
+
char error_msg[128];
bool simulated;
int iommu;
@@ -759,7 +773,7 @@ struct drm_i915_error_state {
u32 gam_ecochk;
u32 gab_ctl;
u32 gfx_mode;
- u32 extra_instdone[I915_NUM_INSTDONE_REG];
+
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
@@ -775,6 +789,9 @@ struct drm_i915_error_state {
struct i915_address_space *vm;
int num_requests;
+ /* position of active request inside the ring */
+ u32 rq_head, rq_post, rq_tail;
+
/* our own tracking of ring head and tail */
u32 cpu_ring_head;
u32 cpu_ring_tail;
@@ -791,7 +808,6 @@ struct drm_i915_error_state {
u32 hws;
u32 ipeir;
u32 ipehr;
- u32 instdone;
u32 bbstate;
u32 instpm;
u32 instps;
@@ -802,11 +818,13 @@ struct drm_i915_error_state {
u64 faddr;
u32 rc_psmi; /* sleep state */
u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
+ struct intel_instdone instdone;
struct drm_i915_error_object {
- int page_count;
u64 gtt_offset;
u64 gtt_size;
+ int page_count;
+ int unused;
u32 *pages[0];
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
@@ -815,10 +833,11 @@ struct drm_i915_error_state {
struct drm_i915_error_request {
long jiffies;
pid_t pid;
+ u32 context;
u32 seqno;
u32 head;
u32 tail;
- } *requests;
+ } *requests, execlist[2];
struct drm_i915_error_waiter {
char comm[TASK_COMM_LEN];
@@ -972,6 +991,9 @@ struct intel_fbc {
bool enabled;
bool active;
+ bool underrun_detected;
+ struct work_struct underrun_work;
+
struct intel_fbc_state_cache {
struct {
unsigned int mode_flags;
@@ -1368,7 +1390,7 @@ struct i915_gem_mm {
/* accounting, useful for userland debugging */
spinlock_t object_stat_lock;
- size_t object_memory;
+ u64 object_memory;
u32 object_count;
};
@@ -1620,7 +1642,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
- struct skl_ddb_entry pipe[I915_MAX_PIPES];
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};
@@ -1628,15 +1649,12 @@ struct skl_ddb_allocation {
struct skl_wm_values {
unsigned dirty_pipes;
struct skl_ddb_allocation ddb;
- uint32_t wm_linetime[I915_MAX_PIPES];
- uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
- uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
};
struct skl_wm_level {
- bool plane_en[I915_MAX_PLANES];
- uint16_t plane_res_b[I915_MAX_PLANES];
- uint8_t plane_res_l[I915_MAX_PLANES];
+ bool plane_en;
+ uint16_t plane_res_b;
+ uint8_t plane_res_l;
};
/*
@@ -1759,7 +1777,7 @@ struct drm_i915_private {
struct i915_virtual_gpu vgpu;
- struct intel_gvt gvt;
+ struct intel_gvt *gvt;
struct intel_guc guc;
@@ -1787,7 +1805,7 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
struct i915_gem_context *kernel_context;
- struct intel_engine_cs engine[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct i915_vma *semaphore;
u32 next_seqno;
@@ -2079,7 +2097,8 @@ struct drm_i915_private {
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
- struct intel_encoder *dig_port_map[I915_MAX_PORTS];
+ /* Used to save the pipe-to-encoder mapping for audio */
+ struct intel_encoder *av_enc_map[I915_MAX_PIPES];
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
@@ -2103,19 +2122,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
}
/* Simple iterator over all initialised engines */
-#define for_each_engine(engine__, dev_priv__) \
- for ((engine__) = &(dev_priv__)->engine[0]; \
- (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
- (engine__)++) \
- for_each_if (intel_engine_initialized(engine__))
-
-/* Iterator with engine_id */
-#define for_each_engine_id(engine__, dev_priv__, id__) \
- for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
- (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
- (engine__)++) \
- for_each_if (((id__) = (engine__)->id, \
- intel_engine_initialized(engine__)))
+#define for_each_engine(engine__, dev_priv__, id__) \
+ for ((id__) = 0; \
+ (id__) < I915_NUM_ENGINES; \
+ (id__)++) \
+ for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
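Note: with engines now stored as pointers, the rewritten iterator binds the engine id on each pass and skips empty slots via for_each_if(). A typical call site, mirroring the conversions throughout this patch (the print itself is illustrative):

	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		DRM_DEBUG_DRIVER("%s (id %d) initialised\n", engine->name, id);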
#define __mask_next_bit(mask) ({ \
int __idx = ffs(mask) - 1; \
@@ -2126,7 +2137,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
- tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
+ tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
@@ -2586,8 +2597,9 @@ struct drm_i915_cmd_table {
__p; \
})
#define INTEL_INFO(p) (&__I915__(p)->info)
-#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
-#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
+
+#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
+#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
#define REVID_FOREVER 0xff
#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
@@ -2598,7 +2610,7 @@ struct drm_i915_cmd_table {
*
* Use GEN_FOREVER for unbound start and/or end.
*/
-#define IS_GEN(p, s, e) ({ \
+#define IS_GEN(dev_priv, s, e) ({ \
unsigned int __s = (s), __e = (e); \
BUILD_BUG_ON(!__builtin_constant_p(s)); \
BUILD_BUG_ON(!__builtin_constant_p(e)); \
@@ -2608,7 +2620,7 @@ struct drm_i915_cmd_table {
__e = BITS_PER_LONG - 1; \
else \
__e = (e) - 1; \
- !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+ !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
})
/*
@@ -2619,73 +2631,73 @@ struct drm_i915_cmd_table {
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
-#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
+#define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577)
+#define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572)
+#define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592)
-#define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772)
+#define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592)
+#define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42)
-#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001)
-#define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011)
+#define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42)
+#define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x)
+#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
+#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046)
-#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
- INTEL_DEVID(dev) == 0x0152 || \
- INTEL_DEVID(dev) == 0x015a)
-#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
-#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
-#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
-#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
-#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
-#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
+#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
+#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
+#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
+ INTEL_DEVID(dev_priv) == 0x0152 || \
+ INTEL_DEVID(dev_priv) == 0x015a)
+#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview)
+#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview)
+#define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell)
+#define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell)
+#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake)
+#define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton)
+#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
- (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
- ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
- (INTEL_DEVID(dev) & 0xf) == 0xb || \
- (INTEL_DEVID(dev) & 0xf) == 0xe))
+#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
+#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
+ ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
+ (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
+ (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
/* ULX machines are also considered ULT. */
-#define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \
- (INTEL_DEVID(dev) & 0xf) == 0xe)
-#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
-#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
- (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
-#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
+#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
+#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
- INTEL_DEVID(dev) == 0x0A1E)
-#define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \
- INTEL_DEVID(dev) == 0x1913 || \
- INTEL_DEVID(dev) == 0x1916 || \
- INTEL_DEVID(dev) == 0x1921 || \
- INTEL_DEVID(dev) == 0x1926)
-#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
- INTEL_DEVID(dev) == 0x1915 || \
- INTEL_DEVID(dev) == 0x191E)
-#define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \
- INTEL_DEVID(dev) == 0x5913 || \
- INTEL_DEVID(dev) == 0x5916 || \
- INTEL_DEVID(dev) == 0x5921 || \
- INTEL_DEVID(dev) == 0x5926)
-#define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \
- INTEL_DEVID(dev) == 0x5915 || \
- INTEL_DEVID(dev) == 0x591E)
-#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
-#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \
- (INTEL_DEVID(dev) & 0x00F0) == 0x0030)
+#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
+ INTEL_DEVID(dev_priv) == 0x0A1E)
+#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \
+ INTEL_DEVID(dev_priv) == 0x1913 || \
+ INTEL_DEVID(dev_priv) == 0x1916 || \
+ INTEL_DEVID(dev_priv) == 0x1921 || \
+ INTEL_DEVID(dev_priv) == 0x1926)
+#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \
+ INTEL_DEVID(dev_priv) == 0x1915 || \
+ INTEL_DEVID(dev_priv) == 0x191E)
+#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \
+ INTEL_DEVID(dev_priv) == 0x5913 || \
+ INTEL_DEVID(dev_priv) == 0x5916 || \
+ INTEL_DEVID(dev_priv) == 0x5921 || \
+ INTEL_DEVID(dev_priv) == 0x5926)
+#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
+ INTEL_DEVID(dev_priv) == 0x5915 || \
+ INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -2705,7 +2717,8 @@ struct drm_i915_cmd_table {
#define BXT_REVID_B0 0x3
#define BXT_REVID_C0 0x9
-#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+#define IS_BXT_REVID(dev_priv, since, until) \
+ (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
#define KBL_REVID_A0 0x0
#define KBL_REVID_B0 0x1
@@ -2713,8 +2726,8 @@ struct drm_i915_cmd_table {
#define KBL_REVID_D0 0x3
#define KBL_REVID_E0 0x4
-#define IS_KBL_REVID(p, since, until) \
- (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+#define IS_KBL_REVID(dev_priv, since, until) \
+ (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
/*
* The genX designation typically refers to the render engine, so render
@@ -2722,14 +2735,14 @@ struct drm_i915_cmd_table {
* have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
* chips, etc.).
*/
-#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
-#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
-#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
-#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
-#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
-#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
-#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
-#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
+#define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1)))
+#define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2)))
+#define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3)))
+#define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4)))
+#define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5)))
+#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
+#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
+#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
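Note: gen_mask stores generation N in bit N-1, so both the fixed IS_GENx() tests above and the ranged IS_GEN(dev_priv, s, e) test reduce to a single AND against a precomputed mask. A worked example for a gen7 device, i.e. gen_mask == BIT(6):

	IS_GEN7(dev_priv);      /* gen_mask & BIT(6)        -> true  */
	IS_GEN(dev_priv, 6, 7); /* gen_mask & GENMASK(6, 5) -> true  */
	IS_GEN(dev_priv, 8, 9); /* gen_mask & GENMASK(8, 7) -> false */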
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
@@ -2750,8 +2763,8 @@ struct drm_i915_cmd_table {
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
-#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
- HAS_EDRAM(dev))
+#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
+ IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
#define HWS_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->hws_needs_physical)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->has_hw_contexts)
@@ -2764,7 +2777,7 @@ struct drm_i915_cmd_table {
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
-#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv))
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
@@ -2784,8 +2797,9 @@ struct drm_i915_cmd_table {
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
- IS_I915GM(dev)))
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
+ !(IS_I915G(dev_priv) || \
+ IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
@@ -2793,19 +2807,19 @@ struct drm_i915_cmd_table {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
#define HAS_DP_MST(dev) (INTEL_INFO(dev)->has_dp_mst)
-#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
+#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr)
-#define HAS_RUNTIME_PM(dev) (INTEL_INFO(dev)->has_runtime_pm)
#define HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->has_rc6p)
#define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr)
+#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
@@ -2832,22 +2846,27 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
-#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
-#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
-#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
-#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
-#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
-#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
+#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
+#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev_priv) \
+ ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev_priv) \
+ ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
+#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
+#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
+
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
-#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->has_gmch_display)
+#define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
/* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev) (INTEL_INFO(dev)->has_l3_dpf)
-#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
+#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
+#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
+ 2 : HAS_L3_DPF(dev_priv))
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
@@ -2974,7 +2993,7 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
- return dev_priv->gvt.initialized;
+ return dev_priv->gvt;
}
static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
@@ -3087,7 +3106,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
- size_t size);
+ u64 size);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size);
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
@@ -3161,14 +3180,15 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- BUG_ON(obj->pages == NULL);
+ GEM_BUG_ON(obj->pages == NULL);
obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
- BUG_ON(obj->pages_pin_count == 0);
+ GEM_BUG_ON(obj->pages_pin_count == 0);
obj->pages_pin_count--;
+ GEM_BUG_ON(obj->pages_pin_count < obj->bind_count);
}
enum i915_map_type {
@@ -3526,6 +3546,8 @@ static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif
/* i915_gpu_error.c */
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
@@ -3546,7 +3568,20 @@ void i915_error_state_get(struct drm_device *dev,
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);
-void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
+#else
+
+static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
+ const char *error_msg)
+{
+}
+
+static inline void i915_destroy_error_state(struct drm_device *dev)
+{
+}
+
+#endif
+
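Note: gating the error-capture declarations behind CONFIG_DRM_I915_CAPTURE_ERROR follows the usual kernel pattern of pairing the real prototypes with static inline no-op stubs, so unconditional call sites compile either way. The same pattern in generic form, with a made-up Kconfig symbol and type:

	#if IS_ENABLED(CONFIG_FEATURE_FOO)
	void feature_foo_capture(struct foo *f);
	#else
	static inline void feature_foo_capture(struct foo *f)
	{
	}
	#endif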
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
@@ -3596,6 +3631,9 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
+bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+ enum port port);
+
/* intel_opregion.c */
#ifdef CONFIG_ACPI
@@ -3812,11 +3850,11 @@ __raw_write(64, q)
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2
-static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
+static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return VLV_VGACNTRL;
- else if (INTEL_INFO(dev)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
return CPU_VGACNTRL;
else
return VGACNTRL;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 23960de81b57..bf5e9064e02d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -82,7 +82,7 @@ remove_mappable_node(struct drm_mm_node *node)
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
- size_t size)
+ u64 size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count++;
@@ -91,7 +91,7 @@ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
}
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
- size_t size)
+ u64 size)
{
spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count--;
@@ -919,8 +919,7 @@ out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
- node.start, node.size,
- true);
+ node.start, node.size);
i915_gem_object_unpin_pages(obj);
remove_mappable_node(&node);
} else {
@@ -1228,8 +1227,7 @@ out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
- node.start, node.size,
- true);
+ node.start, node.size);
i915_gem_object_unpin_pages(obj);
remove_mappable_node(&node);
} else {
@@ -1813,8 +1811,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
view.params.partial.offset = rounddown(page_offset, chunk_size);
view.params.partial.size =
min_t(unsigned int, chunk_size,
- (area->vm_end - area->vm_start) / PAGE_SIZE -
- view.params.partial.offset);
+ vma_pages(area) - view.params.partial.offset);
/* If the partial covers the entire object, just create a
* normal VMA.
@@ -2208,6 +2205,15 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
return 0;
}
+static unsigned int swiotlb_max_size(void)
+{
+#if IS_ENABLED(CONFIG_SWIOTLB)
+ return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
+#else
+ return 0;
+#endif
+}
+
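Note: swiotlb_max_size() turns the old SWIOTLB special case into a plain upper bound on scatterlist segment length: in the loop below, a new segment starts whenever the PFNs stop being contiguous or the current segment reaches max_segment. A standalone sketch of that coalescing rule, capped in pages rather than bytes for brevity:

	/* Group contiguous pfns into segments of at most max_len pages. */
	#include <stdio.h>

	static void build_segments(const unsigned long *pfn, int n, int max_len)
	{
		int i, start = 0, len = 0;

		for (i = 0; i < n; i++) {
			if (!i || len >= max_len || pfn[i] != pfn[i - 1] + 1) {
				if (i)
					printf("segment: pfn %lu, %d pages\n",
					       pfn[start], len);
				start = i;
				len = 0;
			}
			len++;
		}
		if (len)
			printf("segment: pfn %lu, %d pages\n", pfn[start], len);
	}

On {10, 11, 12, 40, 41} with max_len = 2 this yields {10,11}, {12} and {40,41} -- the same splits the GEM code makes, only measured in bytes.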
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
@@ -2219,6 +2225,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
+ unsigned int max_segment;
int ret;
gfp_t gfp;
@@ -2229,6 +2236,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ max_segment = swiotlb_max_size();
+ if (!max_segment)
+ max_segment = rounddown(UINT_MAX, PAGE_SIZE);
+
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return -ENOMEM;
@@ -2264,22 +2275,15 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
*/
- i915_gem_shrink_all(dev_priv);
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto err_pages;
}
}
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- st->nents++;
- sg_set_page(sg, page, PAGE_SIZE, 0);
- sg = sg_next(sg);
- continue;
- }
-#endif
- if (!i || page_to_pfn(page) != last_pfn + 1) {
+ if (!i ||
+ sg->length >= max_segment ||
+ page_to_pfn(page) != last_pfn + 1) {
if (i)
sg = sg_next(sg);
st->nents++;
@@ -2292,9 +2296,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
/* Check that the i965g/gm workaround works. */
WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
-#ifdef CONFIG_SWIOTLB
- if (!swiotlb_nr_tbl())
-#endif
+ if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg);
obj->pages = st;
@@ -2581,8 +2583,6 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
struct i915_gem_context *incomplete_ctx;
bool ring_hung;
- /* Ensure irq handler finishes, and not run again. */
- tasklet_kill(&engine->irq_tasklet);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
@@ -2591,6 +2591,9 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
return;
ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+ if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
+ ring_hung = false;
+
i915_set_reset_status(request->ctx, ring_hung);
if (!ring_hung)
return;
@@ -2621,10 +2624,11 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
void i915_gem_reset(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
i915_gem_retire_requests(dev_priv);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_gem_reset_engine(engine);
i915_gem_restore_fences(&dev_priv->drm);
@@ -2672,12 +2676,13 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
i915_gem_context_lost(dev_priv);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_gem_cleanup_engine(engine);
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
@@ -2716,6 +2721,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
container_of(work, typeof(*dev_priv), gt.idle_work.work);
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool rearm_hangcheck;
if (!READ_ONCE(dev_priv->gt.awake))
@@ -2738,7 +2744,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (dev_priv->gt.active_engines)
goto out_unlock;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
i915_gem_batch_pool_fini(&engine->batch_pool);
GEM_BUG_ON(!dev_priv->gt.awake);
@@ -2931,9 +2937,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (engine->last_context == NULL)
continue;
@@ -3095,6 +3102,9 @@ search_free:
goto err_unpin;
}
+
+ GEM_BUG_ON(vma->node.start < start);
+ GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
@@ -3173,7 +3183,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
*/
wmb();
if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
- POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
+ POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
@@ -3468,7 +3478,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
level = I915_CACHE_LLC;
break;
case I915_CACHING_DISPLAY:
- level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
+ level = HAS_WT(dev_priv) ? I915_CACHE_WT : I915_CACHE_NONE;
break;
default:
return -EINVAL;
@@ -3526,7 +3536,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
ret = i915_gem_object_set_cache_level(obj,
- HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
+ HAS_WT(to_i915(obj->base.dev)) ?
+ I915_CACHE_WT : I915_CACHE_NONE);
if (ret) {
vma = ERR_PTR(ret);
goto err_unpin_display;
@@ -3795,7 +3806,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags)
{
- struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_address_space *vm = &dev_priv->ggtt.base;
struct i915_vma *vma;
int ret;
@@ -3808,6 +3820,41 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
(i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
return ERR_PTR(-ENOSPC);
+ if (flags & PIN_MAPPABLE) {
+ u32 fence_size;
+
+ fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
+ i915_gem_object_get_tiling(obj));
+ /* If the required space is larger than the available
+ * aperture, we will not be able to find a slot for the
+ * object and unbinding the object now will be in
+ * vain. Worse, doing so may cause us to ping-pong
+ * the object in and out of the Global GTT and
+ * waste a lot of cycles under the mutex.
+ */
+ if (fence_size > dev_priv->ggtt.mappable_end)
+ return ERR_PTR(-E2BIG);
+
+ /* If NONBLOCK is set the caller is optimistically
+ * trying to cache the full object within the mappable
+ * aperture, and *must* have a fallback in place for
+ * situations where we cannot bind the object. We
+ * can be a little more lax here and use the fallback
+ * more often to avoid costly migrations of ourselves
+ * and other objects within the aperture.
+ *
+ * Half-the-aperture is used as a simple heuristic.
+ * More interesting would be to search for a free
+ * block prior to making the commitment to unbind.
+ * That caters for the self-harm case, and with a
+ * little more heuristics (e.g. NOFAULT, NOEVICT)
+ * we could try to minimise harm to others.
+ */
+ if (flags & PIN_NONBLOCK &&
+ fence_size > dev_priv->ggtt.mappable_end / 2)
+ return ERR_PTR(-ENOSPC);
+ }
+
WARN(i915_vma_is_pinned(vma),
"bo is already pinned in ggtt with incorrect alignment:"
" offset=%08x, req.alignment=%llx,"
@@ -4086,14 +4133,29 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.put_pages = i915_gem_object_put_pages_gtt,
};
-struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
- size_t size)
+/* Note we don't consider signbits :| */
+#define overflows_type(x, T) \
+ (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
+
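Note: overflows_type() reports truncation by shifting the value right by the destination's full bit-width; any non-zero remainder means the value cannot be stored. The sizeof() comparison short-circuits first, so the shift count never reaches the width of the source type when it is evaluated. Hypothetical usage, assuming the kernel's u64/u32 types:

	u64 size = 1ull << 40; /* 1 TiB */
	u32 dst;

	overflows_type(size, dst);    /* sizeof(u64) > sizeof(u32) and
	                               * size >> 32 == 256: overflows */
	overflows_type(4096ull, dst); /* 4096 >> 32 == 0: fits */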
+struct drm_i915_gem_object *
+i915_gem_object_create(struct drm_device *dev, u64 size)
{
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
int ret;
+ /* There is a prevalence of the assumption that we fit the object's
+ * page count inside a 32bit _signed_ variable. Let's document this and
+ * catch if we ever need to fix it. In the meantime, if you do spot
+ * such a local variable, please consider fixing!
+ */
+ if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
@@ -4270,6 +4332,30 @@ int i915_gem_suspend(struct drm_device *dev)
*/
WARN_ON(dev_priv->gt.awake);
+ /*
+ * Neither the BIOS, ourselves, nor any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode. And the only
+ * known way to disable logical contexts is through a GPU reset.
+ *
+ * So in order to leave the system in a known default configuration,
+ * always reset the GPU upon unload and suspend. Afterwards we then
+ * clean up the GEM state tracking, flushing off the requests and
+ * leaving the system in a known idle state.
+ *
+ * Note that it is of the utmost importance that the GPU is idle and
+ * all stray writes are flushed *before* we dismantle the backing
+ * storage for the pinned objects.
+ *
+ * However, since we are uncertain that resetting the GPU on older
+ * machines is a good idea, we don't - just in case it leaves the
+ * machine in an unusable condition.
+ */
+ if (HAS_HW_CONTEXTS(dev)) {
+ int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ WARN_ON(reset && reset != -ENODEV);
+ }
+
return 0;
err:
@@ -4304,44 +4390,42 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_TILE_SURFACE_SWIZZLING);
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
return;
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
- else if (IS_GEN8(dev))
+ else if (IS_GEN8(dev_priv))
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
BUG();
}
-static void init_unused_ring(struct drm_device *dev, u32 base)
+static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RING_CTL(base), 0);
I915_WRITE(RING_HEAD(base), 0);
I915_WRITE(RING_TAIL(base), 0);
I915_WRITE(RING_START(base), 0);
}
-static void init_unused_rings(struct drm_device *dev)
-{
- if (IS_I830(dev)) {
- init_unused_ring(dev, PRB1_BASE);
- init_unused_ring(dev, SRB0_BASE);
- init_unused_ring(dev, SRB1_BASE);
- init_unused_ring(dev, SRB2_BASE);
- init_unused_ring(dev, SRB3_BASE);
- } else if (IS_GEN2(dev)) {
- init_unused_ring(dev, SRB0_BASE);
- init_unused_ring(dev, SRB1_BASE);
- } else if (IS_GEN3(dev)) {
- init_unused_ring(dev, PRB1_BASE);
- init_unused_ring(dev, PRB2_BASE);
+static void init_unused_rings(struct drm_i915_private *dev_priv)
+{
+ if (IS_I830(dev_priv)) {
+ init_unused_ring(dev_priv, PRB1_BASE);
+ init_unused_ring(dev_priv, SRB0_BASE);
+ init_unused_ring(dev_priv, SRB1_BASE);
+ init_unused_ring(dev_priv, SRB2_BASE);
+ init_unused_ring(dev_priv, SRB3_BASE);
+ } else if (IS_GEN2(dev_priv)) {
+ init_unused_ring(dev_priv, SRB0_BASE);
+ init_unused_ring(dev_priv, SRB1_BASE);
+ } else if (IS_GEN3(dev_priv)) {
+ init_unused_ring(dev_priv, PRB1_BASE);
+ init_unused_ring(dev_priv, PRB2_BASE);
}
}
@@ -4350,6 +4434,7 @@ i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
/* Double layer security blanket, see i915_gem_init() */
@@ -4358,12 +4443,12 @@ i915_gem_init_hw(struct drm_device *dev)
if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
- if (IS_HASWELL(dev))
- I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+ if (IS_HASWELL(dev_priv))
+ I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
- if (HAS_PCH_NOP(dev)) {
- if (IS_IVYBRIDGE(dev)) {
+ if (HAS_PCH_NOP(dev_priv)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
@@ -4382,7 +4467,7 @@ i915_gem_init_hw(struct drm_device *dev)
* will prevent c3 entry. Makes sure all unused rings
* are totally idle.
*/
- init_unused_rings(dev);
+ init_unused_rings(dev_priv);
BUG_ON(!dev_priv->kernel_context);
@@ -4393,7 +4478,7 @@ i915_gem_init_hw(struct drm_device *dev)
}
/* Need to do basic initialisation of all rings first: */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
ret = engine->init_hw(engine);
if (ret)
goto out;
@@ -4492,17 +4577,12 @@ i915_gem_cleanup_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
dev_priv->gt.cleanup_engine(engine);
}
-static void
-init_engine_lists(struct intel_engine_cs *engine)
-{
- INIT_LIST_HEAD(&engine->request_list);
-}
-
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
@@ -4539,7 +4619,6 @@ void
i915_gem_load_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int i;
dev_priv->objects =
kmem_cache_create("i915_gem_object",
@@ -4563,8 +4642,6 @@ i915_gem_load_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- init_engine_lists(&dev_priv->engine[i]);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index df10f4e95736..5dca32ac1c67 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -192,7 +192,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(to_i915(dev))) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
@@ -474,10 +474,11 @@ int i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
@@ -492,13 +493,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
if (!i915_gem_context_is_default(ctx))
continue;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
ctx->engine[engine->id].initialised = false;
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
}
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct intel_context *kce =
&dev_priv->kernel_context->engine[engine->id];
@@ -563,6 +564,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
struct drm_i915_private *dev_priv = req->i915;
struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
+ enum intel_engine_id id;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -605,7 +607,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
@@ -634,7 +636,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
@@ -929,8 +931,9 @@ int i915_switch_context(struct drm_i915_gem_request *req)
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
int ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5b6f81c1dbca..b5e9e669f50f 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -37,8 +37,9 @@ static bool
gpu_is_idle(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
if (intel_engine_is_active(engine))
return false;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7adb4c77cc7f..e52affdcc125 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -370,8 +370,7 @@ static void reloc_cache_fini(struct reloc_cache *cache)
ggtt->base.clear_range(&ggtt->base,
cache->node.start,
- cache->node.size,
- true);
+ cache->node.size);
drm_mm_remove_node(&cache->node);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -429,7 +428,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
}
if (cache->vaddr) {
- io_mapping_unmap_atomic(unmask_page(cache->vaddr));
+ io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
struct i915_vma *vma;
int ret;
@@ -474,7 +473,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset += page << PAGE_SHIFT;
}
- vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
+ vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
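
The __force casts added above let the __iomem pointer be laundered through the unsigned long cache->vaddr slot without sparse warnings (the driver additionally tags the low bits, hence unmask_page() at the release site). The pattern in isolation, as a hedged sketch with placeholder names:

static void map_stash_unmap(struct io_mapping *mapping, unsigned long offset)
{
	void __iomem *io;
	unsigned long stash;

	io = io_mapping_map_atomic_wc(mapping, offset);
	stash = (unsigned long)(void __force *)io;	/* drop __iomem to store */

	/* ... use the mapping ... */

	io_mapping_unmap_atomic((void __force __iomem *)stash);
}
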
@@ -552,27 +551,13 @@ repeat:
return 0;
}
-static bool object_is_idle(struct drm_i915_gem_object *obj)
-{
- unsigned long active = i915_gem_object_get_active(obj);
- int idx;
-
- for_each_active(active, idx) {
- if (!i915_gem_active_is_idle(&obj->last_read[idx],
- &obj->base.dev->struct_mutex))
- return false;
- }
-
- return true;
-}
-
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc,
struct reloc_cache *cache)
{
- struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
@@ -591,7 +576,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */
- if (unlikely(IS_GEN6(dev) &&
+ if (unlikely(IS_GEN6(dev_priv) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
PIN_GLOBAL);
@@ -649,10 +634,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -EINVAL;
}
- /* We can't wait for rendering with pagefaults disabled */
- if (pagefault_disabled() && !object_is_idle(obj))
- return -EFAULT;
-
ret = relocate_entry(obj, reloc, cache, target_offset);
if (ret)
return ret;
@@ -679,12 +660,23 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
remain = entry->relocation_count;
while (remain) {
struct drm_i915_gem_relocation_entry *r = stack_reloc;
- int count = remain;
- if (count > ARRAY_SIZE(stack_reloc))
- count = ARRAY_SIZE(stack_reloc);
+ unsigned long unwritten;
+ unsigned int count;
+
+ count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
remain -= count;
- if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) {
+ /* This is the fast path and we cannot handle a pagefault
+ * whilst holding the struct mutex lest the user pass in the
+ * relocations contained within a mmaped bo. In such a case the
+ * page fault handler would call i915_gem_fault() and we would
+ * try to acquire the struct mutex again. Obviously this is bad
+ * and so lockdep complains vehemently.
+ */
+ pagefault_disable();
+ unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
+ pagefault_enable();
+ if (unlikely(unwritten)) {
ret = -EFAULT;
goto out;
}
@@ -696,11 +688,26 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
if (ret)
goto out;
- if (r->presumed_offset != offset &&
- __put_user(r->presumed_offset,
- &user_relocs->presumed_offset)) {
- ret = -EFAULT;
- goto out;
+ if (r->presumed_offset != offset) {
+ pagefault_disable();
+ unwritten = __put_user(r->presumed_offset,
+ &user_relocs->presumed_offset);
+ pagefault_enable();
+ if (unlikely(unwritten)) {
+ /* Note that reporting an error now
+ * leaves everything in an inconsistent
+ * state as we have *already* changed
+ * the relocation value inside the
+ * object. As we have not changed
+ * reloc.presumed_offset and will not
+ * change execobject.offset, on the next
+ * call we may not rewrite the value
+ * inside the object, leaving it
+ * dangling and causing a GPU hang.
+ */
+ ret = -EFAULT;
+ goto out;
+ }
}
user_relocs++;
@@ -740,20 +747,11 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
struct i915_vma *vma;
int ret = 0;
- /* This is the fast path and we cannot handle a pagefault whilst
- * holding the struct mutex lest the user pass in the relocations
- * contained within a mmaped bo. For in such a case we, the page
- * fault handler would call i915_gem_fault() and we would try to
- * acquire the struct mutex again. Obviously this is bad and so
- * lockdep complains vehemently.
- */
- pagefault_disable();
list_for_each_entry(vma, &eb->vmas, exec_list) {
ret = i915_gem_execbuffer_relocate_vma(vma, eb);
if (ret)
break;
}
- pagefault_enable();
return ret;
}
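
Taken together, these hunks shrink the pagefault-disabled window from the whole relocation walk down to the two atomic user accesses, so the slow path can still fault the relocation array in. Condensed into one hypothetical helper (the name and arguments are invented for illustration):

static int copy_relocs_atomic(struct drm_i915_gem_relocation_entry *r,
			      const struct drm_i915_gem_relocation_entry __user *u,
			      unsigned int count)
{
	unsigned long unwritten;

	/* Faults stay disabled only across the inatomic copy itself, so a
	 * fault on a mmaped bo can no longer deadlock on struct_mutex.
	 */
	pagefault_disable();
	unwritten = __copy_from_user_inatomic(r, u, count * sizeof(*r));
	pagefault_enable();

	return unwritten ? -EFAULT : 0;
}
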
@@ -1599,12 +1597,12 @@ eb_select_engine(struct drm_i915_private *dev_priv,
return NULL;
}
- engine = &dev_priv->engine[_VCS(bsd_idx)];
+ engine = dev_priv->engine[_VCS(bsd_idx)];
} else {
- engine = &dev_priv->engine[user_ring_map[user_ring_id]];
+ engine = dev_priv->engine[user_ring_map[user_ring_id]];
}
- if (!intel_engine_initialized(engine)) {
+ if (!engine) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 2c7ba0ee127c..a6daf2deab74 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -465,7 +465,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+ if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
@@ -504,19 +504,20 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
- } else if (IS_GEN5(dev)) {
+ } else if (IS_GEN5(dev_priv)) {
/* On Ironlake, whatever the DRAM config, the GPU always does
* the same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ } else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) &&
+ !IS_G33(dev_priv))) {
uint32_t dcc;
/* On 9xx chipsets, channel interleave by the CPU is
@@ -554,7 +555,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
/* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN4(dev) &&
+ if (IS_GEN4(dev_priv) &&
!(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0bb4232f66bc..062fb0ad75da 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -191,15 +191,13 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
{
vma->vm->clear_range(vma->vm,
vma->node.start,
- vma->size,
- true);
+ vma->size);
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid)
+ enum i915_cache_level level)
{
- gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+ gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
pte |= addr;
switch (level) {
@@ -234,9 +232,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -256,9 +254,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -280,9 +278,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
static gen6_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 flags)
+ u32 flags)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
if (!(flags & PTE_READ_ONLY))
@@ -296,9 +294,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
@@ -309,9 +307,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
static gen6_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 unused)
+ u32 unused)
{
- gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+ gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) {
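
Each pte_encode variant loses its bool valid parameter in this series: PTEs built here are now always valid, and "invalid" entries are produced by the clear paths encoding the dedicated scratch page instead. A hedged sketch of the new call-site shape (encode_scratch is a hypothetical helper, not a driver symbol):

static gen8_pte_t encode_scratch(struct i915_address_space *vm)
{
	/* There is no valid=false mode any more; an empty range simply
	 * points every entry at the scratch page.
	 */
	return gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
}
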
@@ -373,27 +371,29 @@ static void *kmap_page_dma(struct i915_page_dma *p)
/* We use the flushing unmap only with ppgtt structures:
* page directories, page tables and scratch pages.
*/
-static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
+static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
{
/* There are only a few exceptions for gen >= 6: chv and bxt.
* And we are not sure about the latter, so play safe for now.
*/
- if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
}
#define kmap_px(px) kmap_page_dma(px_base(px))
-#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+#define kunmap_px(ppgtt, vaddr) \
+ kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
-#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
-#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
+#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
+#define fill32_px(dev_priv, px, v) \
+ fill_page_dma_32((dev_priv), px_base(px), (v))
-static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
- const uint64_t val)
+static void fill_page_dma(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p, const uint64_t val)
{
int i;
uint64_t * const vaddr = kmap_page_dma(p);
@@ -401,17 +401,17 @@ static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
for (i = 0; i < 512; i++)
vaddr[i] = val;
- kunmap_page_dma(dev, vaddr);
+ kunmap_page_dma(dev_priv, vaddr);
}
-static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
- const uint32_t val32)
+static void fill_page_dma_32(struct drm_i915_private *dev_priv,
+ struct i915_page_dma *p, const uint32_t val32)
{
uint64_t v = val32;
v = v << 32 | val32;
- fill_page_dma(dev, p, v);
+ fill_page_dma(dev_priv, p, v);
}
static int
@@ -472,9 +472,9 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
gen8_pte_t scratch_pte;
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true);
+ I915_CACHE_LLC);
- fill_px(vm->dev, pt, scratch_pte);
+ fill_px(to_i915(vm->dev), pt, scratch_pte);
}
static void gen6_initialize_pt(struct i915_address_space *vm,
@@ -485,9 +485,9 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
WARN_ON(vm->scratch_page.daddr == 0);
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true, 0);
+ I915_CACHE_LLC, 0);
- fill32_px(vm->dev, pt, scratch_pte);
+ fill32_px(to_i915(vm->dev), pt, scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct drm_device *dev)
@@ -534,7 +534,7 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
- fill_px(vm->dev, pd, scratch_pde);
+ fill_px(to_i915(vm->dev), pd, scratch_pde);
}
static int __pdp_init(struct drm_device *dev,
@@ -615,7 +615,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
- fill_px(vm->dev, pdp, scratch_pdpe);
+ fill_px(to_i915(vm->dev), pdp, scratch_pdpe);
}
static void gen8_initialize_pml4(struct i915_address_space *vm,
@@ -626,7 +626,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
I915_CACHE_LLC);
- fill_px(vm->dev, pml4, scratch_pml4e);
+ fill_px(to_i915(vm->dev), pml4, scratch_pml4e);
}
static void
@@ -706,85 +706,157 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}
-static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
- uint64_t start,
- uint64_t length,
- gen8_pte_t scratch_pte)
+/* Removes entries from a single page table, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries.
+ */
+static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ uint64_t start,
+ uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ unsigned int pte_start = gen8_pte_index(start);
+ unsigned int num_entries = gen8_pte_count(start, length);
+ uint64_t pte;
gen8_pte_t *pt_vaddr;
- unsigned pdpe = gen8_pdpe_index(start);
- unsigned pde = gen8_pde_index(start);
- unsigned pte = gen8_pte_index(start);
- unsigned num_entries = length >> PAGE_SHIFT;
- unsigned last_pte, i;
+ gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_LLC);
- if (WARN_ON(!pdp))
- return;
+ if (WARN_ON(!px_page(pt)))
+ return false;
- while (num_entries) {
- struct i915_page_directory *pd;
- struct i915_page_table *pt;
+ bitmap_clear(pt->used_ptes, pte_start, num_entries);
- if (WARN_ON(!pdp->page_directory[pdpe]))
- break;
+ if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
+ free_pt(vm->dev, pt);
+ return true;
+ }
- pd = pdp->page_directory[pdpe];
+ pt_vaddr = kmap_px(pt);
- if (WARN_ON(!pd->page_table[pde]))
- break;
+ for (pte = pte_start; pte < pte_start + num_entries; pte++)
+ pt_vaddr[pte] = scratch_pte;
- pt = pd->page_table[pde];
+ kunmap_px(ppgtt, pt_vaddr);
- if (WARN_ON(!px_page(pt)))
- break;
+ return false;
+}
- last_pte = pte + num_entries;
- if (last_pte > GEN8_PTES)
- last_pte = GEN8_PTES;
+/* Removes entries from a single page dir, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries.
+ */
+static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ uint64_t start,
+ uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_table *pt;
+ uint64_t pde;
+ gen8_pde_t *pde_vaddr;
+ gen8_pde_t scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt),
+ I915_CACHE_LLC);
- pt_vaddr = kmap_px(pt);
+ gen8_for_each_pde(pt, pd, start, length, pde) {
+ if (WARN_ON(!pd->page_table[pde]))
+ break;
- for (i = pte; i < last_pte; i++) {
- pt_vaddr[i] = scratch_pte;
- num_entries--;
+ if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
+ __clear_bit(pde, pd->used_pdes);
+ pde_vaddr = kmap_px(pd);
+ pde_vaddr[pde] = scratch_pde;
+ kunmap_px(ppgtt, pde_vaddr);
}
+ }
- kunmap_px(ppgtt, pt_vaddr);
+ if (bitmap_empty(pd->used_pdes, I915_PDES)) {
+ free_pd(vm->dev, pd);
+ return true;
+ }
+
+ return false;
+}
- pte = 0;
- if (++pde == I915_PDES) {
- if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
- break;
- pde = 0;
+/* Removes entries from a single page dir pointer, releasing it if it's empty.
+ * Caller can use the return value to update higher-level entries.
+ */
+static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
+ struct i915_page_directory_pointer *pdp,
+ uint64_t start,
+ uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory *pd;
+ uint64_t pdpe;
+ gen8_ppgtt_pdpe_t *pdpe_vaddr;
+ gen8_ppgtt_pdpe_t scratch_pdpe =
+ gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
+
+ gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
+ if (WARN_ON(!pdp->page_directory[pdpe]))
+ break;
+
+ if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
+ __clear_bit(pdpe, pdp->used_pdpes);
+ if (USES_FULL_48BIT_PPGTT(vm->dev)) {
+ pdpe_vaddr = kmap_px(pdp);
+ pdpe_vaddr[pdpe] = scratch_pdpe;
+ kunmap_px(ppgtt, pdpe_vaddr);
+ }
}
}
+
+ if (USES_FULL_48BIT_PPGTT(vm->dev) &&
+ bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->dev))) {
+ free_pdp(vm->dev, pdp);
+ return true;
+ }
+
+ return false;
}
-static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch)
+/* Removes entries from a single pml4.
+ * This is the top-level structure in 4-level page tables used on gen8+.
+ * Empty entries always point at the scratch pml4e.
+ */
+static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
+ struct i915_pml4 *pml4,
+ uint64_t start,
+ uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, use_scratch);
+ struct i915_page_directory_pointer *pdp;
+ uint64_t pml4e;
+ gen8_ppgtt_pml4e_t *pml4e_vaddr;
+ gen8_ppgtt_pml4e_t scratch_pml4e =
+ gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
- if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
- gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
- scratch_pte);
- } else {
- uint64_t pml4e;
- struct i915_page_directory_pointer *pdp;
+ GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->dev));
- gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
- gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
- scratch_pte);
+ gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
+ if (WARN_ON(!pml4->pdps[pml4e]))
+ break;
+
+ if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
+ __clear_bit(pml4e, pml4->used_pml4es);
+ pml4e_vaddr = kmap_px(pml4);
+ pml4e_vaddr[pml4e] = scratch_pml4e;
+ kunmap_px(ppgtt, pml4e_vaddr);
}
}
}
+static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (USES_FULL_48BIT_PPGTT(vm->dev))
+ gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
+ else
+ gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
+}
+
static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
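
The clear path now tears the page-table hierarchy down bottom-up: each level clears its span, and when a child reports that it has become empty, the parent frees it, points that slot back at the scratch structure and clears the matching bit in its used-bitmap. Reduced to its shape (every name below is illustrative, not a driver symbol):

static bool clear_level(struct level *parent, u64 start, u64 length)
{
	struct level *child;
	unsigned int idx;

	for_each_child_in_range(child, parent, start, length, idx) {
		if (!clear_child(child, start, length))
			continue;

		/* the child freed itself: unhook it and restore scratch */
		__clear_bit(idx, parent->used);
		parent->entries[idx] = parent->scratch_entry;
	}

	/* tell our own parent whether we may be freed as well */
	return bitmap_empty(parent->used, parent->nr_entries);
}
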
@@ -809,7 +881,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
- cache_level, true);
+ cache_level);
if (++pte == GEN8_PTES) {
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
@@ -1452,7 +1524,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint64_t start = ppgtt->base.start;
uint64_t length = ppgtt->base.total;
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true);
+ I915_CACHE_LLC);
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
@@ -1569,7 +1641,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true, 0);
+ I915_CACHE_LLC, 0);
gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
u32 expected;
@@ -1728,8 +1800,9 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
@@ -1741,12 +1814,13 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
+ enum intel_engine_id id;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
ecochk = I915_READ(GAM_ECOCHK);
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv)) {
ecochk |= ECOCHK_PPGTT_WB_HSW;
} else {
ecochk |= ECOCHK_PPGTT_LLC_IVB;
@@ -1754,7 +1828,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
}
I915_WRITE(GAM_ECOCHK, ecochk);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
/* GFX_MODE is per-ring on gen7+ */
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
@@ -1782,8 +1856,7 @@ static void gen6_ppgtt_enable(struct drm_device *dev)
/* PPGTT support for Sandybdrige/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen6_pte_t *pt_vaddr, scratch_pte;
@@ -1794,7 +1867,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned last_pte, i;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true, 0);
+ I915_CACHE_LLC, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -1832,7 +1905,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
pt_vaddr[act_pte] =
- vm->pte_encode(addr, cache_level, true, flags);
+ vm->pte_encode(addr, cache_level, flags);
if (++act_pte == GEN6_PTES) {
kunmap_px(ppgtt, pt_vaddr);
@@ -2056,11 +2129,11 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
int ret;
ppgtt->base.pte_encode = ggtt->base.pte_encode;
- if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
+ if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
ppgtt->switch_mm = gen6_mm_switch;
- else if (IS_HASWELL(dev))
+ else if (IS_HASWELL(dev_priv))
ppgtt->switch_mm = hsw_mm_switch;
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
ppgtt->switch_mm = gen7_mm_switch;
else
BUG();
@@ -2129,13 +2202,13 @@ static void gtt_write_workarounds(struct drm_device *dev)
* workarounds here even if they get overwritten by GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
- else if (IS_CHERRYVIEW(dev))
+ else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_SKYLAKE(dev))
+ else if (IS_SKYLAKE(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
@@ -2157,6 +2230,8 @@ static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
int i915_ppgtt_init_hw(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
gtt_write_workarounds(dev);
/* In the case of execlists, PPGTT is enabled by the context descriptor
@@ -2168,9 +2243,9 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
if (!USES_PPGTT(dev))
return 0;
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
gen6_ppgtt_enable(dev);
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
gen7_ppgtt_enable(dev);
else if (INTEL_INFO(dev)->gen >= 8)
gen8_ppgtt_enable(dev);
@@ -2239,11 +2314,12 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (INTEL_INFO(dev_priv)->gen < 6)
return;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
u32 fault_reg;
fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
@@ -2260,7 +2336,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
fault_reg & ~RING_FAULT_VALID);
}
}
- POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
+
+ /* Engine-specific init may not have been done until this point. */
+ if (dev_priv->engine[RCS])
+ POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
@@ -2286,8 +2365,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev_priv);
- ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
- true);
+ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
i915_ggtt_flush(dev_priv);
}
@@ -2321,7 +2399,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
+ gen8_set_pte(pte, gen8_pte_encode(addr, level));
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
@@ -2348,7 +2426,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
- gtt_entry = gen8_pte_encode(addr, level, true);
+ gtt_entry = gen8_pte_encode(addr, level);
gen8_set_pte(&gtt_entries[i++], gtt_entry);
}
@@ -2412,7 +2490,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- iowrite32(vm->pte_encode(addr, level, true, flags), pte);
+ iowrite32(vm->pte_encode(addr, level, flags), pte);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
@@ -2445,7 +2523,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
- gtt_entry = vm->pte_encode(addr, level, true, flags);
+ gtt_entry = vm->pte_encode(addr, level, flags);
iowrite32(gtt_entry, &gtt_entries[i++]);
}
@@ -2469,16 +2547,12 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
}
static void nop_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t start, uint64_t length)
{
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t start, uint64_t length)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
@@ -2498,8 +2572,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
num_entries = max_entries;
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC,
- use_scratch);
+ I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base);
@@ -2509,8 +2582,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool use_scratch)
+ uint64_t length)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
@@ -2530,7 +2602,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
num_entries = max_entries;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, use_scratch, 0);
+ I915_CACHE_LLC, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@@ -2577,8 +2649,7 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
static void i915_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool unused)
+ uint64_t length)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned first_entry = start >> PAGE_SHIFT;
@@ -2662,13 +2733,11 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
if (vma->flags & I915_VMA_GLOBAL_BIND)
vma->vm->clear_range(vma->vm,
- vma->node.start, size,
- true);
+ vma->node.start, size);
if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
appgtt->base.clear_range(&appgtt->base,
- vma->node.start, size,
- true);
+ vma->node.start, size);
}
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
@@ -2717,6 +2786,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
*/
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long hole_start, hole_end;
+ struct i915_hw_ppgtt *ppgtt;
struct drm_mm_node *entry;
int ret;
@@ -2724,45 +2794,48 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
if (ret)
return ret;
+ /* Reserve a mappable slot for our lockless error capture */
+ ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
+ &ggtt->error_capture,
+ 4096, 0, -1,
+ 0, ggtt->mappable_end,
+ 0, 0);
+ if (ret)
+ return ret;
+
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
ggtt->base.clear_range(&ggtt->base, hole_start,
- hole_end - hole_start, true);
+ hole_end - hole_start);
}
/* And finally clear the reserved guard page */
ggtt->base.clear_range(&ggtt->base,
- ggtt->base.total - PAGE_SIZE, PAGE_SIZE,
- true);
+ ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
- struct i915_hw_ppgtt *ppgtt;
-
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return -ENOMEM;
+ if (!ppgtt) {
+ ret = -ENOMEM;
+ goto err;
+ }
ret = __hw_ppgtt_init(ppgtt, dev_priv);
- if (ret) {
- kfree(ppgtt);
- return ret;
- }
+ if (ret)
+ goto err_ppgtt;
- if (ppgtt->base.allocate_va_range)
+ if (ppgtt->base.allocate_va_range) {
ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
ppgtt->base.total);
- if (ret) {
- ppgtt->base.cleanup(&ppgtt->base);
- kfree(ppgtt);
- return ret;
+ if (ret)
+ goto err_ppgtt_cleanup;
}
ppgtt->base.clear_range(&ppgtt->base,
ppgtt->base.start,
- ppgtt->base.total,
- true);
+ ppgtt->base.total);
dev_priv->mm.aliasing_ppgtt = ppgtt;
WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
@@ -2770,6 +2843,14 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
}
return 0;
+
+err_ppgtt_cleanup:
+ ppgtt->base.cleanup(&ppgtt->base);
+err_ppgtt:
+ kfree(ppgtt);
+err:
+ drm_mm_remove_node(&ggtt->error_capture);
+ return ret;
}
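
The function now follows the usual kernel goto-unwind idiom: each failure jumps to a label that releases everything acquired before it, in reverse order. A minimal template of the idiom (acquire_a/acquire_b are stand-ins for reserving the error-capture node and setting up the aliasing ppgtt):

static int setup(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto err_a;

	return 0;

err_a:
	release_a();	/* reverse order of acquisition */
	return ret;
}
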
/**
@@ -2788,6 +2869,9 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
i915_gem_cleanup_stolen(&dev_priv->drm);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
+
if (drm_mm_initialized(&ggtt->base.mm)) {
intel_vgt_deballoon(dev_priv);
@@ -2895,7 +2979,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_BROXTON(ggtt->base.dev))
+ if (IS_BROXTON(to_i915(ggtt->base.dev)))
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -3237,8 +3321,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
- ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
- true);
+ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
@@ -3267,7 +3350,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
ggtt->base.closed = false;
if (INTEL_INFO(dev)->gen >= 8) {
- if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index ec78be2f8c77..c241d8143255 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -395,7 +395,7 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
- bool valid, u32 flags); /* Create a valid PTE */
+ u32 flags); /* Create a valid PTE */
/* flags for pte_encode */
#define PTE_READ_ONLY (1<<0)
int (*allocate_va_range)(struct i915_address_space *vm,
@@ -403,8 +403,7 @@ struct i915_address_space {
uint64_t length);
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
- uint64_t length,
- bool use_scratch);
+ uint64_t length);
void (*insert_page)(struct i915_address_space *vm,
dma_addr_t addr,
uint64_t offset,
@@ -450,6 +449,8 @@ struct i915_ggtt {
bool do_idle_maps;
int mtrr;
+
+ struct drm_mm_node error_capture;
};
struct i915_hw_ppgtt {
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 95b7e9afd5f8..a98c0f42badd 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -72,9 +72,9 @@ render_state_get_rodata(const struct drm_i915_gem_request *req)
static int render_state_setup(struct render_state *so)
{
- struct drm_device *dev = so->vma->vm->dev;
+ struct drm_i915_private *dev_priv = to_i915(so->vma->vm->dev);
const struct intel_renderstate_rodata *rodata = so->rodata;
- const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
+ const bool has_64bit_reloc = INTEL_GEN(dev_priv) >= 8;
unsigned int i = 0, reloc_index = 0;
struct page *page;
u32 *d;
@@ -115,7 +115,7 @@ static int render_state_setup(struct render_state *so)
so->aux_batch_offset = i * sizeof(u32);
- if (HAS_POOLED_EU(dev)) {
+ if (HAS_POOLED_EU(dev_priv)) {
/*
* We always program the 3x6 pool config, but depending upon which
* subslice is disabled the HW drops down to the appropriate config
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8832f8ec1583..f9af2a00625e 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -26,12 +26,12 @@
#include "i915_drv.h"
-static const char *i915_fence_get_driver_name(struct fence *fence)
+static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
return "i915";
}
-static const char *i915_fence_get_timeline_name(struct fence *fence)
+static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
/* Timelines are bound by eviction to a VM. However, since
* we only have a global seqno at the moment, we only have
@@ -42,12 +42,12 @@ static const char *i915_fence_get_timeline_name(struct fence *fence)
return "global";
}
-static bool i915_fence_signaled(struct fence *fence)
+static bool i915_fence_signaled(struct dma_fence *fence)
{
return i915_gem_request_completed(to_request(fence));
}
-static bool i915_fence_enable_signaling(struct fence *fence)
+static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
if (i915_fence_signaled(fence))
return false;
@@ -56,7 +56,7 @@ static bool i915_fence_enable_signaling(struct fence *fence)
return true;
}
-static signed long i915_fence_wait(struct fence *fence,
+static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
signed long timeout_jiffies)
{
@@ -85,26 +85,26 @@ static signed long i915_fence_wait(struct fence *fence,
return timeout_jiffies;
}
-static void i915_fence_value_str(struct fence *fence, char *str, int size)
+static void i915_fence_value_str(struct dma_fence *fence, char *str, int size)
{
snprintf(str, size, "%u", fence->seqno);
}
-static void i915_fence_timeline_value_str(struct fence *fence, char *str,
+static void i915_fence_timeline_value_str(struct dma_fence *fence, char *str,
int size)
{
snprintf(str, size, "%u",
intel_engine_get_seqno(to_request(fence)->engine));
}
-static void i915_fence_release(struct fence *fence)
+static void i915_fence_release(struct dma_fence *fence)
{
struct drm_i915_gem_request *req = to_request(fence);
kmem_cache_free(req->i915->requests, req);
}
-const struct fence_ops i915_fence_ops = {
+const struct dma_fence_ops i915_fence_ops = {
.get_driver_name = i915_fence_get_driver_name,
.get_timeline_name = i915_fence_get_timeline_name,
.enable_signaling = i915_fence_enable_signaling,
@@ -256,10 +256,11 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
/* Carefully retire all requests without writing to the rings */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
ret = intel_engine_idle(engine,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
@@ -276,7 +277,7 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
}
/* Finally reset hw state */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
intel_engine_init_seqno(engine, seqno);
return 0;
@@ -387,8 +388,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* The reference count is incremented atomically. If it is zero,
* the lookup knows the request is unallocated and complete. Otherwise,
* it is either still in use, or has been reallocated and reset
- * with fence_init(). This increment is safe for release as we check
- * that the request we have a reference to and matches the active
+ * with dma_fence_init(). This increment is safe for release as we
+ * check that the request we have a reference to matches the active
* request.
*
* Before we increment the refcount, we chase the request->engine
@@ -411,11 +412,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
goto err;
spin_lock_init(&req->lock);
- fence_init(&req->fence,
- &i915_fence_ops,
- &req->lock,
- engine->fence_context,
- seqno);
+ dma_fence_init(&req->fence,
+ &i915_fence_ops,
+ &req->lock,
+ engine->fence_context,
+ seqno);
i915_sw_fence_init(&req->submit, submit_notify);
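
The fence -> dma_fence rename is mechanical, but the init pattern is worth seeing whole: a driver embeds a struct dma_fence, supplies a dma_fence_ops table, and seeds the fence with a lock, context and seqno. A minimal self-contained sketch (the my_* names are invented; the ops shown are the historically mandatory ones):

#include <linux/dma-fence.h>

struct my_obj {
	struct dma_fence fence;	/* first member, enabling container casts */
	spinlock_t lock;
};

static const char *my_driver_name(struct dma_fence *f) { return "mine"; }
static const char *my_timeline_name(struct dma_fence *f) { return "global"; }
static bool my_enable_signaling(struct dma_fence *f) { return true; }

static const struct dma_fence_ops my_ops = {
	.get_driver_name = my_driver_name,
	.get_timeline_name = my_timeline_name,
	.enable_signaling = my_enable_signaling,
	.wait = dma_fence_default_wait,
};

static void my_obj_init(struct my_obj *obj, u64 context, unsigned int seqno)
{
	spin_lock_init(&obj->lock);
	dma_fence_init(&obj->fence, &my_ops, &obj->lock, context, seqno);
}
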
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 974bd7bcc801..bceeaa3a5193 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -25,7 +25,7 @@
#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "i915_gem.h"
#include "i915_sw_fence.h"
@@ -62,7 +62,7 @@ struct intel_signal_node {
* The requests are reference counted.
*/
struct drm_i915_gem_request {
- struct fence fence;
+ struct dma_fence fence;
spinlock_t lock;
/** On Which ring this request was generated */
@@ -145,9 +145,9 @@ struct drm_i915_gem_request {
struct list_head execlist_link;
};
-extern const struct fence_ops i915_fence_ops;
+extern const struct dma_fence_ops i915_fence_ops;
-static inline bool fence_is_i915(struct fence *fence)
+static inline bool fence_is_i915(struct dma_fence *fence)
{
return fence->ops == &i915_fence_ops;
}
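
fence_is_i915() exists to guard that cast: to_request() is only sound when the fence's ops table proves the fence is embedded at the start of a drm_i915_gem_request. A hedged example of the downcast pattern a caller might use (request_from_fence is hypothetical):

static struct drm_i915_gem_request *
request_from_fence(struct dma_fence *fence)
{
	/* Check ->ops before trusting the pointer-punning cast. */
	if (!fence_is_i915(fence))
		return NULL;

	return to_request(fence);
}
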
@@ -172,7 +172,7 @@ i915_gem_request_get_engine(struct drm_i915_gem_request *req)
}
static inline struct drm_i915_gem_request *
-to_request(struct fence *fence)
+to_request(struct dma_fence *fence)
{
/* We assume that NULL fence/request are interoperable */
BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
@@ -183,19 +183,19 @@ to_request(struct fence *fence)
static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
- return to_request(fence_get(&req->fence));
+ return to_request(dma_fence_get(&req->fence));
}
static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
- return to_request(fence_get_rcu(&req->fence));
+ return to_request(dma_fence_get_rcu(&req->fence));
}
static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
- fence_put(&req->fence);
+ dma_fence_put(&req->fence);
}
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
@@ -497,7 +497,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
* compiler.
*
* The atomic operation at the heart of
- * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+ * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
* atomic_inc_not_zero() which is only a full memory barrier
* when successful. That is, if i915_gem_request_get_rcu()
* returns the request (and so with the reference counted
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1c237d02f30b..de25b6e0a101 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -182,8 +182,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
!is_vmalloc_addr(obj->mapping))
continue;
- if ((flags & I915_SHRINK_ACTIVE) == 0 &&
- i915_gem_object_is_active(obj))
+ if (!(flags & I915_SHRINK_ACTIVE) &&
+ (i915_gem_object_is_active(obj) ||
+ obj->framebuffer_references))
continue;
if (!can_release_pages(obj))
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 59989e8ee5dc..f4f6d3a48b05 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -115,7 +115,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
pci_read_config_dword(pdev, INTEL_BSM, &bsm);
base = bsm & INTEL_BSM_MASK;
- } else if (IS_I865G(dev)) {
+ } else if (IS_I865G(dev_priv)) {
u32 tseg_size = 0;
u16 toud = 0;
u8 tmp;
@@ -154,7 +154,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
tom = tmp * MB(32);
base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_845G(dev)) {
+ } else if (IS_845G(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -178,7 +178,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
tom = tmp * MB(32);
base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_I830(dev)) {
+ } else if (IS_I830(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -204,7 +204,8 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return 0;
/* make sure we don't clobber the GTT if it's within stolen memory */
- if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+ if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) &&
+ !IS_G4X(dev_priv)) {
struct {
u32 start, end;
} stolen[2] = {
@@ -214,7 +215,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
u64 ggtt_start, ggtt_end;
ggtt_start = I915_READ(PGTBL_CTL);
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
@@ -270,7 +271,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
- if (r == NULL && !IS_GEN3(dev)) {
+ if (r == NULL && !IS_GEN3(dev_priv)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)ggtt->stolen_size);
base = 0;
@@ -437,7 +438,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
case 3:
break;
case 4:
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
g4x_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
@@ -456,7 +457,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
break;
default:
if (IS_BROADWELL(dev_priv) ||
- IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
+ IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
bdw_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
else
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a14b1e3d4c78..c21bc0068d20 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -62,6 +62,7 @@
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
int tile_width;
/* Linear is always fine */
@@ -71,8 +72,8 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode > I915_TILING_LAST)
return false;
- if (IS_GEN2(dev) ||
- (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ if (IS_GEN2(dev_priv) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev_priv)))
tile_width = 128;
else
tile_width = 512;
@@ -90,7 +91,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (stride > 8192)
return false;
- if (IS_GEN3(dev)) {
+ if (IS_GEN3(dev_priv)) {
if (size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 334f15df7c8d..242b9a927899 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -28,6 +28,8 @@
*/
#include <generated/utsrelease.h>
+#include <linux/stop_machine.h>
+#include <linux/zlib.h>
#include "i915_drv.h"
static const char *engine_str(int engine)
@@ -172,6 +174,110 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
+#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
+
+static bool compress_init(struct z_stream_s *zstream)
+{
+ memset(zstream, 0, sizeof(*zstream));
+
+ zstream->workspace =
+ kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!zstream->workspace)
+ return false;
+
+ if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
+ kfree(zstream->workspace);
+ return false;
+ }
+
+ return true;
+}
+
+static int compress_page(struct z_stream_s *zstream,
+ void *src,
+ struct drm_i915_error_object *dst)
+{
+ zstream->next_in = src;
+ zstream->avail_in = PAGE_SIZE;
+
+ do {
+ if (zstream->avail_out == 0) {
+ unsigned long page;
+
+ page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page)
+ return -ENOMEM;
+
+ dst->pages[dst->page_count++] = (void *)page;
+
+ zstream->next_out = (void *)page;
+ zstream->avail_out = PAGE_SIZE;
+ }
+
+ if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+ return -EIO;
+ } while (zstream->avail_in);
+
+ /* Fall back to uncompressed if we increase the size? */
+ if (0 && zstream->total_out > zstream->total_in)
+ return -E2BIG;
+
+ return 0;
+}
+
+static void compress_fini(struct z_stream_s *zstream,
+ struct drm_i915_error_object *dst)
+{
+ if (dst) {
+ zlib_deflate(zstream, Z_FINISH);
+ dst->unused = zstream->avail_out;
+ }
+
+ zlib_deflateEnd(zstream);
+ kfree(zstream->workspace);
+}
+
+static void err_compression_marker(struct drm_i915_error_state_buf *m)
+{
+ err_puts(m, ":");
+}
+
+#else
+
+static bool compress_init(struct z_stream_s *zstream)
+{
+ return true;
+}
+
+static int compress_page(struct z_stream_s *zstream,
+ void *src,
+ struct drm_i915_error_object *dst)
+{
+ unsigned long page;
+
+ page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page)
+ return -ENOMEM;
+
+ dst->pages[dst->page_count++] =
+ memcpy((void *)page, src, PAGE_SIZE);
+
+ return 0;
+}
+
+static void compress_fini(struct z_stream_s *zstream,
+ struct drm_i915_error_object *dst)
+{
+}
+
+static void err_compression_marker(struct drm_i915_error_state_buf *m)
+{
+ err_puts(m, "~");
+}
+
+#endif
+
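
Both configurations expose the same three-call contract, so the capture code can stay compression-agnostic. A hedged sketch of how a caller is expected to drive them (capture_object and its arguments are invented; the real caller also kmaps each source page and sizes dst->pages up front):

static int capture_object(struct drm_i915_error_object *dst,
			  void **src_pages, int nr_pages)
{
	struct z_stream_s zstream;
	int i, ret = 0;

	if (!compress_init(&zstream))
		return -ENOMEM;

	for (i = 0; i < nr_pages; i++) {
		ret = compress_page(&zstream, src_pages[i], dst);
		if (ret)
			break;
	}

	/* flush the deflate stream and record the tail slack on success */
	compress_fini(&zstream, ret ? NULL : dst);
	return ret;
}
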
static void print_error_buffers(struct drm_i915_error_state_buf *m,
const char *name,
struct drm_i915_error_buffer *err,
@@ -228,13 +334,57 @@ static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
return "unknown";
}
+static void error_print_instdone(struct drm_i915_error_state_buf *m,
+ struct drm_i915_error_engine *ee)
+{
+ int slice;
+ int subslice;
+
+ err_printf(m, " INSTDONE: 0x%08x\n",
+ ee->instdone.instdone);
+
+ if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
+ return;
+
+ err_printf(m, " SC_INSTDONE: 0x%08x\n",
+ ee->instdone.slice_common);
+
+ if (INTEL_GEN(m->i915) <= 6)
+ return;
+
+ for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.sampler[slice][subslice]);
+
+ for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.row[slice][subslice]);
+}
+
+static void error_print_request(struct drm_i915_error_state_buf *m,
+ const char *prefix,
+ struct drm_i915_error_request *erq)
+{
+ if (!erq->seqno)
+ return;
+
+ err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+ prefix, erq->pid,
+ erq->context, erq->seqno,
+ jiffies_to_msecs(jiffies - erq->jiffies),
+ erq->head, erq->tail);
+}
+
static void error_print_engine(struct drm_i915_error_state_buf *m,
struct drm_i915_error_engine *ee)
{
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
err_printf(m, " START: 0x%08x\n", ee->start);
- err_printf(m, " HEAD: 0x%08x\n", ee->head);
- err_printf(m, " TAIL: 0x%08x\n", ee->tail);
+ err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
+ err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
+ ee->tail, ee->rq_post, ee->rq_tail);
err_printf(m, " CTL: 0x%08x\n", ee->ctl);
err_printf(m, " MODE: 0x%08x\n", ee->mode);
err_printf(m, " HWS: 0x%08x\n", ee->hws);
@@ -242,7 +392,9 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
(u32)(ee->acthd>>32), (u32)ee->acthd);
err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
- err_printf(m, " INSTDONE: 0x%08x\n", ee->instdone);
+
+ error_print_instdone(m, ee);
+
if (ee->batchbuffer) {
u64 start = ee->batchbuffer->gtt_offset;
u64 end = start + ee->batchbuffer->gtt_size;
@@ -296,6 +448,8 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " hangcheck: %s [%d]\n",
hangcheck_action_to_str(ee->hangcheck_action),
ee->hangcheck_score);
+ error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
+ error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -307,28 +461,72 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
+static int
+ascii85_encode_len(int len)
+{
+ return DIV_ROUND_UP(len, 4);
+}
+
+static bool
+ascii85_encode(u32 in, char *out)
+{
+ int i;
+
+ if (in == 0)
+ return false;
+
+ out[5] = '\0';
+ for (i = 5; i--; ) {
+ out[i] = '!' + in % 85;
+ in /= 85;
+ }
+
+ return true;
+}
+
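
A worked example of the encoding: a non-zero 32-bit word becomes five base-85 digits offset from '!', most significant digit first, while an all-zero word is shortened to a single 'z' by the caller. So 0x00000001 encodes as "!!!!\"", since 1 = 0*85^4 + 0*85^3 + 0*85^2 + 0*85 + 1. A sketch of the per-word emission (emit_word is hypothetical):

static void emit_word(struct drm_i915_error_state_buf *m, u32 word)
{
	char out[6];

	if (ascii85_encode(word, out))
		err_puts(m, out);	/* e.g. 0x00000001 -> "!!!!\"" */
	else
		err_puts(m, "z");	/* all-zero words shrink to one char */
}
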
static void print_error_obj(struct drm_i915_error_state_buf *m,
+ struct intel_engine_cs *engine,
+ const char *name,
struct drm_i915_error_object *obj)
{
- int page, offset, elt;
+ char out[6];
+ int page;
+
+ if (!obj)
+ return;
+
+ if (name) {
+ err_printf(m, "%s --- %s = 0x%08x %08x\n",
+ engine ? engine->name : "global", name,
+ upper_32_bits(obj->gtt_offset),
+ lower_32_bits(obj->gtt_offset));
+ }
+
+ err_compression_marker(m);
+ for (page = 0; page < obj->page_count; page++) {
+ int i, len;
+
+ len = PAGE_SIZE;
+ if (page == obj->page_count - 1)
+ len -= obj->unused;
+ len = ascii85_encode_len(len);
- for (page = offset = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n", offset,
- obj->pages[page][elt]);
- offset += 4;
+ for (i = 0; i < len; i++) {
+ if (ascii85_encode(obj->pages[page][i], out))
+ err_puts(m, out);
+ else
+ err_puts(m, "z");
}
}
+ err_puts(m, "\n");
}
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
const struct intel_device_info *info)
{
#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
-#define SEP_SEMICOLON ;
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#undef SEP_SEMICOLON
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
@@ -339,8 +537,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
- int i, j, offset, elt;
int max_hangcheck_score;
+ int i, j;
if (!error) {
err_printf(m, "no error state collected\n");
@@ -391,7 +589,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < 4; i++)
err_printf(m, "GTIER gt %d: 0x%08x\n", i,
error->gtier[i]);
- } else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
+ } else if (HAS_PCH_SPLIT(dev_priv) || IS_VALLEYVIEW(dev_priv))
err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
@@ -402,10 +600,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < dev_priv->num_fence_regs; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
- err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
- error->extra_instdone[i]);
-
if (INTEL_INFO(dev)->gen >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
@@ -416,7 +610,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -438,7 +632,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
len += scnprintf(buf + len, sizeof(buf), "%s%s",
first ? "" : ", ",
- dev_priv->engine[j].name);
+ dev_priv->engine[j]->name);
first = 0;
}
scnprintf(buf + len, sizeof(buf), ")");
@@ -456,7 +650,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = ee->batchbuffer;
if (obj) {
- err_puts(m, dev_priv->engine[i].name);
+ err_puts(m, dev_priv->engine[i]->name);
if (ee->pid != -1)
err_printf(m, " (submitted by %s [%d])",
ee->comm,
@@ -464,37 +658,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
- }
-
- obj = ee->wa_batchbuffer;
- if (obj) {
- err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
- dev_priv->engine[i].name,
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
+ print_error_obj(m, dev_priv->engine[i], NULL, obj);
}
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
- dev_priv->engine[i].name,
+ dev_priv->engine[i]->name,
ee->num_requests);
- for (j = 0; j < ee->num_requests; j++) {
- err_printf(m, " pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
- ee->requests[j].pid,
- ee->requests[j].seqno,
- ee->requests[j].jiffies,
- ee->requests[j].head,
- ee->requests[j].tail);
- }
+ for (j = 0; j < ee->num_requests; j++)
+ error_print_request(m, " ", &ee->requests[j]);
}
if (IS_ERR(ee->waiters)) {
err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
- dev_priv->engine[i].name);
+ dev_priv->engine[i]->name);
} else if (ee->num_waiters) {
err_printf(m, "%s --- %d waiters\n",
- dev_priv->engine[i].name,
+ dev_priv->engine[i]->name,
ee->num_waiters);
for (j = 0; j < ee->num_waiters; j++) {
err_printf(m, " seqno 0x%08x for %s [%d]\n",
@@ -504,77 +684,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- if ((obj = ee->ringbuffer)) {
- err_printf(m, "%s --- ringbuffer = 0x%08x\n",
- dev_priv->engine[i].name,
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "ringbuffer", ee->ringbuffer);
- if ((obj = ee->hws_page)) {
- u64 hws_offset = obj->gtt_offset;
- u32 *hws_page = &obj->pages[0][0];
+ print_error_obj(m, dev_priv->engine[i],
+ "HW Status", ee->hws_page);
- if (i915.enable_execlists) {
- hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
- hws_page = &obj->pages[LRC_PPHWSP_PN][0];
- }
- err_printf(m, "%s --- HW Status = 0x%08llx\n",
- dev_priv->engine[i].name, hws_offset);
- offset = 0;
- for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- offset,
- hws_page[elt],
- hws_page[elt+1],
- hws_page[elt+2],
- hws_page[elt+3]);
- offset += 16;
- }
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "HW context", ee->ctx);
- obj = ee->wa_ctx;
- if (obj) {
- u64 wa_ctx_offset = obj->gtt_offset;
- u32 *wa_ctx_page = &obj->pages[0][0];
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
- u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
- engine->wa_ctx.per_ctx.size);
-
- err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
- dev_priv->engine[i].name, wa_ctx_offset);
- offset = 0;
- for (elt = 0; elt < wa_ctx_size; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- offset,
- wa_ctx_page[elt + 0],
- wa_ctx_page[elt + 1],
- wa_ctx_page[elt + 2],
- wa_ctx_page[elt + 3]);
- offset += 16;
- }
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "WA context", ee->wa_ctx);
- if ((obj = ee->ctx)) {
- err_printf(m, "%s --- HW Context = 0x%08x\n",
- dev_priv->engine[i].name,
- lower_32_bits(obj->gtt_offset));
- print_error_obj(m, obj);
- }
+ print_error_obj(m, dev_priv->engine[i],
+ "WA batchbuffer", ee->wa_batchbuffer);
}
- if ((obj = error->semaphore)) {
- err_printf(m, "Semaphore page = 0x%08x\n",
- lower_32_bits(obj->gtt_offset));
- for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- elt * 4,
- obj->pages[0][elt],
- obj->pages[0][elt+1],
- obj->pages[0][elt+2],
- obj->pages[0][elt+3]);
- }
- }
+ print_error_obj(m, NULL, "Semaphores", error->semaphore);
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@@ -629,7 +755,7 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
return;
for (page = 0; page < obj->page_count; page++)
- kfree(obj->pages[page]);
+ free_page((unsigned long)obj->pages[page]);
kfree(obj);
}
@@ -667,104 +793,63 @@ static void i915_error_state_free(struct kref *error_ref)
}
static struct drm_i915_error_object *
-i915_error_object_create(struct drm_i915_private *dev_priv,
+i915_error_object_create(struct drm_i915_private *i915,
struct i915_vma *vma)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_object *src;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ const u64 slot = ggtt->error_capture.start;
struct drm_i915_error_object *dst;
- int num_pages;
- bool use_ggtt;
- int i = 0;
- u64 reloc_offset;
+ struct z_stream_s zstream;
+ unsigned long num_pages;
+ struct sgt_iter iter;
+ dma_addr_t dma;
if (!vma)
return NULL;
- src = vma->obj;
- if (!src->pages)
- return NULL;
-
- num_pages = src->base.size >> PAGE_SHIFT;
-
- dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+ num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
+ num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worst-case zlib growth */
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
+ GFP_ATOMIC | __GFP_NOWARN);
if (!dst)
return NULL;
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
+ dst->page_count = 0;
+ dst->unused = 0;
- reloc_offset = dst->gtt_offset;
- use_ggtt = (src->cache_level == I915_CACHE_NONE &&
- (vma->flags & I915_VMA_GLOBAL_BIND) &&
- reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
-
- /* Cannot access stolen address directly, try to use the aperture */
- if (src->stolen) {
- use_ggtt = true;
-
- if (!(vma->flags & I915_VMA_GLOBAL_BIND))
- goto unwind;
-
- reloc_offset = vma->node.start;
- if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
- goto unwind;
+ if (!compress_init(&zstream)) {
+ kfree(dst);
+ return NULL;
}
- /* Cannot access snooped pages through the aperture */
- if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
- !HAS_LLC(dev_priv))
- goto unwind;
+ for_each_sgt_dma(dma, iter, vma->pages) {
+ void __iomem *s;
+ int ret;
- dst->page_count = num_pages;
- while (num_pages--) {
- unsigned long flags;
- void *d;
+ ggtt->base.insert_page(&ggtt->base, dma, slot,
+ I915_CACHE_NONE, 0);
- d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
- if (d == NULL)
- goto unwind;
-
- local_irq_save(flags);
- if (use_ggtt) {
- void __iomem *s;
-
- /* Simply ignore tiling or any overlapping fence.
- * It's part of the error state, and this hopefully
- * captures what the GPU read.
- */
-
- s = io_mapping_map_atomic_wc(&ggtt->mappable,
- reloc_offset);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s);
- } else {
- struct page *page;
- void *s;
-
- page = i915_gem_object_get_page(src, i);
-
- drm_clflush_pages(&page, 1);
-
- s = kmap_atomic(page);
- memcpy(d, s, PAGE_SIZE);
- kunmap_atomic(s);
-
- drm_clflush_pages(&page, 1);
- }
- local_irq_restore(flags);
+ s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
+ ret = compress_page(&zstream, (void __force *)s, dst);
+ io_mapping_unmap_atomic(s);
- dst->pages[i++] = d;
- reloc_offset += PAGE_SIZE;
+ if (ret)
+ goto unwind;
}
-
- return dst;
+ goto out;
unwind:
- while (i--)
- kfree(dst->pages[i]);
+ while (dst->page_count--)
+ free_page((unsigned long)dst->pages[dst->page_count]);
kfree(dst);
- return NULL;
+ dst = NULL;
+
+out:
+ compress_fini(&zstream, dst);
+ ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+ return dst;
}
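
The rewritten capture path copies the object one page at a time through a single reserved GGTT slot and deflates it on the fly, so the destination array must be sized for the zlib worst case, where incompressible input grows slightly. A hedged arithmetic sketch of the 10/8 over-allocation, assuming 4 KiB pages and a 4 MiB object:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long num_pages = (4ul << 20) >> 12;  /* 4 MiB object: 1024 pages in */

        /* zlib can expand incompressible data, so over-allocate by 10/8 (25%) */
        num_pages = DIV_ROUND_UP(10 * num_pages, 8);
        printf("%lu page pointers reserved\n", num_pages);  /* prints 1280 */
        return 0;
    }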
/* The error capture is special as it tries to run underneath the normal
@@ -855,7 +940,8 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
if (engine_id)
*engine_id = i;
- return error->engine[i].ipehr ^ error->engine[i].instdone;
+ return error->engine[i].ipehr ^
+ error->engine[i].instdone.instdone;
}
}
@@ -891,7 +977,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
if (!error->semaphore)
return;
- for_each_engine_id(to, dev_priv, id) {
+ for_each_engine(to, dev_priv, id) {
int idx;
u16 signal_offset;
u32 *tmp;
@@ -998,7 +1084,6 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
if (INTEL_GEN(dev_priv) >= 8) {
@@ -1010,9 +1095,10 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->faddr = I915_READ(DMA_FADD_I8XX);
ee->ipeir = I915_READ(IPEIR);
ee->ipehr = I915_READ(IPEHR);
- ee->instdone = I915_READ(GEN2_INSTDONE);
}
+ intel_engine_get_instdone(engine, &ee->instdone);
+
ee->waiting = intel_engine_has_waiter(engine);
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ee->acthd = intel_engine_get_active_head(engine);
@@ -1079,6 +1165,20 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
}
}
+static void record_request(struct drm_i915_gem_request *request,
+ struct drm_i915_error_request *erq)
+{
+ erq->context = request->ctx->hw_id;
+ erq->seqno = request->fence.seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->head = request->head;
+ erq->tail = request->tail;
+
+ rcu_read_lock();
+ erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+ rcu_read_unlock();
+}
+
static void engine_record_requests(struct intel_engine_cs *engine,
struct drm_i915_gem_request *first,
struct drm_i915_error_engine *ee)
@@ -1102,8 +1202,6 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
list_for_each_entry_from(request, &engine->request_list, link) {
- struct drm_i915_error_request *erq;
-
if (count >= ee->num_requests) {
/*
* If the ring request list was changed in
@@ -1123,19 +1221,22 @@ static void engine_record_requests(struct intel_engine_cs *engine,
break;
}
- erq = &ee->requests[count++];
- erq->seqno = request->fence.seqno;
- erq->jiffies = request->emitted_jiffies;
- erq->head = request->head;
- erq->tail = request->tail;
-
- rcu_read_lock();
- erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
- rcu_read_unlock();
+ record_request(request, &ee->requests[count++]);
}
ee->num_requests = count;
}
+static void error_record_engine_execlists(struct intel_engine_cs *engine,
+ struct drm_i915_error_engine *ee)
+{
+ unsigned int n;
+
+ for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+ if (engine->execlist_port[n].request)
+ record_request(engine->execlist_port[n].request,
+ &ee->execlist[n]);
+}
+
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
@@ -1146,20 +1247,21 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
i915_error_object_create(dev_priv, dev_priv->semaphore);
for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = &dev_priv->engine[i];
+ struct intel_engine_cs *engine = dev_priv->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
struct drm_i915_gem_request *request;
ee->pid = -1;
ee->engine_id = -1;
- if (!intel_engine_initialized(engine))
+ if (!engine)
continue;
ee->engine_id = i;
error_record_engine_registers(error, engine, ee);
error_record_engine_waiters(engine, ee);
+ error_record_engine_execlists(engine, ee);
request = i915_gem_find_active_request(engine);
if (request) {
@@ -1202,6 +1304,10 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
error->simulated |=
request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
+ ee->rq_head = request->head;
+ ee->rq_post = request->postfix;
+ ee->rq_tail = request->tail;
+
ring = request->ring;
ee->cpu_ring_head = ring->head;
ee->cpu_ring_tail = ring->tail;
@@ -1318,13 +1424,13 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
*/
/* 1: Registers specific to a single generation */
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv)) {
error->gtier[0] = I915_READ(GTIER);
error->ier = I915_READ(VLV_IER);
error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
}
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
error->err_int = I915_READ(GEN7_ERR_INT);
if (INTEL_INFO(dev)->gen >= 8) {
@@ -1332,7 +1438,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
}
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
error->forcewake = I915_READ_FW(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
error->gfx_mode = I915_READ(GFX_MODE);
@@ -1349,7 +1455,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
}
/* 3: Feature specific registers */
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
error->gam_ecochk = I915_READ(GAM_ECOCHK);
error->gac_eco = I915_READ(GAC_ECO_BITS);
}
@@ -1362,18 +1468,16 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
error->ier = I915_READ(DEIER);
error->gtier[0] = I915_READ(GTIER);
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
error->ier = I915_READ16(IER);
- } else if (!IS_VALLEYVIEW(dev)) {
+ } else if (!IS_VALLEYVIEW(dev_priv)) {
error->ier = I915_READ(IER);
}
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
-
- i915_get_extra_instdone(dev_priv, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
@@ -1418,6 +1522,27 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
sizeof(error->device_info));
}
+static int capture(void *data)
+{
+ struct drm_i915_error_state *error = data;
+
+ i915_capture_gen_state(error->i915, error);
+ i915_capture_reg_state(error->i915, error);
+ i915_gem_record_fences(error->i915, error);
+ i915_gem_record_rings(error->i915, error);
+ i915_capture_active_buffers(error->i915, error);
+ i915_capture_pinned_buffers(error->i915, error);
+
+ do_gettimeofday(&error->time);
+
+ error->overlay = intel_overlay_capture_error_state(error->i915);
+ error->display = intel_display_capture_error_state(error->i915);
+
+ return 0;
+}
+
+#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -1435,6 +1560,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error;
unsigned long flags;
+ if (!i915.error_capture)
+ return;
+
if (READ_ONCE(dev_priv->gpu_error.first_error))
return;
@@ -1446,18 +1574,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
}
kref_init(&error->ref);
+ error->i915 = dev_priv;
- i915_capture_gen_state(dev_priv, error);
- i915_capture_reg_state(dev_priv, error);
- i915_gem_record_fences(dev_priv, error);
- i915_gem_record_rings(dev_priv, error);
- i915_capture_active_buffers(dev_priv, error);
- i915_capture_pinned_buffers(dev_priv, error);
-
- do_gettimeofday(&error->time);
-
- error->overlay = intel_overlay_capture_error_state(dev_priv);
- error->display = intel_display_capture_error_state(dev_priv);
+ stop_machine(capture, error, NULL);
i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
@@ -1476,7 +1595,8 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
return;
}
- if (!warned) {
+ if (!warned &&
+ ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
@@ -1497,7 +1617,6 @@ void i915_error_state_get(struct drm_device *dev,
if (error_priv->error)
kref_get(&error_priv->error->ref);
spin_unlock_irq(&dev_priv->gpu_error.lock);
-
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
@@ -1519,33 +1638,3 @@ void i915_destroy_error_state(struct drm_device *dev)
if (error)
kref_put(&error->ref, i915_error_state_free);
}
-
-const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
-{
- switch (type) {
- case I915_CACHE_NONE: return " uncached";
- case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
- case I915_CACHE_L3_LLC: return " L3+LLC";
- case I915_CACHE_WT: return " WT";
- default: return "";
- }
-}
-
-/* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
- uint32_t *instdone)
-{
- memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
-
- if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
- instdone[0] = I915_READ(GEN2_INSTDONE);
- else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
- instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
- instdone[1] = I915_READ(GEN4_INSTDONE1);
- } else if (INTEL_GEN(dev_priv) >= 7) {
- instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
- instdone[1] = I915_READ(GEN7_SC_INSTDONE);
- instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
- instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
- }
-}
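
The flat extra_instdone[I915_NUM_INSTDONE_REG] array removed here is replaced by a structured type so that gen8+ sampler/row state can be recorded per slice/subslice. An approximate shape of the replacement, as consumed by intel_engine_get_instdone(); the slice/subslice bounds are assumptions, not taken from this diff:

    struct intel_instdone {
        u32 instdone;
        /* the fields below exist only on the render engine */
        u32 slice_common;
        u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
        u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
    };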
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 3106dcc06fe9..a1f76c8f8cde 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -917,6 +917,7 @@ static void guc_addon_create(struct intel_guc *guc)
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct page *page;
u32 size;
@@ -944,10 +945,10 @@ static void guc_addon_create(struct intel_guc *guc)
* so its address won't change after we've told the GuC where
* to find it.
*/
- engine = &dev_priv->engine[RCS];
+ engine = dev_priv->engine[RCS];
ads->golden_context_lrca = engine->status_page.ggtt_offset;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
@@ -960,7 +961,7 @@ static void guc_addon_create(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
reg_state->mmio_white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
@@ -1014,9 +1015,10 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
+ struct drm_i915_gem_request *request;
struct i915_guc_client *client;
struct intel_engine_cs *engine;
- struct drm_i915_gem_request *request;
+ enum intel_engine_id id;
/* client for execbuf submission */
client = guc_client_alloc(dev_priv,
@@ -1033,7 +1035,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
guc_init_doorbell_hw(guc);
/* Take over from manual control of ELSP (execlists) */
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
engine->submit_request = i915_guc_submit;
/* Replay the current set of previously submitted requests */
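
Engines are now heap-allocated and reached through an array of pointers, which is why `&dev_priv->engine[RCS]` becomes `dev_priv->engine[RCS]` throughout and for_each_engine() gains an explicit id cursor so it can skip absent engines. A hedged sketch of the new iterator shape; the real macro may differ in detail:

    #define for_each_engine(engine__, dev_priv__, id__) \
        for ((id__) = 0; (id__) < I915_NUM_ENGINES; (id__)++) \
            if (((engine__) = (dev_priv__)->engine[(id__)]) == NULL) {} else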
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3fc286cd1157..23315e5461bf 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1058,8 +1058,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
static bool any_waiters(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
if (intel_engine_has_waiter(engine))
return true;
@@ -1257,20 +1258,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[BCS]);
+ notify_ring(dev_priv->engine[BCS]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -1340,21 +1341,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir[4])
{
if (gt_iir[0]) {
- gen8_cs_irq_handler(&dev_priv->engine[RCS],
+ gen8_cs_irq_handler(dev_priv->engine[RCS],
gt_iir[0], GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(&dev_priv->engine[BCS],
+ gen8_cs_irq_handler(dev_priv->engine[BCS],
gt_iir[0], GEN8_BCS_IRQ_SHIFT);
}
if (gt_iir[1]) {
- gen8_cs_irq_handler(&dev_priv->engine[VCS],
+ gen8_cs_irq_handler(dev_priv->engine[VCS],
gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
- gen8_cs_irq_handler(&dev_priv->engine[VCS2],
+ gen8_cs_irq_handler(dev_priv->engine[VCS2],
gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
}
if (gt_iir[3])
- gen8_cs_irq_handler(&dev_priv->engine[VECS],
+ gen8_cs_irq_handler(dev_priv->engine[VECS],
gt_iir[3], GEN8_VECS_IRQ_SHIFT);
if (gt_iir[2] & dev_priv->pm_rps_events)
@@ -1598,7 +1599,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VECS]);
+ notify_ring(dev_priv->engine[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -2551,92 +2552,52 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
wake_up_all(&dev_priv->gpu_error.reset_queue);
}
-static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
+static inline void
+i915_err_print_instdone(struct drm_i915_private *dev_priv,
+ struct intel_instdone *instdone)
{
- uint32_t instdone[I915_NUM_INSTDONE_REG];
- u32 eir = I915_READ(EIR);
- int pipe, i;
+ int slice;
+ int subslice;
+
+ pr_err(" INSTDONE: 0x%08x\n", instdone->instdone);
- if (!eir)
+ if (INTEL_GEN(dev_priv) <= 3)
return;
- pr_err("render error detected, EIR: 0x%08x\n", eir);
+ pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common);
- i915_get_extra_instdone(dev_priv, instdone);
+ if (INTEL_GEN(dev_priv) <= 6)
+ return;
- if (IS_G4X(dev_priv)) {
- if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
- u32 ipeir = I915_READ(IPEIR_I965);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- for (i = 0; i < ARRAY_SIZE(instdone); i++)
- pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- POSTING_READ(IPEIR_I965);
- }
- if (eir & GM45_ERROR_PAGE_TABLE) {
- u32 pgtbl_err = I915_READ(PGTBL_ER);
- pr_err("page table error\n");
- pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
- I915_WRITE(PGTBL_ER, pgtbl_err);
- POSTING_READ(PGTBL_ER);
- }
- }
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->sampler[slice][subslice]);
- if (!IS_GEN2(dev_priv)) {
- if (eir & I915_ERROR_PAGE_TABLE) {
- u32 pgtbl_err = I915_READ(PGTBL_ER);
- pr_err("page table error\n");
- pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
- I915_WRITE(PGTBL_ER, pgtbl_err);
- POSTING_READ(PGTBL_ER);
- }
- }
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
+ pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice, instdone->row[slice][subslice]);
+}
- if (eir & I915_ERROR_MEMORY_REFRESH) {
- pr_err("memory refresh error:\n");
- for_each_pipe(dev_priv, pipe)
- pr_err("pipe %c stat: 0x%08x\n",
- pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
- /* pipestat has already been acked */
- }
- if (eir & I915_ERROR_INSTRUCTION) {
- pr_err("instruction error\n");
- pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
- for (i = 0; i < ARRAY_SIZE(instdone); i++)
- pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- if (INTEL_GEN(dev_priv) < 4) {
- u32 ipeir = I915_READ(IPEIR);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
- I915_WRITE(IPEIR, ipeir);
- POSTING_READ(IPEIR);
- } else {
- u32 ipeir = I915_READ(IPEIR_I965);
-
- pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
- pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
- pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
- pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- POSTING_READ(IPEIR_I965);
- }
- }
+static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+{
+ u32 eir;
+
+ if (!IS_GEN2(dev_priv))
+ I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
+
+ if (INTEL_GEN(dev_priv) < 4)
+ I915_WRITE(IPEIR, I915_READ(IPEIR));
+ else
+ I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
- I915_WRITE(EIR, eir);
- POSTING_READ(EIR);
+ I915_WRITE(EIR, I915_READ(EIR));
eir = I915_READ(EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
- DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
@@ -2665,7 +2626,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
va_end(args);
i915_capture_error_state(dev_priv, engine_mask, error_msg);
- i915_report_and_clear_eir(dev_priv);
+ i915_clear_error_registers(dev_priv);
if (!engine_mask)
return;
@@ -2694,45 +2655,40 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (INTEL_INFO(dev)->gen >= 4)
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
- else
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
-static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_enable_display_irq(dev_priv, bit);
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
-static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
+ uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ ilk_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -2753,38 +2709,36 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_STATUS |
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
- DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_disable_display_irq(dev_priv, bit);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
+ uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
+ ilk_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -2816,9 +2770,10 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
+ enum intel_engine_id id;
if (INTEL_GEN(dev_priv) >= 8) {
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (engine == signaller)
continue;
@@ -2828,7 +2783,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
- for_each_engine(signaller, dev_priv) {
+ for_each_engine(signaller, dev_priv, id) {
if (engine == signaller)
continue;
@@ -2949,35 +2904,52 @@ static int semaphore_passed(struct intel_engine_cs *engine)
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
engine->hangcheck.deadlock = 0;
}
+static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
+{
+ u32 tmp = current_instdone | *old_instdone;
+ bool unchanged;
+
+ unchanged = tmp == *old_instdone;
+ *old_instdone |= tmp;
+
+ return unchanged;
+}
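
A subunit only counts as making progress when its accumulated INSTDONE picks up new bits; OR-ing each sample into the accumulator makes one-off "done" transitions sticky, filtering out unstable units. A small runnable check of the helper's logic:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool unchanged(uint32_t cur, uint32_t *accu)
    {
        uint32_t tmp = cur | *accu;
        bool same = (tmp == *accu);

        *accu |= tmp;
        return same;
    }

    int main(void)
    {
        uint32_t accu = 0x5;                /* bits seen so far */

        assert(!unchanged(0x7, &accu));     /* new bit 0x2 -> progress */
        assert(accu == 0x7);
        assert(unchanged(0x3, &accu));      /* subset -> no progress: stuck */
        return 0;
    }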
+
static bool subunits_stuck(struct intel_engine_cs *engine)
{
- u32 instdone[I915_NUM_INSTDONE_REG];
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_instdone instdone;
+ struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
bool stuck;
- int i;
+ int slice;
+ int subslice;
if (engine->id != RCS)
return true;
- i915_get_extra_instdone(engine->i915, instdone);
+ intel_engine_get_instdone(engine, &instdone);
/* There might be unstable subunit states even when
* actual head is not moving. Filter out the unstable ones by
* accumulating the undone -> done transitions and only
* consider those as progress.
*/
- stuck = true;
- for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
- const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
-
- if (tmp != engine->hangcheck.instdone[i])
- stuck = false;
-
- engine->hangcheck.instdone[i] |= tmp;
+ stuck = instdone_unchanged(instdone.instdone,
+ &accu_instdone->instdone);
+ stuck &= instdone_unchanged(instdone.slice_common,
+ &accu_instdone->slice_common);
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+ stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
+ &accu_instdone->sampler[slice][subslice]);
+ stuck &= instdone_unchanged(instdone.row[slice][subslice],
+ &accu_instdone->row[slice][subslice]);
}
return stuck;
@@ -2989,7 +2961,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
if (acthd != engine->hangcheck.acthd) {
/* Clear subunit states on head movement */
- memset(engine->hangcheck.instdone, 0,
+ memset(&engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
return HANGCHECK_ACTIVE;
@@ -3061,6 +3033,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int hung = 0, stuck = 0;
int busy_count = 0;
#define BUSY 1
@@ -3080,7 +3053,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
bool busy = intel_engine_has_waiter(engine);
u64 acthd;
u32 seqno;
@@ -3159,7 +3132,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
/* Clear head and subunit states on seqno movement */
acthd = 0;
- memset(engine->hangcheck.instdone, 0,
+ memset(&engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
}
@@ -3197,12 +3170,12 @@ static void ibx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
GEN5_IRQ_RESET(SDE);
- if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+ if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
}
@@ -3218,7 +3191,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
WARN_ON(I915_READ(SDEIER) != 0);
@@ -3293,7 +3266,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xffffffff);
GEN5_IRQ_RESET(DE);
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
gen5_gt_irq_reset(dev);
@@ -3343,7 +3316,7 @@ static void gen8_irq_reset(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_DE_MISC_);
GEN5_IRQ_RESET(GEN8_PCU_);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev);
}
@@ -3532,10 +3505,10 @@ static void ibx_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return;
- if (HAS_PCH_IBX(dev))
+ if (HAS_PCH_IBX(dev_priv))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
@@ -3552,14 +3525,14 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
pm_irqs = gt_irqs = 0;
dev_priv->gt_irq_mask = ~0;
- if (HAS_L3_DPF(dev)) {
+ if (HAS_L3_DPF(dev_priv)) {
/* L3 parity interrupt is always unmasked. */
- dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
- gt_irqs |= GT_PARITY_ERROR(dev);
+ dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
+ gt_irqs |= GT_PARITY_ERROR(dev_priv);
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
gt_irqs |= ILK_BSD_USER_INTERRUPT;
} else {
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
@@ -3616,7 +3589,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ibx_irq_postinstall(dev);
- if (IS_IRONLAKE_M(dev)) {
+ if (IS_IRONLAKE_M(dev_priv)) {
/* Enable PCU event interrupts
*
* spinlocking not required here for correctness since interrupt
@@ -3756,13 +3729,13 @@ static int gen8_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_pre_postinstall(dev);
gen8_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -3971,7 +3944,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4168,7 +4141,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4400,9 +4373,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[RCS]);
+ notify_ring(dev_priv->engine[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->engine[VCS]);
+ notify_ring(dev_priv->engine[VCS]);
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -4539,16 +4512,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_preinstall = cherryview_irq_preinstall;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
dev->driver->irq_uninstall = cherryview_irq_uninstall;
- dev->driver->enable_vblank = valleyview_enable_vblank;
- dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
dev->driver->irq_uninstall = valleyview_irq_uninstall;
- dev->driver->enable_vblank = valleyview_enable_vblank;
- dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (INTEL_INFO(dev_priv)->gen >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
@@ -4557,13 +4530,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
+ else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_reset;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
@@ -4577,21 +4550,25 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ dev->driver->enable_vblank = i8xx_enable_vblank;
+ dev->driver->disable_vblank = i8xx_disable_vblank;
} else if (IS_GEN3(dev_priv)) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
+ dev->driver->enable_vblank = i8xx_enable_vblank;
+ dev->driver->disable_vblank = i8xx_disable_vblank;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
+ dev->driver->enable_vblank = i965_enable_vblank;
+ dev->driver->disable_vblank = i965_disable_vblank;
}
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- dev->driver->enable_vblank = i915_enable_vblank;
- dev->driver->disable_vblank = i915_disable_vblank;
}
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 768ad89d9cd4..629e4334719c 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -47,6 +47,7 @@ struct i915_params i915 __read_mostly = {
.load_detect_test = 0,
.force_reset_modeset_test = 0,
.reset = true,
+ .error_capture = true,
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 1,
@@ -115,6 +116,14 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
module_param_named_unsafe(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+module_param_named(error_capture, i915.error_capture, bool, 0600);
+MODULE_PARM_DESC(error_capture,
+ "Record the GPU state following a hang. "
+ "This information in /sys/class/drm/card<N>/error is vital for "
+ "triaging and debugging hangs.");
+#endif
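
When CONFIG_DRM_I915_CAPTURE_ERROR is disabled the parameter is compiled out entirely; when enabled, the 0600 permissions let root flip it at runtime through /sys/module/i915/parameters/error_capture, or disable it at boot with i915.error_capture=0.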
+
module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. "
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 3a0dd78ddb38..94efc899c1ef 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -59,6 +59,7 @@ struct i915_params {
bool load_detect_test;
bool force_reset_modeset_test;
bool reset;
+ bool error_capture;
bool disable_display;
bool verbose_state_checks;
bool nuclear_pageflip;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 70d96162def6..00efaa13974d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -86,8 +86,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define DEVEN 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
-#define BSM 0x5c
-#define BSM_MASK (0xFFFF << 20)
+/* BSM in include/drm/i915_drm.h */
#define HPLLCC 0xc0 /* 85x only */
#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
@@ -1605,6 +1604,7 @@ enum skl_disp_power_wells {
#define RING_HEAD(base) _MMIO((base)+0x34)
#define RING_START(base) _MMIO((base)+0x38)
#define RING_CTL(base) _MMIO((base)+0x3c)
+#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
#define RING_SYNC_0(base) _MMIO((base)+0x40)
#define RING_SYNC_1(base) _MMIO((base)+0x44)
#define RING_SYNC_2(base) _MMIO((base)+0x48)
@@ -1708,7 +1708,11 @@ enum skl_disp_power_wells {
#define GEN7_SC_INSTDONE _MMIO(0x7100)
#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
#define GEN7_ROW_INSTDONE _MMIO(0xe164)
-#define I915_NUM_INSTDONE_REG 4
+#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
+#define GEN8_MCR_SLICE(slice) (((slice) & 3) << 26)
+#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
+#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
+#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
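
On gen8+ the sampler/row INSTDONE registers are replicated per slice/subslice, and GEN8_MCR_SELECTOR steers which copy subsequent MMIO reads hit. A hedged sketch of the read-modify-write that intel_engine_get_instdone() is expected to perform:

    u32 mcr, val;

    mcr = I915_READ(GEN8_MCR_SELECTOR);
    mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
    mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
    I915_WRITE(GEN8_MCR_SELECTOR, mcr);        /* steer to one slice/subslice */
    val = I915_READ(GEN7_SAMPLER_INSTDONE);    /* read that instance's copy */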
#define RING_IPEIR(base) _MMIO((base)+0x64)
#define RING_IPEHR(base) _MMIO((base)+0x68)
/*
@@ -2089,9 +2093,9 @@ enum skl_disp_power_wells {
#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
-#define GT_PARITY_ERROR(dev) \
+#define GT_PARITY_ERROR(dev_priv) \
(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
- (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+ (IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
@@ -7327,6 +7331,10 @@ enum {
#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
#define AUD_CONFIG_LOWER_N_SHIFT 4
#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
+#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
+#define AUD_CONFIG_N(n) \
+ (((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \
+ (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a0af170062b1..344cbf39cfa9 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -38,7 +38,7 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* save FBC interval */
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
@@ -54,7 +54,7 @@ static void i915_restore_display(struct drm_device *dev)
intel_fbc_global_disable(dev_priv);
/* restore FBC interval */
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
i915_redisable_vga(dev);
@@ -70,7 +70,7 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
@@ -116,7 +116,7 @@ int i915_restore_state(struct drm_device *dev)
i915_gem_restore_fences(dev);
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 1e5cbc585ca2..8185002d7ec8 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -8,7 +8,7 @@
*/
#include <linux/slab.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/reservation.h>
#include "i915_sw_fence.h"
@@ -226,49 +226,50 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
return pending;
}
-struct dma_fence_cb {
- struct fence_cb base;
+struct i915_sw_dma_fence_cb {
+ struct dma_fence_cb base;
struct i915_sw_fence *fence;
- struct fence *dma;
+ struct dma_fence *dma;
struct timer_list timer;
};
static void timer_i915_sw_fence_wake(unsigned long data)
{
- struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+ struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno);
- fence_put(cb->dma);
+ dma_fence_put(cb->dma);
cb->dma = NULL;
i915_sw_fence_commit(cb->fence);
cb->timer.function = NULL;
}
-static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+static void dma_i915_sw_fence_wake(struct dma_fence *dma,
+ struct dma_fence_cb *data)
{
- struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
del_timer_sync(&cb->timer);
if (cb->timer.function)
i915_sw_fence_commit(cb->fence);
- fence_put(cb->dma);
+ dma_fence_put(cb->dma);
kfree(cb);
}
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
- struct fence *dma,
+ struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp)
{
- struct dma_fence_cb *cb;
+ struct i915_sw_dma_fence_cb *cb;
int ret;
- if (fence_is_signaled(dma))
+ if (dma_fence_is_signaled(dma))
return 0;
cb = kmalloc(sizeof(*cb), gfp);
@@ -276,7 +277,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
if (!gfpflags_allow_blocking(gfp))
return -ENOMEM;
- return fence_wait(dma, false);
+ return dma_fence_wait(dma, false);
}
cb->fence = i915_sw_fence_get(fence);
@@ -287,11 +288,11 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
timer_i915_sw_fence_wake, (unsigned long)cb,
TIMER_IRQSAFE);
if (timeout) {
- cb->dma = fence_get(dma);
+ cb->dma = dma_fence_get(dma);
mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
}
- ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+ ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
if (ret == 0) {
ret = 1;
} else {
@@ -305,16 +306,16 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
- const struct fence_ops *exclude,
+ const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
gfp_t gfp)
{
- struct fence *excl;
+ struct dma_fence *excl;
int ret = 0, pending;
if (write) {
- struct fence **shared;
+ struct dma_fence **shared;
unsigned int count, i;
ret = reservation_object_get_fences_rcu(resv,
@@ -339,7 +340,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
}
for (i = 0; i < count; i++)
- fence_put(shared[i]);
+ dma_fence_put(shared[i]);
kfree(shared);
} else {
excl = reservation_object_get_excl_rcu(resv);
@@ -356,7 +357,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
ret |= pending;
}
- fence_put(excl);
+ dma_fence_put(excl);
return ret;
}
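
The core rename (struct fence becoming struct dma_fence) claims the dma_fence_cb name for the common code, which is why the driver-local wrapper here has to become i915_sw_dma_fence_cb; the callback API itself keeps its shape. A hedged sketch of the renamed interface:

    #include <linux/dma-fence.h>

    static void wake_cb(struct dma_fence *f, struct dma_fence_cb *cb)
    {
        /* runs once, from whatever context signals the fence */
    }

    static void wait_or_run(struct dma_fence *fence, struct dma_fence_cb *cb)
    {
        if (dma_fence_add_callback(fence, cb, wake_cb) == -ENOENT) {
            /* fence was already signalled: do the work inline */
        }
    }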
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 373141602ca4..cd239e92f67f 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,8 +16,8 @@
#include <linux/wait.h>
struct completion;
-struct fence;
-struct fence_ops;
+struct dma_fence;
+struct dma_fence_ops;
struct reservation_object;
struct i915_sw_fence {
@@ -47,12 +47,12 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
wait_queue_t *wq);
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
- struct fence *dma,
+ struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp);
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
- const struct fence_ops *exclude,
+ const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
gfp_t gfp);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 1012eeea1324..47590ab08d7e 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -514,6 +514,8 @@ static const struct attribute *vlv_attrs[] = {
NULL,
};
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -571,6 +573,21 @@ static struct bin_attribute error_state_attr = {
.write = error_state_write,
};
+static void i915_setup_error_capture(struct device *kdev)
+{
+ if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
+ DRM_ERROR("error_state sysfs setup failed\n");
+}
+
+static void i915_teardown_error_capture(struct device *kdev)
+{
+ sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+}
+#else
+static void i915_setup_error_capture(struct device *kdev) {}
+static void i915_teardown_error_capture(struct device *kdev) {}
+#endif
+
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
@@ -617,17 +634,15 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
if (ret)
DRM_ERROR("RPS sysfs setup failed\n");
- ret = sysfs_create_bin_file(&kdev->kobj,
- &error_state_attr);
- if (ret)
- DRM_ERROR("error_state sysfs setup failed\n");
+ i915_setup_error_capture(kdev);
}
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
- sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+ i915_teardown_error_capture(kdev);
+
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sysfs_remove_files(&kdev->kobj, vlv_attrs);
else
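
With CONFIG_DRM_I915_CAPTURE_ERROR enabled, the captured record is read back through the /sys/class/drm/card<N>/error binary attribute registered here; writing anything to the node (error_state_write) discards the stored state so the next hang can be captured afresh.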
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 178798002a73..5c912c25f7d3 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -491,7 +491,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
__entry->ring = req->engine->id;
__entry->seqno = req->fence.seqno;
__entry->flags = flags;
- fence_enable_sw_signaling(&req->fence);
+ dma_fence_enable_sw_signaling(&req->fence);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index b82de3072d4f..c762ae549a1c 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -142,8 +142,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.y2 =
crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
- if (state->fb && intel_rotation_90_or_270(state->rotation)) {
+ if (state->fb && drm_rotation_90_or_270(state->rotation)) {
char *format_name;
+
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 6c70a5bfd7d8..7093cfbb62b1 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -81,7 +81,7 @@ static const struct {
int clock;
int n;
int cts;
-} aud_ncts[] = {
+} hdmi_aud_ncts[] = {
{ 44100, TMDS_296M, 4459, 234375 },
{ 44100, TMDS_297M, 4704, 247500 },
{ 48000, TMDS_296M, 5824, 281250 },
@@ -121,45 +121,20 @@ static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted
return hdmi_audio_clock[i].config;
}
-static int audio_config_get_n(const struct drm_display_mode *mode, int rate)
+static int audio_config_hdmi_get_n(const struct drm_display_mode *adjusted_mode,
+ int rate)
{
int i;
- for (i = 0; i < ARRAY_SIZE(aud_ncts); i++) {
- if ((rate == aud_ncts[i].sample_rate) &&
- (mode->clock == aud_ncts[i].clock)) {
- return aud_ncts[i].n;
+ for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
+ if (rate == hdmi_aud_ncts[i].sample_rate &&
+ adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
+ return hdmi_aud_ncts[i].n;
}
}
return 0;
}
-static uint32_t audio_config_setup_n_reg(int n, uint32_t val)
-{
- int n_low, n_up;
- uint32_t tmp = val;
-
- n_low = n & 0xfff;
- n_up = (n >> 12) & 0xff;
- tmp &= ~(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK);
- tmp |= ((n_up << AUD_CONFIG_UPPER_N_SHIFT) |
- (n_low << AUD_CONFIG_LOWER_N_SHIFT) |
- AUD_CONFIG_N_PROG_ENABLE);
- return tmp;
-}
-
-/* check whether N/CTS/M need be set manually */
-static bool audio_rate_need_prog(struct intel_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- if (((mode->clock == TMDS_297M) ||
- (mode->clock == TMDS_296M)) &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
- return true;
- else
- return false;
-}
-
static bool intel_eld_uptodate(struct drm_connector *connector,
i915_reg_t reg_eldv, uint32_t bits_eldv,
i915_reg_t reg_elda, uint32_t bits_elda,
@@ -245,6 +220,65 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(G4X_AUD_CNTL_ST, tmp);
}
+static void
+hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 tmp;
+
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+}
+
+static void
+hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct i915_audio_component *acomp = dev_priv->audio_component;
+ int rate = acomp ? acomp->aud_sample_rate[port] : 0;
+ enum pipe pipe = intel_crtc->pipe;
+ int n;
+ u32 tmp;
+
+ tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+
+ if (adjusted_mode->crtc_clock == TMDS_296M ||
+ adjusted_mode->crtc_clock == TMDS_297M) {
+ n = audio_config_hdmi_get_n(adjusted_mode, rate);
+ if (n != 0) {
+ tmp &= ~AUD_CONFIG_N_MASK;
+ tmp |= AUD_CONFIG_N(n);
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ } else {
+ DRM_DEBUG_KMS("no suitable N value is found\n");
+ }
+ }
+
+ I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+}
+
+static void
+hsw_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
+ const struct drm_display_mode *adjusted_mode)
+{
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
+ hsw_dp_audio_config_update(intel_crtc, port, adjusted_mode);
+ else
+ hsw_hdmi_audio_config_update(intel_crtc, port, adjusted_mode);
+}
+
static void hsw_audio_codec_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -276,20 +310,16 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
}
static void hsw_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *encoder,
+ struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
- struct i915_audio_component *acomp = dev_priv->audio_component;
+ enum port port = intel_encoder->port;
const uint8_t *eld = connector->eld;
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
- enum port port = intel_dig_port->port;
uint32_t tmp;
int len, i;
- int n, rate;
DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
pipe_name(pipe), drm_eld_size(eld));
@@ -325,42 +355,17 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
- tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- tmp |= AUD_CONFIG_N_VALUE_INDEX;
- else
- tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
-
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- if (audio_rate_need_prog(intel_crtc, adjusted_mode)) {
- if (!acomp)
- rate = 0;
- else if (port >= PORT_A && port <= PORT_E)
- rate = acomp->aud_sample_rate[port];
- else {
- DRM_ERROR("invalid port: %d\n", port);
- rate = 0;
- }
- n = audio_config_get_n(adjusted_mode, rate);
- if (n != 0)
- tmp = audio_config_setup_n_reg(n, tmp);
- else
- DRM_DEBUG_KMS("no suitable N value is found\n");
- }
-
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ hsw_audio_config_update(intel_crtc, port, adjusted_mode);
mutex_unlock(&dev_priv->av_mutex);
}
-static void ilk_audio_codec_disable(struct intel_encoder *encoder)
+static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum port port = enc_to_dig_port(&encoder->base)->port;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
+ enum port port = intel_encoder->port;
uint32_t tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
@@ -400,13 +405,13 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
}
static void ilk_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *encoder,
+ struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum port port = enc_to_dig_port(&encoder->base)->port;
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
+ enum port port = intel_encoder->port;
uint8_t *eld = connector->eld;
uint32_t tmp, eldv;
int len, i;
@@ -425,13 +430,13 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
* infrastructure is not there yet.
*/
- if (HAS_PCH_IBX(connector->dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
aud_config = IBX_AUD_CFG(pipe);
aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else if (IS_VALLEYVIEW(connector->dev) ||
- IS_CHERRYVIEW(connector->dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
aud_config = VLV_AUD_CFG(pipe);
aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
@@ -490,11 +495,10 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
+ enum pipe pipe = crtc->pipe;
connector = drm_select_eld(encoder);
if (!connector)
@@ -518,13 +522,19 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
adjusted_mode);
mutex_lock(&dev_priv->av_mutex);
- intel_dig_port->audio_connector = connector;
+ intel_encoder->audio_connector = connector;
+
/* referenced in audio callbacks */
- dev_priv->dig_port_map[port] = intel_encoder;
+ dev_priv->av_enc_map[pipe] = intel_encoder;
mutex_unlock(&dev_priv->av_mutex);
+ /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+ pipe = -1;
+
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ (int) port, (int) pipe);
}
/**
@@ -537,22 +547,27 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = crtc->pipe;
if (dev_priv->display.audio_codec_disable)
dev_priv->display.audio_codec_disable(intel_encoder);
mutex_lock(&dev_priv->av_mutex);
- intel_dig_port->audio_connector = NULL;
- dev_priv->dig_port_map[port] = NULL;
+ intel_encoder->audio_connector = NULL;
+ dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
+ /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+ pipe = -1;
+
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ (int) port, (int) pipe);
}
/**
@@ -627,74 +642,67 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
return dev_priv->cdclk_freq;
}
-static int i915_audio_component_sync_audio_rate(struct device *kdev,
- int port, int rate)
+static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
+ int port, int pipe)
+{
+ if (WARN_ON(pipe >= I915_MAX_PIPES))
+ return NULL;
+
+ /* MST */
+ if (pipe >= 0)
+ return dev_priv->av_enc_map[pipe];
+
+ /* Non-MST */
+ for_each_pipe(dev_priv, pipe) {
+ struct intel_encoder *encoder;
+
+ encoder = dev_priv->av_enc_map[pipe];
+ if (encoder == NULL)
+ continue;
+
+ if (port == encoder->port)
+ return encoder;
+ }
+
+ return NULL;
+}
+
+static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
+ int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
struct intel_crtc *crtc;
- struct drm_display_mode *mode;
+ struct drm_display_mode *adjusted_mode;
struct i915_audio_component *acomp = dev_priv->audio_component;
- enum pipe pipe = INVALID_PIPE;
- u32 tmp;
- int n;
int err = 0;
- /* HSW, BDW, SKL, KBL need this fix */
- if (!IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_BROADWELL(dev_priv) &&
- !IS_HASWELL(dev_priv))
+ if (!HAS_DDI(dev_priv))
return 0;
i915_audio_component_get_power(kdev);
mutex_lock(&dev_priv->av_mutex);
+
/* 1. get the pipe */
- intel_encoder = dev_priv->dig_port_map[port];
- /* intel_encoder might be NULL for DP MST */
+ intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder || !intel_encoder->base.crtc ||
intel_encoder->type != INTEL_OUTPUT_HDMI) {
- DRM_DEBUG_KMS("no valid port %c\n", port_name(port));
+ DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
+
+ /* pipe passed from the audio driver will be -1 for Non-MST case */
crtc = to_intel_crtc(intel_encoder->base.crtc);
pipe = crtc->pipe;
- if (pipe == INVALID_PIPE) {
- DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port));
- err = -ENODEV;
- goto unlock;
- }
- DRM_DEBUG_KMS("pipe %c connects port %c\n",
- pipe_name(pipe), port_name(port));
- mode = &crtc->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
/* port must be valid now, otherwise the pipe will be invalid */
acomp->aud_sample_rate[port] = rate;
- /* 2. check whether to set the N/CTS/M manually or not */
- if (!audio_rate_need_prog(crtc, mode)) {
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
- goto unlock;
- }
-
- n = audio_config_get_n(mode, rate);
- if (n == 0) {
- DRM_DEBUG_KMS("Using automatic mode for N value on port %c\n",
- port_name(port));
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
- goto unlock;
- }
-
- /* 3. set the N/CTS/M */
- tmp = I915_READ(HSW_AUD_CFG(pipe));
- tmp = audio_config_setup_n_reg(n, tmp);
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ hsw_audio_config_update(crtc, port, adjusted_mode);
unlock:
mutex_unlock(&dev_priv->av_mutex);
@@ -703,27 +711,29 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev,
}
static int i915_audio_component_get_eld(struct device *kdev, int port,
- bool *enabled,
+ int pipe, bool *enabled,
unsigned char *buf, int max_bytes)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
const u8 *eld;
int ret = -EINVAL;
mutex_lock(&dev_priv->av_mutex);
- intel_encoder = dev_priv->dig_port_map[port];
- /* intel_encoder might be NULL for DP MST */
- if (intel_encoder) {
- ret = 0;
- intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- *enabled = intel_dig_port->audio_connector != NULL;
- if (*enabled) {
- eld = intel_dig_port->audio_connector->eld;
- ret = drm_eld_size(eld);
- memcpy(buf, eld, min(max_bytes, ret));
- }
+
+ intel_encoder = get_saved_enc(dev_priv, port, pipe);
+ if (!intel_encoder) {
+ DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+ mutex_unlock(&dev_priv->av_mutex);
+ return ret;
+ }
+
+ ret = 0;
+ *enabled = intel_encoder->audio_connector != NULL;
+ if (*enabled) {
+ eld = intel_encoder->audio_connector->eld;
+ ret = drm_eld_size(eld);
+ memcpy(buf, eld, min(max_bytes, ret));
}
mutex_unlock(&dev_priv->av_mutex);
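A minimal caller-side sketch of the new (port, pipe) addressing, assuming the sync_audio_rate(dev, port, pipe, rate) signature added above; the wrapper function and its is_mst flag are hypothetical, not part of the patch:

	/*
	 * Hypothetical audio-driver helper: non-MST sinks are addressed by
	 * port alone, so pipe must be passed as -1, matching get_saved_enc()
	 * above, which then searches av_enc_map[] by port.
	 */
	static int example_sync_rate(struct i915_audio_component *acomp,
				     int port, bool is_mst, int pipe, int rate)
	{
		if (!is_mst)
			pipe = -1;

		return acomp->ops->sync_audio_rate(acomp->dev, port, pipe, rate);
	}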
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1f8af87c6294..5ab646ef8c9f 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -996,6 +996,10 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
goto err;
}
+ /* Log the presence of sequences we won't run. */
+ if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
+ DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
+
dev_priv->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
@@ -1791,3 +1795,52 @@ intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
return false;
}
+
+/**
+ * intel_bios_is_lspcon_present - check if LSPCON is attached on @port
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return: true if LSPCON is present on this port
+ */
+bool
+intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ int i;
+
+ if (!HAS_LSPCON(dev_priv))
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ if (!dev_priv->vbt.child_dev[i].common.lspcon)
+ continue;
+
+ switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+ case DVO_PORT_DPA:
+ case DVO_PORT_HDMIA:
+ if (port == PORT_A)
+ return true;
+ break;
+ case DVO_PORT_DPB:
+ case DVO_PORT_HDMIB:
+ if (port == PORT_B)
+ return true;
+ break;
+ case DVO_PORT_DPC:
+ case DVO_PORT_HDMIC:
+ if (port == PORT_C)
+ return true;
+ break;
+ case DVO_PORT_DPD:
+ case DVO_PORT_HDMID:
+ if (port == PORT_D)
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
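The switch above folds each DP/HDMI pair of VBT DVO port IDs onto a single PORT_x value. For reference, a table-driven equivalent of the same mapping (a sketch only — the array and helper are hypothetical; the DVO_PORT_*/PORT_* values are the ones used in the function above):

	static const struct {
		u8 dvo_port;
		enum port port;
	} lspcon_dvo_port_map[] = {
		{ DVO_PORT_DPA, PORT_A }, { DVO_PORT_HDMIA, PORT_A },
		{ DVO_PORT_DPB, PORT_B }, { DVO_PORT_HDMIB, PORT_B },
		{ DVO_PORT_DPC, PORT_C }, { DVO_PORT_HDMIC, PORT_C },
		{ DVO_PORT_DPD, PORT_D }, { DVO_PORT_HDMID, PORT_D },
	};

	static bool lspcon_dvo_port_matches(u8 dvo_port, enum port port)
	{
		int i;

		/* Unknown DVO ports fall through to false, like the
		 * default case of the switch above. */
		for (i = 0; i < ARRAY_SIZE(lspcon_dvo_port_map); i++)
			if (lspcon_dvo_port_map[i].dvo_port == dvo_port)
				return lspcon_dvo_port_map[i].port == port;

		return false;
	}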
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 495611b7068d..56efcc507ea2 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -464,7 +464,7 @@ static int intel_breadcrumbs_signaler(void *arg)
&request->signaling.wait);
local_bh_disable();
- fence_signal(&request->fence);
+ dma_fence_signal(&request->fence);
local_bh_enable(); /* kick start the tasklets */
/* Find the next oldest signal. Note that as we have
@@ -502,7 +502,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
struct rb_node *parent, **p;
bool first, wakeup;
- /* locked by fence_enable_sw_signaling() */
+ /* locked by dma_fence_enable_sw_signaling() */
assert_spin_locked(&request->lock);
request->signaling.wait.tsk = b->signaler;
@@ -621,6 +621,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
unsigned int intel_kick_waiters(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int mask = 0;
/* To avoid the task_struct disappearing beneath us as we wake up
@@ -628,7 +629,7 @@ unsigned int intel_kick_waiters(struct drm_i915_private *i915)
* RCU lock, i.e. as we call wake_up_process() we must be holding the
* rcu_read_lock().
*/
- for_each_engine(engine, i915)
+ for_each_engine(engine, i915, id)
if (unlikely(intel_engine_wakeup(engine)))
mask |= intel_engine_flag(engine);
@@ -638,9 +639,10 @@ unsigned int intel_kick_waiters(struct drm_i915_private *i915)
unsigned int intel_kick_signalers(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int mask = 0;
- for_each_engine(engine, i915) {
+ for_each_engine(engine, i915, id) {
if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
wake_up_process(engine->breadcrumbs.signaler);
mask |= intel_engine_flag(engine);
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 95a72771eea6..445108855275 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -273,7 +273,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
int i;
- if (HAS_GMCH_DISPLAY(dev)) {
+ if (HAS_GMCH_DISPLAY(dev_priv)) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
@@ -288,7 +288,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
(drm_color_lut_extract(lut[i].green, 8) << 8) |
drm_color_lut_extract(lut[i].blue, 8);
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
@@ -297,7 +297,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
for (i = 0; i < 256; i++) {
uint32_t word = (i << 16) | (i << 8) | i;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
@@ -326,7 +326,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
- if (IS_HASWELL(dev) && intel_crtc_state->ips_enabled &&
+ if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
(intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
reenable_ips = true;
@@ -534,14 +534,14 @@ void intel_color_init(struct drm_crtc *crtc)
drm_mode_crtc_set_gamma_size(crtc, 256);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
dev_priv->display.load_luts = cherryview_load_luts;
- } else if (IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = haswell_load_luts;
- } else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev) ||
- IS_BROXTON(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) ||
+ IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts;
} else {
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dfbcf16b41df..a97151fcb9f4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -84,7 +84,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & ADPA_DAC_ENABLE))
goto out;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -165,16 +165,16 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(dev_priv))
; /* Those bits don't exist here */
- else if (HAS_PCH_CPT(dev))
+ else if (HAS_PCH_CPT(dev_priv))
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
- if (!HAS_PCH_SPLIT(dev))
+ if (!HAS_PCH_SPLIT(dev_priv))
I915_WRITE(BCLRPAT(crtc->pipe), 0);
switch (mode) {
@@ -241,7 +241,8 @@ intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- int max_dotclk = to_i915(dev)->max_dotclk_freq;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int max_dotclk = dev_priv->max_dotclk_freq;
int max_clock;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -250,15 +251,15 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(dev_priv))
max_clock = 180000;
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
/*
* 270 MHz due to current DPLL limits,
* DAC limit supposedly 355 MHz.
*/
max_clock = 270000;
- else if (IS_GEN3(dev) || IS_GEN4(dev))
+ else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
max_clock = 400000;
else
max_clock = 350000;
@@ -269,7 +270,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
- if (HAS_PCH_LPT(dev) &&
+ if (HAS_PCH_LPT(dev_priv) &&
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
return MODE_CLOCK_HIGH;
@@ -280,13 +281,13 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev)) {
+ if (HAS_PCH_LPT(dev_priv)) {
if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
DRM_DEBUG_KMS("LPT only supports 24bpp\n");
return false;
@@ -296,7 +297,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
}
/* FDI must always be 2.7 GHz */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
pipe_config->port_clock = 135000 * 2;
return true;
@@ -312,7 +313,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
- bool turn_off_dac = HAS_PCH_SPLIT(dev);
+ bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
u32 save_adpa;
crt->force_hotplug_required = 0;
@@ -419,10 +420,10 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
bool ret = false;
int i, tries = 0;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
return intel_ironlake_crt_detect_hotplug(connector);
- if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv))
return valleyview_crt_detect_hotplug(connector);
/*
@@ -430,7 +431,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
- if (IS_G4X(dev) && !IS_GM45(dev))
+ if (IS_G4X(dev_priv) && !IS_GM45(dev_priv))
tries = 2;
else
tries = 1;
@@ -566,7 +567,7 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
- if (!IS_GEN2(dev)) {
+ if (!IS_GEN2(dev_priv)) {
uint32_t pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
POSTING_READ(pipeconf_reg);
@@ -643,6 +644,32 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
return status;
}
+static int intel_spurious_crt_detect_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_DRIVER("Skipping CRT detection for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_spurious_crt_detect[] = {
+ {
+ .callback = intel_spurious_crt_detect_dmi_callback,
+ .ident = "ACER ZGB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
+ {
+ .callback = intel_spurious_crt_detect_dmi_callback,
+ .ident = "Intel DZ77BH-55K",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "DZ77BH-55K"),
+ },
+ },
+ { }
+};
+
static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
@@ -659,6 +686,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
connector->base.id, connector->name,
force);
+ /* Skip machines without VGA that falsely report hotplug events */
+ if (dmi_check_system(intel_spurious_crt_detect))
+ return connector_status_disconnected;
+
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
@@ -740,7 +771,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
- if (ret || !IS_G4X(dev))
+ if (ret || !IS_G4X(dev_priv))
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
@@ -808,32 +839,6 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
-{
- DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
- return 1;
-}
-
-static const struct dmi_system_id intel_no_crt[] = {
- {
- .callback = intel_no_crt_dmi_callback,
- .ident = "ACER ZGB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
- },
- },
- {
- .callback = intel_no_crt_dmi_callback,
- .ident = "DELL XPS 8700",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
- },
- },
- { }
-};
-
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
@@ -843,13 +848,9 @@ void intel_crt_init(struct drm_device *dev)
i915_reg_t adpa_reg;
u32 adpa;
- /* Skip machines without VGA that falsely report hotplug events */
- if (dmi_check_system(intel_no_crt))
- return;
-
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
adpa_reg = PCH_ADPA;
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
adpa_reg = VLV_ADPA;
else
adpa_reg = ADPA;
@@ -893,12 +894,12 @@ void intel_crt_init(struct drm_device *dev)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
- if (IS_I830(dev))
+ if (IS_I830(dev_priv))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
@@ -907,20 +908,23 @@ void intel_crt_init(struct drm_device *dev)
crt->adpa_reg = adpa_reg;
crt->base.compute_config = intel_crt_compute_config;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt;
} else {
crt->base.disable = intel_disable_crt;
}
crt->base.enable = intel_enable_crt;
- if (I915_HAS_HOTPLUG(dev))
+ if (I915_HAS_HOTPLUG(dev) &&
+ !dmi_check_system(intel_spurious_crt_detect))
crt->base.hpd_pin = HPD_CRT;
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
+ crt->base.port = PORT_E;
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
crt->base.post_disable = hsw_post_disable_crt;
} else {
+ crt->base.port = PORT_NONE;
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
}
@@ -941,7 +945,7 @@ void intel_crt_init(struct drm_device *dev)
* polarity and link reversal bits or not, instead of relying on the
* BIOS.
*/
- if (HAS_PCH_LPT(dev)) {
+ if (HAS_PCH_LPT(dev_priv)) {
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 15d47c87def6..fb18d699ce10 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -167,8 +167,47 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x80005012, 0x000000C0, 0x3 },
};
+/* Kabylake H and S */
+static const struct ddi_buf_trans kbl_ddi_translations_dp[] = {
+ { 0x00002016, 0x000000A0, 0x0 },
+ { 0x00005012, 0x0000009B, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x80009010, 0x000000C0, 0x1 },
+ { 0x00002016, 0x0000009B, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000C0, 0x1 },
+ { 0x00002016, 0x00000097, 0x0 },
+ { 0x80005012, 0x000000C0, 0x1 },
+};
+
+/* Kabylake U */
+static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = {
+ { 0x0000201B, 0x000000A1, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x80009010, 0x000000C0, 0x3 },
+ { 0x0000201B, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00002016, 0x0000004F, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
+/* Kabylake Y */
+static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = {
+ { 0x00001017, 0x000000A1, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x8000800F, 0x000000C0, 0x3 },
+ { 0x00001017, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00001017, 0x0000004C, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
/*
- * Skylake H and S
+ * Skylake/Kabylake H and S
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
@@ -185,7 +224,7 @@ static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
};
/*
- * Skylake U
+ * Skylake/Kabylake U
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
@@ -202,7 +241,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
};
/*
- * Skylake Y
+ * Skylake/Kabylake Y
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
@@ -218,7 +257,7 @@ static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
{ 0x00000018, 0x0000008A, 0x0 },
};
-/* Skylake U, H and S */
+/* Skylake/Kabylake U, H and S */
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000AC, 0x0 },
{ 0x00005012, 0x0000009D, 0x0 },
@@ -233,7 +272,7 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x80000018, 0x000000C0, 0x1 },
};
-/* Skylake Y */
+/* Skylake/Kabylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00005012, 0x000000DF, 0x0 },
@@ -334,10 +373,10 @@ bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
return skl_y_ddi_translations_dp;
- } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
+ } else if (IS_SKL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
return skl_u_ddi_translations_dp;
} else {
@@ -347,6 +386,21 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct ddi_buf_trans *
+kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ if (IS_KBL_ULX(dev_priv)) {
+ *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
+ return kbl_y_ddi_translations_dp;
+ } else if (IS_KBL_ULT(dev_priv)) {
+ *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
+ return kbl_u_ddi_translations_dp;
+ } else {
+ *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp);
+ return kbl_ddi_translations_dp;
+ }
+}
+
+static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
@@ -362,7 +416,10 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
- return skl_get_buf_trans_dp(dev_priv, n_entries);
+ if (IS_KABYLAKE(dev_priv))
+ return kbl_get_buf_trans_dp(dev_priv, n_entries);
+ else
+ return skl_get_buf_trans_dp(dev_priv, n_entries);
}
static const struct ddi_buf_trans *
@@ -430,21 +487,18 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
if (IS_BROXTON(dev_priv))
return;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_KABYLAKE(dev_priv)) {
+ ddi_translations_fdi = NULL;
+ ddi_translations_dp =
+ kbl_get_buf_trans_dp(dev_priv, &n_dp_entries);
+ ddi_translations_edp =
+ skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
+ } else if (IS_SKYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
-
- /* If we're boosting the current, set bit 31 of trans1 */
- if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
- iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
-
- if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
- port != PORT_A && port != PORT_E &&
- n_edp_entries > 9))
- n_edp_entries = 9;
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
@@ -464,6 +518,17 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
}
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
+
+ if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
+ port != PORT_A && port != PORT_E &&
+ n_edp_entries > 9))
+ n_edp_entries = 9;
+ }
+
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
ddi_translations = ddi_translations_edp;
@@ -1020,13 +1085,13 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (INTEL_INFO(dev)->gen <= 8)
+ if (INTEL_GEN(dev_priv) <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
- else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_clock_get(encoder, pipe_config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
}
@@ -1081,14 +1146,14 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
return bxt_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else
@@ -1189,7 +1254,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
- if (IS_HASWELL(dev) &&
+ if (IS_HASWELL(dev_priv) &&
(intel_crtc->config->pch_pfit.enabled ||
intel_crtc->config->pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
@@ -1434,7 +1499,12 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
if (dp_iboost) {
iboost = dp_iboost;
} else {
- ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
+ if (IS_KABYLAKE(dev_priv))
+ ddi_translations = kbl_get_buf_trans_dp(dev_priv,
+ &n_entries);
+ else
+ ddi_translations = skl_get_buf_trans_dp(dev_priv,
+ &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
@@ -1742,7 +1812,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
intel_edp_panel_off(intel_dp);
}
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_INFO(dev)->gen < 9)
@@ -2438,7 +2508,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
- bool init_hdmi, init_dp;
+ bool init_hdmi, init_dp, init_lspcon = false;
int max_lanes;
if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
@@ -2470,6 +2540,19 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+
+ if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ /*
+ * An LSPCON device must be driven through a DP connector,
+ * using a special detection sequence, so make sure DP is
+ * initialized before the LSPCON.
+ */
+ init_dp = true;
+ init_lspcon = true;
+ init_hdmi = false;
+ DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port));
+ }
+
if (!init_dp && !init_hdmi) {
DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
@@ -2509,7 +2592,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* configuration so that we use the proper lane count for our
* calculations.
*/
- if (IS_BROXTON(dev) && port == PORT_A) {
+ if (IS_BROXTON(dev_priv) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
@@ -2520,6 +2603,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_dig_port->max_lanes = max_lanes;
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
@@ -2532,7 +2616,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) && port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
@@ -2545,6 +2629,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
goto err;
}
+ if (init_lspcon) {
+ if (lspcon_init(intel_dig_port))
+ /* TODO: handle HDMI infoframe part */
+ DRM_DEBUG_KMS("LSPCON init success on port %c\n",
+ port_name(port));
+ else
+ /*
+ * LSPCON init failed, but DP init succeeded, so
+ * let's try to drive it as a DP++ port.
+ */
+ DRM_ERROR("LSPCON init failed on port %c\n",
+ port_name(port));
+ }
+
return;
err:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 1b20e160bc1f..d6a8f11813d5 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -28,20 +28,14 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = &dev_priv->info;
-#define PRINT_S(name) "%s"
-#define SEP_EMPTY
-#define PRINT_FLAG(name) info->name ? #name "," : ""
-#define SEP_COMMA ,
- DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
- DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x",
info->gen,
dev_priv->drm.pdev->device,
- dev_priv->drm.pdev->revision,
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
-#undef PRINT_S
-#undef SEP_EMPTY
+ dev_priv->drm.pdev->revision);
+#define PRINT_FLAG(name) \
+ DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-#undef SEP_COMMA
}
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
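The PRINT_FLAG rework above is an instance of the X-macro idiom: DEV_INFO_FOR_EACH_FLAG() expands a caller-supplied macro once per device flag, so the flag list is written exactly once. A self-contained userspace sketch of the same idiom (the flag names and the EXAMPLE_FOR_EACH_FLAG list are hypothetical, not the real DEV_INFO_FOR_EACH_FLAG):

	#include <stdio.h>

	/* The single authoritative flag list; every expansion site
	 * picks up a newly added flag automatically. */
	#define EXAMPLE_FOR_EACH_FLAG(func) \
		func(has_llc); \
		func(has_fbc); \
		func(has_psr)

	struct example_info {
		int has_llc, has_fbc, has_psr;
	};

	static void example_dump(const struct example_info *info)
	{
	#define PRINT_FLAG(name) \
		printf("device info: " #name ": %s\n", \
		       info->name ? "yes" : "no")
		EXAMPLE_FOR_EACH_FLAG(PRINT_FLAG);
	#undef PRINT_FLAG
	}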
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0ad1879bfd9d..895b3dc50e3f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -600,7 +600,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_device *dev,
+static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
const struct intel_limit *limit,
const struct dpll *clock)
{
@@ -613,12 +613,13 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid("m1 out of range\n");
- if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
+ if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv))
if (clock->m1 <= clock->m2)
INTELPllInvalid("m1 <= m2\n");
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_BROXTON(dev_priv)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -698,7 +699,8 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
if (match_clock &&
@@ -753,7 +755,8 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
if (match_clock &&
@@ -813,7 +816,8 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
@@ -845,7 +849,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
* For CHV ignore the error and consider only the P value.
* Prefer a bigger P value based on HW requirements.
*/
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(to_i915(dev))) {
*error_ppm = 0;
return calculated_clock->p > best_clock->p;
@@ -909,7 +913,8 @@ vlv_find_best_dpll(const struct intel_limit *limit,
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit,
+ if (!intel_PLL_is_valid(to_i915(dev),
+ limit,
&clock))
continue;
@@ -977,7 +982,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
chv_calc_dpll_params(refclk, &clock);
- if (!intel_PLL_is_valid(dev, limit, &clock))
+ if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
continue;
if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
@@ -1040,7 +1045,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
u32 line1, line2;
u32 line_mask;
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
line_mask = DSL_LINEMASK_GEN2;
else
line_mask = DSL_LINEMASK_GEN3;
@@ -1187,19 +1192,17 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
onoff(state), onoff(cur_state));
}
-void assert_panel_unlocked(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
bool locked = true;
- if (WARN_ON(HAS_DDI(dev)))
+ if (WARN_ON(HAS_DDI(dev_priv)))
return;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
u32 port_sel;
pp_reg = PP_CONTROL(0);
@@ -1209,7 +1212,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
/* XXX: else fix for eDP */
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* presumably write lock depends on pipe, not port select */
pp_reg = PP_CONTROL(pipe);
panel_pipe = pipe;
@@ -1232,10 +1235,9 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
static void assert_cursor(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
- struct drm_device *dev = &dev_priv->drm;
bool cur_state;
- if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv))
cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -1330,7 +1332,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(SPCNTR(pipe, sprite));
I915_STATE_WARN(val & SP_ENABLE,
@@ -1619,11 +1621,11 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
assert_pipe_disabled(dev_priv, crtc->pipe);
/* PLL is protected by panel, make sure we can write it */
- if (IS_MOBILE(dev) && !IS_I830(dev))
+ if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
assert_panel_unlocked(dev_priv, crtc->pipe);
/* Enable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
+ if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev) > 0) {
/*
* It appears to be important that we don't enable this
* for the current pipe before otherwise configuring the
@@ -1688,7 +1690,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
enum pipe pipe = crtc->pipe;
/* Disable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev) &&
+ if (IS_I830(dev_priv) &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
!intel_num_dvo_pipes(dev)) {
I915_WRITE(DPLL(PIPE_B),
@@ -1786,7 +1788,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
i915_reg_t reg;
@@ -1799,7 +1800,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Set the timing override bit before enabling the
* pch transcoder. */
reg = TRANS_CHICKEN2(pipe);
@@ -1877,7 +1878,6 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = &dev_priv->drm;
i915_reg_t reg;
uint32_t val;
@@ -1898,7 +1898,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Clear the timing override chicken bit again. */
reg = TRANS_CHICKEN2(pipe);
val = I915_READ(reg);
@@ -1926,6 +1926,18 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
+enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ WARN_ON(!crtc->config->has_pch_encoder);
+
+ if (HAS_PCH_LPT(dev_priv))
+ return TRANSCODER_A;
+ else
+ return (enum transcoder) crtc->pipe;
+}
+
/**
* intel_enable_pipe - enable a pipe, asserting requirements
* @crtc: crtc responsible for the pipe
@@ -1939,7 +1951,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
- enum pipe pch_transcoder;
i915_reg_t reg;
u32 val;
@@ -1949,11 +1960,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
- if (HAS_PCH_LPT(dev_priv))
- pch_transcoder = TRANSCODER_A;
- else
- pch_transcoder = pipe;
-
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1967,7 +1973,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
} else {
if (crtc->config->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
- assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+ assert_fdi_rx_pll_enabled(dev_priv,
+ (enum pipe) intel_crtc_pch_transcoder(crtc));
assert_fdi_tx_pll_enabled(dev_priv,
(enum pipe) cpu_transcoder);
}
@@ -2139,7 +2146,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
const struct drm_framebuffer *fb,
unsigned int rotation)
{
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
*view = i915_ggtt_view_rotated;
view->params.rotated = to_intel_framebuffer(fb)->rot_info;
} else {
@@ -2260,7 +2267,7 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
unsigned int rotation)
{
- if (intel_rotation_90_or_270(rotation))
+ if (drm_rotation_90_or_270(rotation))
return to_intel_framebuffer(fb)->rotated[plane].pitch;
else
return fb->pitches[plane];
@@ -2296,7 +2303,7 @@ void intel_add_fb_offsets(int *x, int *y,
const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
unsigned int rotation = state->base.rotation;
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
*x += intel_fb->rotated[plane].x;
*y += intel_fb->rotated[plane].y;
} else {
@@ -2360,7 +2367,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
intel_tile_dims(dev_priv, &tile_width, &tile_height,
fb->modifier[plane], cpp);
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
swap(tile_width, tile_height);
} else {
@@ -2416,7 +2423,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
intel_tile_dims(dev_priv, &tile_width, &tile_height,
fb_modifier, cpp);
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
swap(tile_width, tile_height);
} else {
@@ -2976,7 +2983,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
int ret;
/* Rotate src coordinates to match rotated GTT view */
- if (intel_rotation_90_or_270(rotation))
+ if (drm_rotation_90_or_270(rotation))
drm_rect_rotate(&plane_state->base.src,
fb->width << 16, fb->height << 16,
DRM_ROTATE_270);
@@ -3034,7 +3041,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
- } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
+ } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
I915_WRITE(PRIMSIZE(plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
@@ -3072,7 +3079,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
intel_add_fb_offsets(&x, &y, plane_state, 0);
@@ -3145,7 +3152,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
dspcntr = DISPPLANE_GAMMA_ENABLE;
dspcntr |= DISPLAY_PLANE_ENABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
switch (fb->pixel_format) {
@@ -3174,7 +3181,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
intel_add_fb_offsets(&x, &y, plane_state, 0);
@@ -3185,7 +3192,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
if (rotation == DRM_ROTATE_180) {
dspcntr |= DISPPLANE_ROTATE_180;
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
x += (crtc_state->pipe_src_w - 1);
y += (crtc_state->pipe_src_h - 1);
}
@@ -3202,7 +3209,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSURF(plane),
intel_fb_gtt_offset(fb, rotation) +
intel_crtc->dspaddr_offset);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -3277,7 +3284,7 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
* The stride is either expressed as a multiple of 64 bytes chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
- if (intel_rotation_90_or_270(rotation)) {
+ if (drm_rotation_90_or_270(rotation)) {
int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
@@ -3379,6 +3386,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
+ const struct skl_plane_wm *p_wm =
+ &crtc_state->wm.skl.optimal.planes[0];
int pipe = intel_crtc->pipe;
u32 plane_ctl;
unsigned int rotation = plane_state->base.rotation;
@@ -3415,7 +3424,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
intel_crtc->adjusted_y = src_y;
if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
- skl_write_plane_wm(intel_crtc, wm, 0);
+ skl_write_plane_wm(intel_crtc, p_wm, &wm->ddb, 0);
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
@@ -3449,6 +3458,8 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+ const struct skl_plane_wm *p_wm = &cstate->wm.skl.optimal.planes[0];
int pipe = intel_crtc->pipe;
/*
@@ -3456,7 +3467,8 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
* plane's visibility isn't actually changing, neither are its watermarks.
*/
if (!crtc->primary->state->visible)
- skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);
+ skl_write_plane_wm(intel_crtc, p_wm,
+ &dev_priv->wm.skl_results.ddb, 0);
I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
@@ -3585,7 +3597,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
return;
err:
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
void intel_finish_reset(struct drm_i915_private *dev_priv)
@@ -3604,8 +3616,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
dev_priv->modeset_restore_state = NULL;
- dev_priv->modeset_restore_state = NULL;
-
/* reset doesn't touch the display */
if (!gpu_reset_clobbers_display(dev_priv)) {
if (!state) {
@@ -3647,6 +3657,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
intel_hpd_init(dev_priv);
}
+ if (state)
+ drm_atomic_state_put(state);
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
@@ -3715,7 +3727,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
if (pipe_config->pch_pfit.enabled)
skylake_pfit_enable(crtc);
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
if (pipe_config->pch_pfit.enabled)
ironlake_pfit_enable(crtc);
else if (old_crtc_state->pch_pfit.enabled)
@@ -3735,7 +3747,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
/* enable normal train */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
} else {
@@ -3746,7 +3758,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
} else {
@@ -3760,7 +3772,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
udelay(1000);
/* IVB wants error correction enabled */
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
FDI_FE_ERRC_ENABLE);
}
@@ -3904,7 +3916,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
@@ -3948,7 +3960,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
@@ -3957,7 +3969,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
} else {
@@ -4211,7 +4223,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev))
+ if (HAS_PCH_IBX(dev_priv))
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
@@ -4223,7 +4235,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
@@ -4546,7 +4558,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
assert_pch_transcoder_disabled(dev_priv, pipe);
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
ivybridge_update_fdi_bc_bifurcation(intel_crtc);
/* Write the TU size bits before fdi link training, so that error
@@ -4559,7 +4571,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* We need to program the right clock selection before writing the pixel
* multiplier into the DPLL. */
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
u32 sel;
temp = I915_READ(PCH_DPLL_SEL);
@@ -4589,7 +4601,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
+ if (HAS_PCH_CPT(dev_priv) &&
+ intel_crtc_has_dp_encoder(intel_crtc->config)) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
@@ -4668,7 +4681,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
to_intel_crtc(crtc_state->base.crtc);
int need_scaling;
- need_scaling = intel_rotation_90_or_270(rotation) ?
+ need_scaling = drm_rotation_90_or_270(rotation) ?
(src_h != dst_w || src_w != dst_h):
(src_w != dst_w || src_h != dst_h);
@@ -4859,7 +4872,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
* as some pre-programmed values are broken,
* e.g. x201.
*/
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
else
@@ -4884,7 +4897,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
*/
assert_plane_enabled(dev_priv, crtc->plane);
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4916,7 +4929,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
return;
assert_plane_enabled(dev_priv, crtc->plane);
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4985,7 +4998,7 @@ intel_post_enable_primary(struct drm_crtc *crtc)
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
/* Underruns don't always raise interrupts, so check manually. */
@@ -5008,7 +5021,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/*
@@ -5040,7 +5053,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH_DISPLAY(dev)) {
+ if (HAS_GMCH_DISPLAY(dev_priv)) {
intel_set_memory_cxsr(dev_priv, false);
dev_priv->wm.vlv.cxsr = false;
intel_wait_for_vblank(dev, pipe);
@@ -5105,7 +5118,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
intel_pre_disable_primary(&crtc->base);
}
- if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
+ if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev_priv)) {
crtc->wm.cxsr_allowed = false;
/*
@@ -5383,7 +5396,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
cpt_verify_modeset(dev, intel_crtc->pipe);
/* Must wait for vblank to avoid spurious PCH FIFO underruns */
@@ -5396,7 +5409,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
- return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
+ return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -5508,7 +5521,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
- if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
+ if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
intel_wait_for_vblank(dev, hsw_workaround_pipe);
intel_wait_for_vblank(dev, hsw_workaround_pipe);
}
@@ -5565,7 +5578,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (intel_crtc->config->has_pch_encoder) {
ironlake_disable_pch_transcoder(dev_priv, pipe);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
i915_reg_t reg;
u32 temp;
@@ -5699,13 +5712,13 @@ static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_digital_port *intel_dig_port;
switch (intel_encoder->type) {
case INTEL_OUTPUT_UNKNOWN:
/* Only DDI platforms should ever use this output type */
- WARN_ON_ONCE(!HAS_DDI(dev));
+ WARN_ON_ONCE(!HAS_DDI(dev_priv));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
@@ -5726,7 +5739,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_digital_port *intel_dig_port;
switch (intel_encoder->type) {
@@ -5739,7 +5752,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
* what's the status of the given connectors, play safe and
* run the DP detection too.
*/
- WARN_ON_ONCE(!HAS_DDI(dev));
+ WARN_ON_ONCE(!HAS_DDI(dev_priv));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
@@ -5835,7 +5848,7 @@ static void intel_update_max_cdclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
@@ -5857,9 +5870,9 @@ static void intel_update_max_cdclk(struct drm_device *dev)
max_cdclk = 308571;
dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
dev_priv->max_cdclk_freq = 624000;
- } else if (IS_BROADWELL(dev)) {
+ } else if (IS_BROADWELL(dev_priv)) {
/*
* FIXME with extra cooling we can allow
* 540 MHz for ULX and 675 MHz for ULT.
@@ -5868,15 +5881,15 @@ static void intel_update_max_cdclk(struct drm_device *dev)
*/
if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
dev_priv->max_cdclk_freq = 450000;
- else if (IS_BDW_ULX(dev))
+ else if (IS_BDW_ULX(dev_priv))
dev_priv->max_cdclk_freq = 450000;
- else if (IS_BDW_ULT(dev))
+ else if (IS_BDW_ULT(dev_priv))
dev_priv->max_cdclk_freq = 540000;
else
dev_priv->max_cdclk_freq = 675000;
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->max_cdclk_freq = 320000;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->max_cdclk_freq = 400000;
} else {
/* otherwise assume cdclk is fixed */
@@ -6676,7 +6689,7 @@ static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
cherryview_set_cdclk(dev, req_cdclk);
else
valleyview_set_cdclk(dev, req_cdclk);
@@ -6704,7 +6717,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_set_pipe_timings(intel_crtc);
intel_set_pipe_src_size(intel_crtc);
- if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
@@ -6719,7 +6732,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
chv_prepare_pll(intel_crtc, intel_crtc->config);
chv_enable_pll(intel_crtc, intel_crtc->config);
} else {
@@ -6775,7 +6788,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_encoders_pre_enable(crtc, pipe_config, old_state);
@@ -6823,7 +6836,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
*/
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
intel_wait_for_vblank(dev, pipe);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -6838,9 +6851,9 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
chv_disable_pll(dev_priv, pipe);
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_disable_pll(dev_priv, pipe);
else
i9xx_disable_pll(intel_crtc);
@@ -6848,7 +6861,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
@@ -6886,7 +6899,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
dev_priv->display.crtc_disable(crtc_state, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
crtc->base.id, crtc->name);
@@ -7028,6 +7041,7 @@ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state = pipe_config->base.state;
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
@@ -7040,7 +7054,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return -EINVAL;
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
pipe_config->fdi_lanes);
@@ -7225,11 +7239,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
- if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+ if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
return -EINVAL;
- if (HAS_IPS(dev))
+ if (HAS_IPS(dev_priv))
hsw_compute_ips_config(crtc, pipe_config);
if (pipe_config->has_pch_encoder)
@@ -7367,7 +7381,7 @@ static int haswell_get_display_clock_speed(struct drm_device *dev)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
- else if (IS_HSW_ULT(dev))
+ else if (IS_HSW_ULT(dev_priv))
return 337500;
else
return 540000;
@@ -7537,9 +7551,9 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
uint8_t tmp = 0;
/* FIXME other chipsets? */
- if (IS_GM45(dev))
+ if (IS_GM45(dev_priv))
vco_table = ctg_vco;
- else if (IS_G4X(dev))
+ else if (IS_G4X(dev_priv))
vco_table = elk_vco;
else if (IS_CRESTLINE(dev))
vco_table = cl_vco;
@@ -7804,8 +7818,8 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
* for gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily accessed).
*/
- if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
- crtc->config->has_drrs) {
+ if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
+ INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -8107,7 +8121,7 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
pipe_config->pixel_multiplier = 1;
pipe_config->dpll = *dpll;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(to_i915(dev))) {
chv_compute_dpll(crtc, pipe_config);
chv_prepare_pll(crtc, pipe_config);
chv_enable_pll(crtc, pipe_config);
@@ -8132,7 +8146,7 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
*/
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
{
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(to_i915(dev)))
chv_disable_pll(to_i915(dev), pipe);
else
vlv_disable_pll(to_i915(dev), pipe);
@@ -8156,7 +8170,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) {
dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
@@ -8173,7 +8187,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
else {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev) && reduced_clock)
+ if (IS_G4X(dev_priv) && reduced_clock)
dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
}
switch (clock->p2) {
@@ -8235,7 +8249,8 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev_priv) &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
@@ -8304,7 +8319,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
* documented on the DDI_FUNC_CTL register description, EDP Input Select
* bits. */
- if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+ if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
@@ -8414,7 +8429,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
@@ -8454,7 +8470,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
} else
pipeconf |= PIPECONF_PROGRESSIVE;
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_crtc->config->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
@@ -8658,7 +8674,8 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp;
- if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+ if (INTEL_GEN(dev_priv) <= 3 &&
+ (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
return;
tmp = I915_READ(PFIT_CONTROL);
@@ -8830,7 +8847,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE))
goto out;
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
pipe_config->pipe_bpp = 18;
@@ -8846,7 +8864,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
}
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
@@ -8860,7 +8878,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (INTEL_INFO(dev)->gen >= 4) {
/* No way to read it out on pipes B and C */
- if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
+ if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
tmp = dev_priv->chv_dpll_md[crtc->pipe];
else
tmp = I915_READ(DPLL_MD(crtc->pipe));
@@ -8868,7 +8886,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
pipe_config->dpll_hw_state.dpll_md = tmp;
- } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv)) {
tmp = I915_READ(DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
@@ -8880,13 +8899,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
/*
* DPLL_DVO_2X_MODE must be enabled for both DPLLs
* on 830. Filter it out here so that we don't
* report errors due to that.
*/
- if (IS_I830(dev))
+ if (IS_I830(dev_priv))
pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
@@ -8898,9 +8917,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
DPLL_PORTB_READY_MASK);
}
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
chv_crtc_clock_get(crtc, pipe_config);
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_crtc_clock_get(crtc, pipe_config);
else
i9xx_crtc_clock_get(crtc, pipe_config);
@@ -8951,7 +8970,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
}
}
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
has_ck505 = dev_priv->vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
@@ -9199,7 +9218,8 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
with_spread = true;
- if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
+ if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
mutex_lock(&dev_priv->sb_lock);
@@ -9222,7 +9242,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
}
}
- reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9238,7 +9258,7 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)
mutex_lock(&dev_priv->sb_lock);
- reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9346,9 +9366,11 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
*/
void intel_init_pch_refclk(struct drm_device *dev)
{
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
ironlake_init_pch_refclk(dev);
- else if (HAS_PCH_LPT(dev))
+ else if (HAS_PCH_LPT(dev_priv))
lpt_init_pch_refclk(dev);
}
@@ -9477,7 +9499,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
+ (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
factor = 25;
} else if (crtc_state->sdvo_tv_clock)
factor = 20;
@@ -9837,7 +9859,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
/* We currently do not free assignments of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiate them) so just WARN about this case for now. */
- if (IS_GEN7(dev)) {
+ if (IS_GEN7(dev_priv)) {
WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
PF_PIPE_SEL_IVB(crtc->pipe));
}
@@ -9882,7 +9904,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
offset = I915_READ(DSPOFFSET(pipe));
} else {
if (plane_config->tiling)
@@ -10026,7 +10048,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
@@ -10046,9 +10068,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
-
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
return I915_READ(D_COMP_HSW);
else
return I915_READ(D_COMP_BDW);
@@ -10056,9 +10076,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
- struct drm_device *dev = &dev_priv->drm;
-
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
@@ -10206,7 +10224,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev)) {
+ if (HAS_PCH_LPT_LP(dev_priv)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -10226,7 +10244,7 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
hsw_restore_lcpll(dev_priv);
lpt_init_pch_refclk(dev);
- if (HAS_PCH_LPT_LP(dev)) {
+ if (HAS_PCH_LPT_LP(dev_priv)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -10650,9 +10668,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
@@ -10735,7 +10753,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_pfit_config(crtc, pipe_config);
}
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
(I915_READ(IPS_CTL) & IPS_ENABLE);
@@ -10823,12 +10841,15 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
+ const struct skl_plane_wm *p_wm =
+ &cstate->wm.skl.optimal.planes[PLANE_CURSOR];
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
- skl_write_cursor_wm(intel_crtc, wm);
+ skl_write_cursor_wm(intel_crtc, p_wm, &wm->ddb);
if (plane_state && plane_state->base.visible) {
cntl = MCURSOR_GAMMA_ENABLE;
@@ -10848,7 +10869,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
}
cntl |= pipe << 28; /* Connect to correct pipe */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
cntl |= CURSOR_PIPE_CSC_ENABLE;
if (plane_state->base.rotation == DRM_ROTATE_180)
@@ -10896,7 +10917,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
pos |= y << CURSOR_Y_SHIFT;
/* ILK+ do this automagically */
- if (HAS_GMCH_DISPLAY(dev) &&
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
plane_state->base.rotation == DRM_ROTATE_180) {
base += (plane_state->base.crtc_h *
plane_state->base.crtc_w - 1) * 4;
@@ -10905,13 +10926,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
I915_WRITE(CURPOS(pipe), pos);
- if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv))
i845_update_cursor(crtc, base, plane_state);
else
i9xx_update_cursor(crtc, base, plane_state);
}
-static bool cursor_size_ok(struct drm_device *dev,
+static bool cursor_size_ok(struct drm_i915_private *dev_priv,
uint32_t width, uint32_t height)
{
if (width == 0 || height == 0)
@@ -10923,11 +10944,11 @@ static bool cursor_size_ok(struct drm_device *dev,
* the precision of the register. Everything else requires
* square cursors, limited to a few power-of-two sizes.
*/
- if (IS_845G(dev) || IS_I865G(dev)) {
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
if ((width & 63) != 0)
return false;
- if (width > (IS_845G(dev) ? 64 : 512))
+ if (width > (IS_845G(dev_priv) ? 64 : 512))
return false;
if (height > 1023)
@@ -10936,7 +10957,7 @@ static bool cursor_size_ok(struct drm_device *dev,
switch (width | height) {
case 256:
case 128:
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return false;
case 64:
break;
@@ -11271,9 +11292,14 @@ found:
return true;
fail:
- drm_atomic_state_free(state);
- drm_atomic_state_free(restore_state);
- restore_state = state = NULL;
+ if (state) {
+ drm_atomic_state_put(state);
+ state = NULL;
+ }
+ if (restore_state) {
+ drm_atomic_state_put(restore_state);
+ restore_state = NULL;
+ }
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
@@ -11301,10 +11327,9 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
return;
ret = drm_atomic_commit(state);
- if (ret) {
+ if (ret)
DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
- drm_atomic_state_free(state);
- }
+ drm_atomic_state_put(state);
}
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -11315,9 +11340,9 @@ static int i9xx_pll_refclk(struct drm_device *dev,
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
return dev_priv->vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
return 120000;
- else if (!IS_GEN2(dev))
+ else if (!IS_GEN2(dev_priv))
return 96000;
else
return 48000;
@@ -11350,7 +11375,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (!IS_GEN2(dev)) {
+ if (!IS_GEN2(dev_priv)) {
if (IS_PINEVIEW(dev))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -11378,7 +11403,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
- u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
+ u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
if (is_lvds) {
@@ -11579,7 +11604,7 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
* really needed there. But since ctg has the registers,
* include it in the check anyway.
*/
- if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
return true;
/*
@@ -11849,6 +11874,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
@@ -11877,7 +11903,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* 48bits addresses, and we need a NOOP for the batch size to
* stay even.
*/
- if (IS_GEN8(dev))
+ if (IS_GEN8(dev_priv))
len += 2;
}
@@ -11914,7 +11940,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
- if (IS_GEN8(dev))
+ if (IS_GEN8(dev_priv))
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
@@ -11923,7 +11949,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring,
i915_ggtt_offset(req->engine->scratch) + 256);
- if (IS_GEN8(dev)) {
+ if (IS_GEN8(dev_priv)) {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
}
@@ -12242,23 +12268,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
atomic_inc(&intel_crtc->unpin_work_count);
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- engine = &dev_priv->engine[BCS];
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ engine = dev_priv->engine[BCS];
if (fb->modifier[0] != old_fb->modifier[0])
/* vlv: DISPLAY_FLIP fails to change tiling */
engine = NULL;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- engine = &dev_priv->engine[BCS];
+ } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
+ engine = dev_priv->engine[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
engine = i915_gem_active_get_engine(&obj->last_write,
&obj->base.dev->struct_mutex);
if (engine == NULL || engine->id != RCS)
- engine = &dev_priv->engine[BCS];
+ engine = dev_priv->engine[BCS];
} else {
- engine = &dev_priv->engine[RCS];
+ engine = dev_priv->engine[RCS];
}
mmio_flip = use_mmio_flip(engine, obj);
@@ -12289,7 +12315,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->flip_queued_req = i915_gem_active_get(&obj->last_write,
&obj->base.dev->struct_mutex);
- schedule_work(&work->mmio_work);
+ queue_work(system_unbound_wq, &work->mmio_work);
} else {
request = i915_gem_request_alloc(engine, engine->last_context);
if (IS_ERR(request)) {
@@ -12372,8 +12398,7 @@ retry:
goto retry;
}
- if (ret)
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
@@ -12447,7 +12472,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_framebuffer *fb = plane_state->fb;
int ret;
- if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
+ if (INTEL_GEN(dev_priv) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
ret = skl_update_scaler_plane(
to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state));
@@ -12526,7 +12551,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* cstate->update_wm was already set above, so this flag will
* take effect when we commit and program watermarks.
*/
- if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
!needs_scaling(old_plane_state))
pipe_config->disable_lp_wm = true;
@@ -12702,15 +12727,16 @@ static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_atomic_state *state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
- if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
+ if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)))
bpp = 10*3;
- else if (INTEL_INFO(dev)->gen >= 5)
+ else if (INTEL_GEN(dev_priv) >= 5)
bpp = 12*3;
else
bpp = 8*3;
@@ -12748,6 +12774,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
const char *context)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_plane *plane;
struct intel_plane *intel_plane;
struct intel_plane_state *state;
@@ -12809,7 +12836,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
"pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
@@ -12824,13 +12851,13 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dpll_hw_state.pll9,
pipe_config->dpll_hw_state.pll10,
pipe_config->dpll_hw_state.pcsdw12);
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
pipe_config->dpll_hw_state.ctrl1,
pipe_config->dpll_hw_state.cfgcr1,
pipe_config->dpll_hw_state.cfgcr2);
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
pipe_config->dpll_hw_state.wrpll,
pipe_config->dpll_hw_state.spll);
@@ -12908,7 +12935,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
switch (encoder->type) {
unsigned int port_mask;
case INTEL_OUTPUT_UNKNOWN:
- if (WARN_ON(!HAS_DDI(dev)))
+ if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
@@ -13194,6 +13221,7 @@ intel_pipe_config_compare(struct drm_device *dev,
struct intel_crtc_state *pipe_config,
bool adjust)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool ret = true;
#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
@@ -13339,8 +13367,8 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(has_hdmi_sink);
- if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
- IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
PIPE_CONF_CHECK_I(limited_color_range);
PIPE_CONF_CHECK_I(has_infoframe);
@@ -13380,7 +13408,7 @@ intel_pipe_config_compare(struct drm_device *dev,
}
/* BDW+ don't expose a synchronous way to read the state */
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
PIPE_CONF_CHECK_I(ips_enabled);
PIPE_CONF_CHECK_I(double_wide);
@@ -13399,7 +13427,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+ if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
@@ -13440,30 +13468,65 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_allocation hw_ddb, *sw_ddb;
- struct skl_ddb_entry *hw_entry, *sw_entry;
+ struct skl_pipe_wm hw_wm, *sw_wm;
+ struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+ struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
- int plane;
+ int plane, level, max_level = ilk_wm_max_level(dev_priv);
if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
return;
+ skl_pipe_wm_get_hw_state(crtc, &hw_wm);
+ sw_wm = &intel_crtc->wm.active.skl;
+
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
/* planes */
for_each_plane(dev_priv, pipe, plane) {
- hw_entry = &hw_ddb.plane[pipe][plane];
- sw_entry = &sw_ddb->plane[pipe][plane];
+ hw_plane_wm = &hw_wm.planes[plane];
+ sw_plane_wm = &sw_wm->planes[plane];
- if (skl_ddb_entry_equal(hw_entry, sw_entry))
- continue;
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ if (skl_wm_level_equals(&hw_plane_wm->wm[level],
+ &sw_plane_wm->wm[level]))
+ continue;
+
+ DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1, level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
+ }
- DRM_ERROR("mismatch in DDB state pipe %c plane %d "
- "(expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe), plane + 1,
- sw_entry->start, sw_entry->end,
- hw_entry->start, hw_entry->end);
+ if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
+ &sw_plane_wm->trans_wm)) {
+ DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1,
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw_ddb.plane[pipe][plane];
+ sw_ddb_entry = &sw_ddb->plane[pipe][plane];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe), plane + 1,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
}
/*
@@ -13473,15 +13536,46 @@ static void verify_wm_state(struct drm_crtc *crtc,
* once the plane becomes visible, we can skip this check
*/
if (intel_crtc->cursor_addr) {
- hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+ hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
+ sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ if (skl_wm_level_equals(&hw_plane_wm->wm[level],
+ &sw_plane_wm->wm[level]))
+ continue;
+
+ DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
+ }
- if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
- DRM_ERROR("mismatch in DDB state pipe %c cursor "
- "(expected (%u,%u), found (%u,%u))\n",
+ if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
+ &sw_plane_wm->trans_wm)) {
+ DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
pipe_name(pipe),
- sw_entry->start, sw_entry->end,
- hw_entry->start, hw_entry->end);
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+ sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe),
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
}
}
}
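With this rework verify_wm_state() no longer checks only the DDB allocation: for each plane (and the cursor) it reads the pipe watermarks back from hardware and compares every level plus the transition watermark against the software state, reporting the enable bit, blocks and lines on mismatch. A condensed sketch of the per-level comparison, using the helpers named in the hunk above:

	for (level = 0; level <= max_level; level++) {
		if (skl_wm_level_equals(&hw_plane_wm->wm[level],
					&sw_plane_wm->wm[level]))
			continue;
		DRM_ERROR("WM mismatch on plane %d level %d\n",
			  plane + 1, level);
	}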
@@ -13732,7 +13826,7 @@ intel_modeset_verify_disabled(struct drm_device *dev)
static void update_scanline_offset(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
* The scanline counter increments at the leading edge of hsync.
@@ -13752,7 +13846,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* there's an extra 1 line difference. So we need to add two instead of
* one to the value.
*/
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
int vtotal;
@@ -13761,7 +13855,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
vtotal /= 2;
crtc->scanline_offset = vtotal - 1;
- } else if (HAS_DDI(dev) &&
+ } else if (HAS_DDI(dev_priv) &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
@@ -14239,12 +14333,11 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
unsigned int *crtc_vblank_mask)
{
struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
struct drm_crtc_state *old_crtc_state;
- struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ struct intel_crtc_state *cstate;
unsigned int updated = 0;
bool progress;
enum pipe pipe;
@@ -14262,12 +14355,14 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
bool vbl_wait = false;
unsigned int cmask = drm_crtc_mask(crtc);
- pipe = to_intel_crtc(crtc)->pipe;
+
+ intel_crtc = to_intel_crtc(crtc);
+ cstate = to_intel_crtc_state(crtc->state);
+ pipe = intel_crtc->pipe;
if (updated & cmask || !crtc->state->active)
continue;
- if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
- pipe))
+ if (skl_ddb_allocation_overlaps(state, intel_crtc))
continue;
updated |= cmask;
@@ -14278,7 +14373,8 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
* then we need to wait for a vblank to pass for the
* new ddb allocation to take effect.
*/
- if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
+ if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
+ &intel_crtc->hw_ddb) &&
!crtc->state->active_changed &&
intel_state->wm_results.dirty_pipes != updated)
vbl_wait = true;
@@ -14458,7 +14554,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_cleanup_done(state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
/* As one of the primary mmio accessors, KMS has a high likelihood
* of triggering bugs in unclaimed access. After we finish
@@ -14541,6 +14637,7 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_shared_dpll_commit(state);
intel_atomic_track_fbs(state);
+ drm_atomic_state_get(state);
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
@@ -14582,9 +14679,8 @@ retry:
goto retry;
}
- if (ret)
out:
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
/*
@@ -14658,6 +14754,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *fb = new_state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
@@ -14709,7 +14806,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
INTEL_INFO(dev)->cursor_needs_physical) {
- int align = IS_I830(dev) ? 16 * 1024 : 256;
+ int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
ret = i915_gem_object_attach_phys(obj, align);
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
@@ -14834,6 +14931,8 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *intel_cstate =
+ to_intel_crtc_state(crtc->state);
struct intel_crtc_state *old_intel_state =
to_intel_crtc_state(old_crtc_state);
bool modeset = needs_modeset(crtc->state);
@@ -14850,13 +14949,13 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
intel_color_load_luts(crtc->state);
}
- if (to_intel_crtc_state(crtc->state)->update_pipe)
+ if (intel_cstate->update_pipe) {
intel_update_pipe_config(intel_crtc, old_intel_state);
- else if (INTEL_GEN(dev_priv) >= 9) {
+ } else if (INTEL_GEN(dev_priv) >= 9) {
skl_detach_scalers(intel_crtc);
I915_WRITE(PIPE_WM_LINETIME(pipe),
- dev_priv->wm.skl_hw.wm_linetime[pipe]);
+ intel_cstate->wm.skl.optimal.linetime);
}
}
@@ -14899,9 +14998,11 @@ const struct drm_plane_funcs intel_plane_funcs = {
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
int pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *primary = NULL;
struct intel_plane_state *state = NULL;
const uint32_t *intel_primary_formats;
+ unsigned int supported_rotations;
unsigned int num_formats;
int ret;
@@ -14933,7 +15034,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->update_plane = skylake_update_primary_plane;
primary->disable_plane = skylake_disable_primary_plane;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -14959,7 +15060,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"plane 1%c", pipe_name(pipe));
- else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(dev, &primary->base, 0,
&intel_plane_funcs,
intel_primary_formats, num_formats,
@@ -14974,8 +15075,21 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
if (ret)
goto fail;
- if (INTEL_INFO(dev)->gen >= 4)
- intel_create_rotation_property(dev, primary);
+ if (INTEL_GEN(dev_priv) >= 9) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180;
+ } else {
+ supported_rotations = DRM_ROTATE_0;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&primary->base,
+ DRM_ROTATE_0,
+ supported_rotations);
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
@@ -14988,24 +15102,6 @@ fail:
return NULL;
}
-void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
-{
- if (!dev->mode_config.rotation_property) {
- unsigned long flags = DRM_ROTATE_0 |
- DRM_ROTATE_180;
-
- if (INTEL_INFO(dev)->gen >= 9)
- flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
-
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev, flags);
- }
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&plane->base.base,
- dev->mode_config.rotation_property,
- plane->base.state->rotation);
-}
-
static int
intel_check_cursor_plane(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
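Rotation handling moves from the single shared dev->mode_config.rotation_property to per-plane properties created with drm_plane_create_rotation_property(), which takes the initial rotation and a bitmask of supported rotations. A minimal sketch of attaching the property (mask values as used in the hunks above; gen9 additionally advertises 90/270):

	static int example_attach_rotation(struct drm_plane *plane)
	{
		return drm_plane_create_rotation_property(plane,
							  DRM_ROTATE_0,
							  DRM_ROTATE_0 |
							  DRM_ROTATE_180);
	}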
@@ -15030,7 +15126,8 @@ intel_check_cursor_plane(struct drm_plane *plane,
return 0;
/* Check for which cursor types we support */
- if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
+ if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w,
+ state->base.crtc_h)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
state->base.crtc_w, state->base.crtc_h);
return -EINVAL;
@@ -15057,7 +15154,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
* display power well must be turned off and on again.
* Refuse to put the cursor into that compromised position.
*/
- if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
+ if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C &&
state->base.visible && state->base.crtc_x < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
@@ -15101,6 +15198,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
int pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *cursor = NULL;
struct intel_plane_state *state = NULL;
int ret;
@@ -15132,17 +15230,11 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
if (ret)
goto fail;
- if (INTEL_INFO(dev)->gen >= 4) {
- if (!dev->mode_config.rotation_property)
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 |
- DRM_ROTATE_180);
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&cursor->base.base,
- dev->mode_config.rotation_property,
- state->base.rotation);
- }
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&cursor->base,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_180);
if (INTEL_INFO(dev)->gen >=9)
state->scaler_id = -1;
@@ -15311,7 +15403,7 @@ static bool has_edp_a(struct drm_device *dev)
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
@@ -15324,17 +15416,18 @@ static bool intel_crt_present(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 9)
return false;
- if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
+ if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
return false;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
return false;
- if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+ if (HAS_PCH_LPT_H(dev_priv) &&
+ I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
/* DDI E can't be used if DDI A requires 4 lanes */
- if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
if (!dev_priv->vbt.int_crt_support)
@@ -15397,7 +15490,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (intel_crt_present(dev))
intel_crt_init(dev);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
* DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
@@ -15408,7 +15501,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_C);
intel_dsi_init(dev);
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
int found;
/*
@@ -15418,7 +15511,7 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
- if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
intel_ddi_init(dev, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -15434,13 +15527,13 @@ static void intel_setup_outputs(struct drm_device *dev)
/*
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
- if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
(dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
intel_ddi_init(dev, PORT_E);
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
@@ -15467,7 +15560,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
/*
@@ -15499,7 +15592,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
/*
* eDP not supported on port D,
* so no need to worry about it
@@ -15512,18 +15605,18 @@ static void intel_setup_outputs(struct drm_device *dev)
}
intel_dsi_init(dev);
- } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
+ } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
bool found = false;
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
- if (!found && IS_G4X(dev)) {
+ if (!found && IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
}
- if (!found && IS_G4X(dev))
+ if (!found && IS_G4X(dev_priv))
intel_dp_init(dev, DP_B, PORT_B);
}
@@ -15536,18 +15629,17 @@ static void intel_setup_outputs(struct drm_device *dev)
if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
}
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
intel_dp_init(dev, DP_C, PORT_C);
}
- if (IS_G4X(dev) &&
- (I915_READ(DP_D) & DP_DETECTED))
+ if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D, PORT_D);
- } else if (IS_GEN2(dev))
+ } else if (IS_GEN2(dev_priv))
intel_dvo_init(dev);
if (SUPPORTS_TV(dev))
@@ -15618,10 +15710,10 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
};
static
-u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
- uint32_t pixel_format)
+u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, uint32_t pixel_format)
{
- u32 gen = INTEL_INFO(dev)->gen;
+ u32 gen = INTEL_INFO(dev_priv)->gen;
if (gen >= 9) {
int cpp = drm_format_plane_cpp(pixel_format, 0);
@@ -15630,7 +15722,8 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
* pixels and 32K bytes."
*/
return min(8192 * cpp, 32768);
- } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ } else if (gen >= 5 && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv)) {
return 32*1024;
} else if (gen >= 4) {
if (fb_modifier == I915_FORMAT_MOD_X_TILED)
@@ -15717,7 +15810,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
return -EINVAL;
}
- pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
+ pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
@@ -15755,7 +15848,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
}
break;
case DRM_FORMAT_ABGR8888:
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
INTEL_INFO(dev)->gen < 9) {
format_name = drm_get_format_name(mode_cmd->pixel_format);
DRM_DEBUG("unsupported pixel format: %s\n", format_name);
@@ -15774,7 +15867,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
}
break;
case DRM_FORMAT_ABGR2101010:
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
format_name = drm_get_format_name(mode_cmd->pixel_format);
DRM_DEBUG("unsupported pixel format: %s\n", format_name);
kfree(format_name);
@@ -15841,12 +15934,6 @@ intel_user_framebuffer_create(struct drm_device *dev,
return fb;
}
-#ifndef CONFIG_DRM_FBDEV_EMULATION
-static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-}
-#endif
-
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.output_poll_changed = intel_fbdev_output_poll_changed,
@@ -16227,7 +16314,7 @@ static void i915_disable_vga(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
u8 sr1;
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
+ i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
@@ -16315,7 +16402,7 @@ retry:
* BIOS-programmed watermarks untouched and hope for the best.
*/
WARN(true, "Could not determine valid watermarks for inherited state\n");
- goto fail;
+ goto put_state;
}
/* Write calculated watermark values back */
@@ -16326,7 +16413,8 @@ retry:
dev_priv->display.optimize_watermarks(cs);
}
- drm_atomic_state_free(state);
+put_state:
+ drm_atomic_state_put(state);
fail:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -16365,7 +16453,7 @@ void intel_modeset_init(struct drm_device *dev)
* BIOS isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE);
@@ -16377,10 +16465,10 @@ void intel_modeset_init(struct drm_device *dev)
}
}
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
- } else if (IS_GEN3(dev)) {
+ } else if (IS_GEN3(dev_priv)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
@@ -16388,10 +16476,10 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 8192;
}
- if (IS_845G(dev) || IS_I865G(dev)) {
- dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
+ dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512;
dev->mode_config.cursor_height = 1023;
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
} else {
@@ -16597,7 +16685,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
if (crtc->active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base);
- if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
+ if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
@@ -16672,7 +16760,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
void i915_redisable_vga_power_on(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
+ i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
@@ -16910,11 +16998,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
pll->on = false;
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_wm_get_hw_state(dev);
- else if (IS_GEN9(dev))
+ else if (IS_GEN9(dev_priv))
skl_wm_get_hw_state(dev);
- else if (HAS_PCH_SPLIT(dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
ilk_wm_get_hw_state(dev);
for_each_intel_crtc(dev, crtc) {
@@ -16964,10 +17052,9 @@ void intel_display_resume(struct drm_device *dev)
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
- if (ret) {
+ if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
- drm_atomic_state_free(state);
- }
+ drm_atomic_state_put(state);
}
void intel_modeset_gem_init(struct drm_device *dev)
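With drm_atomic_state now reference counted, drm_atomic_state_free() gives way to drm_atomic_state_put(), so exit paths no longer have to track whether ownership was transferred. A sketch of the resulting idiom, assuming the drm_atomic_state_get()/drm_atomic_state_put() API this series converts to:

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	/* ... build the state ... */

	ret = drm_atomic_commit(state);	/* commit takes its own references */
	drm_atomic_state_put(state);	/* always drop ours, success or failure */
	return ret;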
@@ -17106,6 +17193,8 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
struct intel_display_error_state {
u32 power_well_driver;
@@ -17244,7 +17333,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
return;
err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(dev_priv, i) {
@@ -17261,7 +17350,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " SIZE: %08x\n", error->plane[i].size);
err_printf(m, " POS: %08x\n", error->plane[i].pos);
}
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
if (INTEL_INFO(dev)->gen >= 4) {
err_printf(m, " SURF: %08x\n", error->plane[i].surface);
@@ -17288,3 +17377,5 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
}
}
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 3581b5a7f716..3c2293bd24bf 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -344,7 +344,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
DP |= DP_PIPE_SELECT_CHV(pipe);
else if (pipe == PIPE_B)
DP |= DP_PIPEB_SELECT;
@@ -356,10 +356,10 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 * So enable it temporarily if it's not already enabled.
*/
if (!pll_enabled) {
- release_cl_override = IS_CHERRYVIEW(dev) &&
+ release_cl_override = IS_CHERRYVIEW(dev_priv) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
- if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+ if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev_priv) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
DRM_ERROR("Failed to force on pll for pipe %c!\n",
pipe_name(pipe));
@@ -570,8 +570,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
- if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
- !IS_BROXTON(dev)))
+ if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_BROXTON(dev_priv)))
return;
/*
@@ -591,7 +591,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
intel_dp->pps_reset = true;
else
intel_dp->pps_pipe = INVALID_PIPE;
@@ -664,7 +664,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
pps_lock(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
i915_reg_t pp_ctrl_reg, pp_div_reg;
u32 pp_div;
@@ -692,7 +692,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
@@ -706,7 +706,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
@@ -821,15 +821,16 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
uint32_t aux_clock_divider)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
uint32_t precharge, timeout;
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
precharge = 3;
else
precharge = 5;
- if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
+ if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -1147,7 +1148,7 @@ static enum port intel_aux_port(struct drm_i915_private *dev_priv,
}
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
switch (port) {
case PORT_B:
@@ -1161,7 +1162,7 @@ static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
switch (port) {
case PORT_B:
@@ -1175,7 +1176,7 @@ static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
switch (port) {
case PORT_A:
@@ -1191,7 +1192,7 @@ static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
switch (port) {
case PORT_A:
@@ -1207,7 +1208,7 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
switch (port) {
case PORT_A:
@@ -1222,7 +1223,7 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
switch (port) {
case PORT_A:
@@ -1237,7 +1238,7 @@ static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+ enum port port)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_ctl_reg(dev_priv, port);
@@ -1248,7 +1249,7 @@ static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
}
static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+ enum port port, int index)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return skl_aux_data_reg(dev_priv, port, index);
@@ -1306,14 +1307,10 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
-
- /* WaDisableHBR2:skl */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
- return false;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
- (INTEL_INFO(dev)->gen >= 9))
+ if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
+ IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
return true;
else
return false;
@@ -1323,13 +1320,13 @@ static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
int size;
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
*source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
*source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else {
@@ -1349,19 +1346,20 @@ intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const struct dp_link_dpll *divisor = NULL;
int i, count = 0;
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
divisor = chv_dpll;
count = ARRAY_SIZE(chv_dpll);
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
}
@@ -1461,8 +1459,7 @@ static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
if ((drm_debug & DRM_UT_KMS) == 0)
return;
- if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT))
+ if (!drm_dp_is_branch(intel_dp->dpcd))
return;
len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
@@ -1480,8 +1477,7 @@ static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
if ((drm_debug & DRM_UT_KMS) == 0)
return;
- if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT))
+ if (!drm_dp_is_branch(intel_dp->dpcd))
return;
len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
@@ -1578,7 +1574,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
max_clock = common_len - 1;
- if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
pipe_config->has_drrs = false;
@@ -1595,7 +1591,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
return ret;
}
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
else
@@ -1720,7 +1716,7 @@ found:
to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
}
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
return true;
@@ -1778,7 +1774,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Split out the IBX/CPU vs CPT settings */
- if (IS_GEN7(dev) && port == PORT_A) {
+ if (IS_GEN7(dev_priv) && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1789,7 +1785,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= crtc->pipe << 29;
- } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
@@ -1801,8 +1797,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
trans_dp &= ~TRANS_DP_ENH_FRAMING;
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
- if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && pipe_config->limited_color_range)
+ if (!HAS_PCH_SPLIT(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv) &&
+ pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1814,7 +1811,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
else if (crtc->pipe == PIPE_B)
intel_dp->DP |= DP_PIPEB_SELECT;
@@ -2123,7 +2120,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2131,7 +2128,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
}
pp |= PANEL_POWER_ON;
- if (!IS_GEN5(dev))
+ if (!IS_GEN5(dev_priv))
pp |= PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2140,7 +2137,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
wait_panel_on(intel_dp);
intel_dp->last_power_on = jiffies;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -2453,9 +2450,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & DP_PORT_EN))
goto out;
- if (IS_GEN7(dev) && port == PORT_A) {
+ if (IS_GEN7(dev_priv) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
- } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
enum pipe p;
for_each_pipe(dev_priv, p) {
@@ -2470,7 +2467,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
i915_mmio_reg_offset(intel_dp->output_reg));
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else {
*pipe = PORT_TO_PIPE(tmp);
@@ -2498,7 +2495,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
- if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
@@ -2524,8 +2521,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
- if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
+ if (!HAS_PCH_SPLIT(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
pipe_config->lane_count =
@@ -2645,7 +2642,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
dp_train_pat & DP_TRAINING_PATTERN_MASK);
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
@@ -2671,8 +2668,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
I915_WRITE(DP_TP_CTL(port), temp);
- } else if ((IS_GEN7(dev) && port == PORT_A) ||
- (HAS_PCH_CPT(dev) && port != PORT_A)) {
+ } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -2692,7 +2689,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
} else {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
*DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
*DP &= ~DP_LINK_TRAIN_MASK;
@@ -2708,7 +2705,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
*DP |= DP_LINK_TRAIN_PAT_3_CHV;
} else {
DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
@@ -2758,7 +2755,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
pps_lock(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_init_panel_power_sequencer(intel_dp);
intel_dp_enable_port(intel_dp, pipe_config);
@@ -2769,10 +2766,10 @@ static void intel_enable_dp(struct intel_encoder *encoder,
pps_unlock(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
unsigned int lane_mask = 0x0;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
@@ -2992,17 +2989,17 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (IS_GEN7(dev) && port == PORT_A)
+ else if (IS_GEN7(dev_priv) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
- else if (HAS_PCH_CPT(dev) && port != PORT_A)
+ else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -3011,10 +3008,10 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3027,7 +3024,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3039,7 +3036,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3051,7 +3048,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
- } else if (IS_GEN7(dev) && port == PORT_A) {
+ } else if (IS_GEN7(dev_priv) && port == PORT_A) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
@@ -3352,21 +3349,21 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
signal_levels = 0;
else
mask = DDI_BUF_EMP_MASK;
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
signal_levels = chv_signal_levels(intel_dp);
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
signal_levels = vlv_signal_levels(intel_dp);
- } else if (IS_GEN7(dev) && port == PORT_A) {
+ } else if (IS_GEN7(dev_priv) && port == PORT_A) {
signal_levels = gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
- } else if (IS_GEN6(dev) && port == PORT_A) {
+ } else if (IS_GEN6(dev_priv) && port == PORT_A) {
signal_levels = gen6_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
@@ -3411,7 +3408,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
enum port port = intel_dig_port->port;
uint32_t val;
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
return;
val = I915_READ(DP_TP_CTL(port));
@@ -3446,7 +3443,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t DP = intel_dp->DP;
- if (WARN_ON(HAS_DDI(dev)))
+ if (WARN_ON(HAS_DDI(dev_priv)))
return;
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -3454,12 +3451,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("\n");
- if ((IS_GEN7(dev) && port == PORT_A) ||
- (HAS_PCH_CPT(dev) && port != PORT_A)) {
+ if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -3477,7 +3474,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
* to transcoder A after disabling it to allow the
* matching HDMI port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -3616,8 +3613,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (!is_edp(intel_dp) && !intel_dp->sink_count)
return false;
- if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT))
+ if (!drm_dp_is_branch(intel_dp->dpcd))
return true; /* native DP sink */
if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
@@ -3998,6 +3994,31 @@ go_again:
}
static void
+intel_dp_retrain_link(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+ /* Suppress underruns caused by re-training */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+ if (crtc->config->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), false);
+
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+
+ /* Keep underrun reporting disabled until things are stable */
+ intel_wait_for_vblank(&dev_priv->drm, crtc->pipe);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+ if (crtc->config->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), true);
+}
+
+static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
@@ -4017,13 +4038,18 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (!to_intel_crtc(intel_encoder->base.crtc)->active)
return;
+ /* FIXME: we need to synchronize this sort of stuff with hardware
+ * readout */
+ if (WARN_ON_ONCE(!intel_dp->lane_count))
+ return;
+
/* if link training is requested we should always perform it */
if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
- intel_dp_start_link_train(intel_dp);
- intel_dp_stop_link_train(intel_dp);
+
+ intel_dp_retrain_link(intel_dp);
}
}
@@ -4105,7 +4131,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
return connector_status_connected;
/* if there's no downstream port, we're done */
- if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+ if (!drm_dp_is_branch(dpcd))
return connector_status_connected;
/* If we're HPD-aware, SINK_COUNT changes dynamically */
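drm_dp_is_branch() replaces the open-coded DPCD test in these hunks; it is equivalent to the check it supersedes, roughly:

	static inline bool
	drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
	{
		/* A branch device (e.g. a DP-to-VGA dongle) advertises
		 * downstream ports; a native DP sink does not. */
		return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
	}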
@@ -4765,11 +4791,16 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
+ if (IS_GEN9(dev_priv) && lspcon->active)
+ lspcon_resume(lspcon);
+
if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;
@@ -5083,7 +5114,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
@@ -5096,9 +5127,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
port_sel = PANEL_PORT_SELECT_VLV(port);
- } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
if (port == PORT_A)
port_sel = PANEL_PORT_SELECT_DPA;
else
@@ -5109,7 +5140,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div);
else
I915_WRITE(regs.pp_div, pp_div);
@@ -5117,7 +5148,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
- IS_BROXTON(dev) ?
+ IS_BROXTON(dev_priv) ?
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(regs.pp_div));
}
@@ -5125,7 +5156,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
static void intel_dp_pps_init(struct drm_device *dev,
struct intel_dp *intel_dp)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_initial_power_sequencer_setup(intel_dp);
} else {
intel_dp_init_panel_power_sequencer(dev, intel_dp);
@@ -5595,7 +5628,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
mutex_unlock(&dev->mode_config.mutex);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_dp->edp_notifier.notifier_call = edp_notify_handler;
register_reboot_notifier(&intel_dp->edp_notifier);
@@ -5604,7 +5637,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* If the current pipe isn't valid, try the PPS pipe, and if that
* fails just assume pipe A.
*/
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
else
pipe = PORT_TO_PIPE(intel_dp->DP);
@@ -5660,9 +5693,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
/* intel_dp vfuncs */
if (INTEL_INFO(dev)->gen >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
@@ -5672,7 +5705,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
/* Preserve the current hw state. */
@@ -5693,7 +5726,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
- if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
is_edp(intel_dp) && port != PORT_B && port != PORT_C))
return false;
@@ -5714,7 +5747,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector_attach_encoder(intel_connector, intel_encoder);
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -5726,7 +5759,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_C:
@@ -5760,7 +5793,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev) && !IS_GM45(dev)) {
+ if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@@ -5803,13 +5836,13 @@ bool intel_dp_init(struct drm_device *dev,
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
@@ -5826,7 +5859,7 @@ bool intel_dp_init(struct drm_device *dev,
intel_dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
@@ -5835,6 +5868,7 @@ bool intel_dp_init(struct drm_device *dev,
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
}
intel_encoder->cloneable = 0;
+ intel_encoder->port = port;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hotplug.irq_port[port] = intel_dig_port;
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index c438b02184cb..0048b520baf7 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -225,9 +225,6 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2. However, not
* all sinks follow the spec.
- *
- * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
- * supported in source but still not enabled.
*/
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
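With the SKL pre-B0 WaDisableHBR2 exception gone, pattern selection reduces to the rule the comment states: TPS3 is used only when both ends support it. A sketch of that selection, using the two helpers visible above:

	/* Sketch: the source advertises TPS3 via HBR2 support and the sink
	 * via its DPCD; only if both agree do we train with TPS3. */
	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		training_pattern = DP_TRAINING_PATTERN_3;
	else
		training_pattern = DP_TRAINING_PATTERN_2;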
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 54a9d7610d8f..3ffbd69e4551 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -523,6 +523,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
+ intel_encoder->port = intel_dig_port->port;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 1c59ca50c430..605d0b509f24 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1851,13 +1851,13 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
dpll_mgr = &skl_pll_mgr;
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
dpll_mgr = &bxt_pll_mgr;
- else if (HAS_DDI(dev))
+ else if (HAS_DDI(dev_priv))
dpll_mgr = &hsw_pll_mgr;
- else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
@@ -1883,7 +1883,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
/* FIXME: Move this to a more suitable place */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_ddi_pll_init(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a19ec06f9e42..4e90b0760f3f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -206,6 +206,7 @@ struct intel_encoder {
struct drm_encoder base;
enum intel_output_type type;
+ enum port port;
unsigned int cloneable;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
@@ -247,6 +248,8 @@ struct intel_encoder {
void (*suspend)(struct intel_encoder *);
int crtc_mask;
enum hpd_pin hpd_pin;
+ /* for communication with audio component; protected by av_mutex */
+ const struct drm_connector *audio_connector;
};
struct intel_panel {
@@ -465,9 +468,13 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
-struct skl_pipe_wm {
+struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level trans_wm;
+};
+
+struct skl_pipe_wm {
+ struct skl_plane_wm planes[I915_MAX_PLANES];
uint32_t linetime;
};
@@ -493,6 +500,7 @@ struct intel_crtc_wm_state {
struct {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
+ struct skl_ddb_entry ddb;
/* cached plane data rate */
unsigned plane_data_rate[I915_MAX_PLANES];
@@ -730,6 +738,9 @@ struct intel_crtc {
bool cxsr_allowed;
} wm;
+ /* gen9+: ddb allocation currently being used */
+ struct skl_ddb_entry hw_ddb;
+
int scanline_offset;
struct {
@@ -796,22 +807,22 @@ struct intel_plane {
};
struct intel_watermark_params {
- unsigned long fifo_size;
- unsigned long max_wm;
- unsigned long default_wm;
- unsigned long guard_size;
- unsigned long cacheline_size;
+ u16 fifo_size;
+ u16 max_wm;
+ u8 default_wm;
+ u8 guard_size;
+ u8 cacheline_size;
};
struct cxsr_latency {
- int is_desktop;
- int is_ddr3;
- unsigned long fsb_freq;
- unsigned long mem_freq;
- unsigned long display_sr;
- unsigned long display_hpll_disable;
- unsigned long cursor_sr;
- unsigned long cursor_hpll_disable;
+ bool is_desktop : 1;
+ bool is_ddr3 : 1;
+ u16 fsb_freq;
+ u16 mem_freq;
+ u16 display_sr;
+ u16 display_hpll_disable;
+ u16 cursor_sr;
+ u16 cursor_hpll_disable;
};
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
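These tables are read-only and replicated per platform, so shrinking each entry pays off. Under the usual x86-64 ABI the old cxsr_latency layout (two ints plus six unsigned longs) is 56 bytes per entry, while the bitfield-plus-u16 layout above is 14 bytes; a back-of-the-envelope figure, not a measured one:

	/* old: 2 * sizeof(int) + 6 * sizeof(unsigned long) = 8 + 48 = 56
	 * new: 1 byte of flag bits + 1 pad + 6 * sizeof(u16) = 14 */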
@@ -950,17 +961,22 @@ struct intel_dp {
bool compliance_test_active;
};
+struct intel_lspcon {
+ bool active;
+ enum drm_lspcon_mode mode;
+ struct drm_dp_aux *aux;
+};
+
struct intel_digital_port {
struct intel_encoder base;
enum port port;
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
+ struct intel_lspcon lspcon;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
- /* for communication with audio component; protected by av_mutex */
- const struct drm_connector *audio_connector;
};
struct intel_dp_mst_encoder {
@@ -1182,6 +1198,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
+enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
@@ -1285,15 +1302,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, unsigned int cpp);
-static inline bool
-intel_rotation_90_or_270(unsigned int rotation)
-{
- return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
-}
-
-void intel_create_rotation_property(struct drm_device *dev,
- struct intel_plane *plane);
-
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
@@ -1487,6 +1495,10 @@ static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bo
{
}
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+
static inline void intel_fbdev_restore_mode(struct drm_device *dev)
{
}
@@ -1513,6 +1525,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
@@ -1716,7 +1729,7 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
-int ilk_wm_max_level(const struct drm_device *dev);
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_init_pm(struct drm_device *dev);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
@@ -1742,20 +1755,24 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
+void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+ struct skl_pipe_wm *out);
bool intel_can_enable_sagv(struct drm_atomic_state *state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2);
bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
const struct skl_ddb_allocation *new,
enum pipe pipe);
bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
- const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe);
+ struct intel_crtc *intel_crtc);
void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm);
+ const struct skl_plane_wm *wm,
+ const struct skl_ddb_allocation *ddb);
void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm,
+ const struct skl_plane_wm *wm,
+ const struct skl_ddb_allocation *ddb,
int plane);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
@@ -1835,4 +1852,7 @@ int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
void intel_color_set_csc(struct drm_crtc_state *crtc_state);
void intel_color_load_luts(struct drm_crtc_state *crtc_state);
+/* intel_lspcon.c */
+bool lspcon_init(struct intel_digital_port *intel_dig_port);
+void lspcon_resume(struct intel_lspcon *lspcon);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index b2e3d3a334f7..4e0d025490a3 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -437,11 +437,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_device_ready(encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_dsi_device_ready(encoder);
}
@@ -464,7 +464,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -494,7 +494,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -656,7 +656,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -664,7 +663,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
@@ -741,7 +740,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
enum port port;
bool active = false;
@@ -762,7 +760,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
+ i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
@@ -771,7 +769,8 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 * bit in the port C control register does not get set. As a
* workaround, check pipe B conf instead.
*/
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C)
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ port == PORT_C)
enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/* Try command mode if video mode not enabled */
@@ -970,11 +969,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pclk;
DRM_DEBUG_KMS("\n");
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
@@ -1066,7 +1065,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@@ -1138,7 +1137,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
@@ -1153,7 +1152,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(port), tmp |
READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
tmp = I915_READ(MIPI_CTRL(port));
@@ -1242,7 +1241,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
I915_WRITE(MIPI_INIT_COUNT(port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if (IS_BROXTON(dev) && (!intel_dsi->dual_link)) {
+ if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) {
/*
* BXT spec says write MIPI_INIT_COUNT for
* both the ports, even if only one is
@@ -1346,7 +1345,7 @@ static int intel_dsi_set_property(struct drm_connector *connector,
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
- if (HAS_GMCH_DISPLAY(dev) &&
+ if (HAS_GMCH_DISPLAY(to_i915(dev)) &&
val == DRM_MODE_SCALE_CENTER) {
DRM_DEBUG_KMS("centering not supported\n");
return -EINVAL;
@@ -1450,9 +1449,9 @@ void intel_dsi_init(struct drm_device *dev)
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
@@ -1488,6 +1487,7 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_encoder->port = port;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index cd154ce6b6c1..9f279a3d0f74 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -126,6 +126,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
u16 len;
enum port port;
+ DRM_DEBUG_KMS("\n");
+
flags = *data++;
type = *data++;
@@ -199,6 +201,8 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
u32 delay = *((const u32 *) data);
+ DRM_DEBUG_KMS("\n");
+
usleep_range(delay, delay + 10);
data += 4;
@@ -307,6 +311,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
u8 gpio_source, gpio_index;
bool value;
+ DRM_DEBUG_KMS("\n");
+
if (dev_priv->vbt.dsi.seq_version >= 3)
data++;
@@ -331,18 +337,36 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
-static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
+static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
+ DRM_DEBUG_KMS("Skipping I2C element execution\n");
+
return data + *(data + 6) + 7;
}
+static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ DRM_DEBUG_KMS("Skipping SPI element execution\n");
+
+ return data + *(data + 5) + 6;
+}
+
+static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ DRM_DEBUG_KMS("Skipping PMIC element execution\n");
+
+ return data + 15;
+}
+
typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
[MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
[MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
[MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
- [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
+ [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c,
+ [MIPI_SEQ_ELEM_SPI] = mipi_exec_spi,
+ [MIPI_SEQ_ELEM_PMIC] = mipi_exec_pmic,
};
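Each VBT MIPI sequence element is a type byte followed by a type-specific payload; the skip handlers above only have to compute where the next element starts. The framing they imply, derived from the code rather than from the VBT spec itself:

	/*
	 * I2C:  6 header bytes, a length byte at offset 6, then <length>
	 *       payload bytes      -> next = data + data[6] + 7
	 * SPI:  5 header bytes, a length byte at offset 5, then <length>
	 *       payload bytes      -> next = data + data[5] + 6
	 * PMIC: fixed 15 bytes     -> next = data + 15
	 */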
/*
@@ -385,11 +409,8 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
return;
data = dev_priv->vbt.dsi.sequence[seq_id];
- if (!data) {
- DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
- seq_id, sequence_name(seq_id));
+ if (!data)
return;
- }
WARN_ON(*data != seq_id);
@@ -420,7 +441,15 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
operation_size = *data++;
if (mipi_elem_exec) {
+ const u8 *next = data + operation_size;
+
data = mipi_elem_exec(intel_dsi, data);
+
+ /* Consistency check, when a size was given. */
+ if (operation_size && data != next) {
+ DRM_ERROR("Inconsistent operation size\n");
+ return;
+ }
} else if (operation_size) {
/* We have size, skip. */
DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
@@ -438,6 +467,8 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
static int vbt_panel_prepare(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_POWER_ON);
+ generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
return 0;
@@ -445,7 +476,8 @@ static int vbt_panel_prepare(struct drm_panel *panel)
static int vbt_panel_unprepare(struct drm_panel *panel)
{
- generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_POWER_OFF);
return 0;
}
@@ -453,12 +485,14 @@ static int vbt_panel_unprepare(struct drm_panel *panel)
static int vbt_panel_enable(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
+ generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_ON);
return 0;
}
static int vbt_panel_disable(struct drm_panel *panel)
{
+ generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_OFF);
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
return 0;
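Taken together, the VBT hooks now execute the panel lifecycle symmetrically; summarizing the generic_exec_sequence() calls above:

	/* prepare:   ASSERT_RESET -> POWER_ON -> DEASSERT_RESET -> INIT_OTP
	 * enable:    DISPLAY_ON -> BACKLIGHT_ON
	 * disable:   BACKLIGHT_OFF -> DISPLAY_OFF
	 * unprepare: ASSERT_RESET -> POWER_OFF
	 */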
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 6ab58a01b18e..56eff6004bc0 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -351,7 +351,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config)
{
- if (IS_BROXTON(encoder->base.dev))
+ if (IS_BROXTON(to_i915(encoder->base.dev)))
return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
@@ -515,11 +515,11 @@ bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
int intel_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_compute_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
return bxt_compute_dsi_pll(encoder, config);
return -ENODEV;
@@ -528,21 +528,21 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder,
void intel_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_enable_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_enable_dsi_pll(encoder, config);
}
void intel_disable_dsi_pll(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_disable_dsi_pll(encoder);
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
bxt_disable_dsi_pll(encoder);
}
@@ -564,10 +564,10 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_reset_clocks(encoder, port);
}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 2e452c505e7e..cd574900cd8d 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -412,16 +412,14 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
return mode;
}
-static char intel_dvo_port_name(i915_reg_t dvo_reg)
+static enum port intel_dvo_port(i915_reg_t dvo_reg)
{
if (i915_mmio_reg_equal(dvo_reg, DVOA))
- return 'A';
+ return PORT_A;
else if (i915_mmio_reg_equal(dvo_reg, DVOB))
- return 'B';
- else if (i915_mmio_reg_equal(dvo_reg, DVOC))
- return 'C';
+ return PORT_B;
else
- return '?';
+ return PORT_C;
}
void intel_dvo_init(struct drm_device *dev)
@@ -464,6 +462,7 @@ void intel_dvo_init(struct drm_device *dev)
bool dvoinit;
enum pipe pipe;
uint32_t dpll[I915_MAX_PIPES];
+ enum port port;
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
@@ -511,12 +510,15 @@ void intel_dvo_init(struct drm_device *dev)
if (!dvoinit)
continue;
+ port = intel_dvo_port(dvo->dvo_reg);
drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type,
- "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
+ "DVO %c", port_name(port));
intel_encoder->type = INTEL_OUTPUT_DVO;
+ intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 025e232a4205..8cceb345aa0f 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -82,12 +82,17 @@ static const struct engine_info {
},
};
-static struct intel_engine_cs *
+static int
intel_engine_setup(struct drm_i915_private *dev_priv,
enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
- struct intel_engine_cs *engine = &dev_priv->engine[id];
+ struct intel_engine_cs *engine;
+
+ GEM_BUG_ON(dev_priv->engine[id]);
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
engine->id = id;
engine->i915 = dev_priv;
@@ -97,7 +102,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->mmio_base = info->mmio_base;
engine->irq_shift = info->irq_shift;
- return engine;
+ dev_priv->engine[id] = engine;
+ return 0;
}
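intel_engine_setup() now allocates each engine instead of initialising a slot in an embedded array, so dev_priv->engine[] becomes an array of pointers with NULL marking engines the part lacks. A sketch of the shape change (declarations illustrative, names as in i915):

	/* before: storage always present, even for absent engines */
	struct intel_engine_cs engine[I915_NUM_ENGINES];

	/* after: allocated on demand; for_each_engine() skips NULL slots */
	struct intel_engine_cs *engine[I915_NUM_ENGINES];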
/**
@@ -110,13 +116,16 @@ int intel_engines_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
+ unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
unsigned int mask = 0;
int (*init)(struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
unsigned int i;
int ret;
- WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0);
- WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+ WARN_ON(ring_mask == 0);
+ WARN_ON(ring_mask &
GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
@@ -131,7 +140,11 @@ int intel_engines_init(struct drm_device *dev)
if (!init)
continue;
- ret = init(intel_engine_setup(dev_priv, i));
+ ret = intel_engine_setup(dev_priv, i);
+ if (ret)
+ goto cleanup;
+
+ ret = init(dev_priv->engine[i]);
if (ret)
goto cleanup;
@@ -143,7 +156,7 @@ int intel_engines_init(struct drm_device *dev)
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
- if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
+ if (WARN_ON(mask != ring_mask))
device_info->ring_mask = mask;
device_info->num_rings = hweight32(mask);
@@ -151,11 +164,11 @@ int intel_engines_init(struct drm_device *dev)
return 0;
cleanup:
- for (i = 0; i < I915_NUM_ENGINES; i++) {
+ for_each_engine(engine, dev_priv, id) {
if (i915.enable_execlists)
- intel_logical_ring_cleanup(&dev_priv->engine[i]);
+ intel_logical_ring_cleanup(engine);
else
- intel_engine_cleanup(&dev_priv->engine[i]);
+ intel_engine_cleanup(engine);
}
return ret;
@@ -232,7 +245,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
INIT_LIST_HEAD(&engine->execlist_queue);
spin_lock_init(&engine->execlist_lock);
- engine->fence_context = fence_context_alloc(1);
+ engine->fence_context = dma_fence_context_alloc(1);
intel_engine_init_requests(engine);
intel_engine_init_hangcheck(engine);
@@ -319,3 +332,137 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
}
+
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u64 acthd;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+ RING_ACTHD_UDW(engine->mmio_base));
+ else if (INTEL_GEN(dev_priv) >= 4)
+ acthd = I915_READ(RING_ACTHD(engine->mmio_base));
+ else
+ acthd = I915_READ(ACTHD);
+
+ return acthd;
+}
+
+u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u64 bbaddr;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
+ RING_BBADDR_UDW(engine->mmio_base));
+ else
+ bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+
+ return bbaddr;
+}
+
+const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
+ case I915_CACHE_L3_LLC: return " L3+LLC";
+ case I915_CACHE_WT: return " WT";
+ default: return "";
+ }
+}
+
+static inline uint32_t
+read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
+ int subslice, i915_reg_t reg)
+{
+ uint32_t mcr;
+ uint32_t ret;
+ enum forcewake_domains fw_domains;
+
+ fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ FW_REG_READ);
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ GEN8_MCR_SELECTOR,
+ FW_REG_READ | FW_REG_WRITE);
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+
+ mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
+ /*
+ * The HW expects the slice and subslice selectors to be reset to 0
+ * after reading out the registers.
+ */
+ WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
+ mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
+ mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+ I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+
+ ret = I915_READ_FW(reg);
+
+ mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
+ I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ return ret;
+}
+
+/* NB: instdone is zeroed by the memset below; each gen fills only the fields it has */
+void intel_engine_get_instdone(struct intel_engine_cs *engine,
+ struct intel_instdone *instdone)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u32 mmio_base = engine->mmio_base;
+ int slice;
+ int subslice;
+
+ memset(instdone, 0, sizeof(*instdone));
+
+ switch (INTEL_GEN(dev_priv)) {
+ default:
+ instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+ if (engine->id != RCS)
+ break;
+
+ instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+ instdone->sampler[slice][subslice] =
+ read_subslice_reg(dev_priv, slice, subslice,
+ GEN7_SAMPLER_INSTDONE);
+ instdone->row[slice][subslice] =
+ read_subslice_reg(dev_priv, slice, subslice,
+ GEN7_ROW_INSTDONE);
+ }
+ break;
+ case 7:
+ instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+ if (engine->id != RCS)
+ break;
+
+ instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+ instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
+
+ break;
+ case 6:
+ case 5:
+ case 4:
+ instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+
+ if (engine->id == RCS)
+ /* HACK: Using the wrong struct member */
+ instdone->slice_common = I915_READ(GEN4_INSTDONE1);
+ break;
+ case 3:
+ case 2:
+ instdone->instdone = I915_READ(GEN2_INSTDONE);
+ break;
+ }
+}
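The setup change above moves engines from an embedded array to per-engine allocations, so setup can fail gracefully and cleanup frees only what exists. A minimal userspace sketch of the same allocate-on-demand flow, with illustrative names (kzalloc() approximated by calloc()):

#include <errno.h>
#include <stdlib.h>

#define NUM_ENGINES 5			/* illustrative */

struct engine { int id; };

static struct engine *engines[NUM_ENGINES];

static int engine_setup(int id)
{
	struct engine *e;

	if (engines[id])		/* GEM_BUG_ON() analogue */
		return -EEXIST;

	e = calloc(1, sizeof(*e));	/* kzalloc() analogue */
	if (!e)
		return -ENOMEM;

	e->id = id;
	engines[id] = e;
	return 0;
}

static void engines_cleanup(void)
{
	/* Frees only what was created; free(NULL) is a no-op. */
	for (int id = 0; id < NUM_ENGINES; id++) {
		free(engines[id]);
		engines[id] = NULL;
	}
}

int main(void)
{
	int ret = 0;

	for (int id = 0; id < NUM_ENGINES; id++) {
		ret = engine_setup(id);
		if (ret)
			break;		/* "goto cleanup" analogue */
	}
	engines_cleanup();
	return ret ? 1 : 0;
}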
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index c43dd9abce79..cbe2ebda4c40 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -84,7 +84,7 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
{
int w, h;
- if (intel_rotation_90_or_270(cache->plane.rotation)) {
+ if (drm_rotation_90_or_270(cache->plane.rotation)) {
w = cache->plane.src_h;
h = cache->plane.src_w;
} else {
@@ -776,6 +776,14 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ /* We don't need to use a state cache here since this information is
+ * global for all CRTCs.
+ */
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
+ return false;
+ }
+
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
@@ -861,6 +869,11 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
return false;
}
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
+ return false;
+ }
+
if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
fbc->no_fbc_reason = "no enabled pipes can have FBC";
return false;
@@ -1223,6 +1236,59 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
cancel_work_sync(&fbc->work.work);
}
+static void intel_fbc_underrun_work_fn(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, fbc.underrun_work);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ mutex_lock(&fbc->lock);
+
+ /* Maybe we were scheduled twice. */
+ if (fbc->underrun_detected)
+ goto out;
+
+ DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
+ fbc->underrun_detected = true;
+
+ intel_fbc_deactivate(dev_priv);
+out:
+ mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
+ * @dev_priv: i915 device instance
+ *
+ * Without FBC, most underruns are harmless and don't really cause too many
+ * problems, except for an annoying message on dmesg. With FBC, underruns can
+ * become black screens or even worse, especially when paired with bad
+ * watermarks. So in order for us to be on the safe side, completely disable FBC
+ * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
+ * already suggests that watermarks may be bad, so try to be as safe as
+ * possible.
+ *
+ * This function is called from the IRQ handler.
+ */
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (!fbc_supported(dev_priv))
+ return;
+
+ /* There's no guarantee that underrun_detected won't be set to true
+ * right after this check and before the work is scheduled, but that's
+ * not a problem since we'll check it again in the work function
+ * while FBC is locked. This check here is just to prevent us from
+ * unnecessarily scheduling the work, and it relies on the fact that we
+ * never switch underrun_detected back to false after it's true. */
+ if (READ_ONCE(fbc->underrun_detected))
+ return;
+
+ schedule_work(&fbc->underrun_work);
+}
+
/**
* intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
* @dev_priv: i915 device instance
@@ -1294,6 +1360,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
enum pipe pipe;
INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
+ INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
fbc->enabled = false;
fbc->active = false;
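The underrun handling added above pairs a lock-free fast path in IRQ context with a re-check under the lock in the work function, made safe by the flag never going back to false. A userspace sketch of that double-check pattern, with illustrative names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t fbc_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool underrun_detected;	/* never goes back to false */

static void underrun_work_fn(void)
{
	pthread_mutex_lock(&fbc_lock);
	/* Maybe we were scheduled twice: re-check under the lock. */
	if (!atomic_load(&underrun_detected)) {
		atomic_store(&underrun_detected, true);
		printf("disabling FBC due to FIFO underrun\n");
	}
	pthread_mutex_unlock(&fbc_lock);
}

static void handle_underrun_irq(void)
{
	/* READ_ONCE() analogue: cheap early-out, may race harmlessly. */
	if (atomic_load_explicit(&underrun_detected, memory_order_relaxed))
		return;

	underrun_work_fn();	/* stands in for schedule_work() */
}

int main(void)
{
	handle_underrun_irq();	/* disables FBC once */
	handle_underrun_irq();	/* takes the fast path */
	return 0;
}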
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 2aa744081f09..3018f4f589c8 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -254,13 +254,13 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
old = !intel_crtc->cpu_fifo_underrun_disabled;
intel_crtc->cpu_fifo_underrun_disabled = !enable;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN5(dev) || IS_GEN6(dev))
+ else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN8(dev) || IS_GEN9(dev))
+ else if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
return old;
@@ -372,6 +372,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
DRM_ERROR("CPU pipe %c FIFO underrun\n",
pipe_name(pipe));
+
+ intel_fbc_handle_fifo_underrun_irq(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 6fd39efb7894..3c8eaae13732 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -100,12 +100,13 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int irqs;
/* tell all command streamers NOT to forward interrupts or vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route all GT interrupts to the host */
@@ -117,12 +118,13 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int irqs;
u32 tmp;
/* tell all command streamers to forward interrupts (but not vblank) to GuC */
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
@@ -347,7 +349,6 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- struct drm_device *dev = &dev_priv->drm;
struct i915_vma *vma;
int ret;
@@ -375,24 +376,22 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
/* Enable MIA caching. GuC clock gating is disabled. */
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
- /* WaDisableMinuteIaClockGating:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ /* WaDisableMinuteIaClockGating:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING));
}
- /* WaC6DisallowByGfxPause*/
- if (IS_SKL_REVID(dev, 0, SKL_REVID_C0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ /* WaC6DisallowByGfxPause:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- if (IS_GEN9(dev)) {
+ if (IS_GEN9(dev_priv)) {
/* DOP Clock Gating Enable for GuC clocks */
I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
I915_READ(GEN7_MISCCPCTL)));
@@ -720,23 +719,28 @@ void intel_guc_init(struct drm_device *dev)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
const char *fw_path;
- /* A negative value means "use platform default" */
- if (i915.enable_guc_loading < 0)
- i915.enable_guc_loading = HAS_GUC_UCODE(dev);
- if (i915.enable_guc_submission < 0)
- i915.enable_guc_submission = HAS_GUC_SCHED(dev);
+ if (!HAS_GUC(dev)) {
+ i915.enable_guc_loading = 0;
+ i915.enable_guc_submission = 0;
+ } else {
+ /* A negative value means "use platform default" */
+ if (i915.enable_guc_loading < 0)
+ i915.enable_guc_loading = HAS_GUC_UCODE(dev);
+ if (i915.enable_guc_submission < 0)
+ i915.enable_guc_submission = HAS_GUC_SCHED(dev);
+ }
if (!HAS_GUC_UCODE(dev)) {
fw_path = NULL;
- } else if (IS_SKYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv)) {
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
fw_path = I915_BXT_GUC_UCODE;
guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
- } else if (IS_KABYLAKE(dev)) {
+ } else if (IS_KABYLAKE(dev_priv)) {
fw_path = I915_KBL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
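The sanitization hunk above forces both GuC options off when the hardware has no GuC at all, and otherwise only resolves the "-1 = auto" defaults from hardware capabilities. A standalone sketch of the same clamping logic, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static int enable_guc_loading = -1;	/* module params, -1 = auto */
static int enable_guc_submission = -1;

static void sanitize_guc_params(bool has_guc, bool has_ucode, bool has_sched)
{
	if (!has_guc) {			/* no GuC: force everything off */
		enable_guc_loading = 0;
		enable_guc_submission = 0;
		return;
	}
	if (enable_guc_loading < 0)	/* auto: resolve from HW caps */
		enable_guc_loading = has_ucode;
	if (enable_guc_submission < 0)
		enable_guc_submission = has_sched;
}

int main(void)
{
	sanitize_guc_params(false, true, true);
	printf("loading=%d submission=%d\n",
	       enable_guc_loading, enable_guc_submission);	/* 0 0 */
	return 0;
}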
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 434f4d5c553d..290384e86c63 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -31,14 +31,20 @@
* GPU among multiple virtual machines on a time-sharing basis. Each
* virtual machine is presented a virtual GPU (vGPU), which has equivalent
* features as the underlying physical GPU (pGPU), so i915 driver can run
- * seamlessly in a virtual machine. This file provides the englightments
- * of GVT and the necessary components used by GVT in i915 driver.
+ * seamlessly in a virtual machine.
+ *
+ * To virtualize GPU resources, the GVT-g driver depends on hypervisor
+ * technology (e.g. KVM/VFIO/mdev or Xen) to provide resource access
+ * trapping, which is virtualized within the GVT-g device module. More
+ * architectural design documentation is available at
+ * https://01.org/group/2230/documentation-list.
*/
static bool is_supported_device(struct drm_i915_private *dev_priv)
{
if (IS_BROADWELL(dev_priv))
return true;
+ if (IS_SKYLAKE(dev_priv))
+ return true;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 960211df74db..25df2d65b985 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -24,7 +24,7 @@
#ifndef _INTEL_GVT_H_
#define _INTEL_GVT_H_
-#include "gvt/gvt.h"
+struct intel_gvt;
#ifdef CONFIG_DRM_I915_GVT
int intel_gvt_init(struct drm_i915_private *dev_priv);
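Replacing the heavyweight "gvt/gvt.h" include with a forward declaration works because the embedding structure only holds a pointer. A compile-only, header-style sketch with illustrative names:

/* The full definition is only needed where members are dereferenced. */
struct intel_gvt;

struct example_private {		/* illustrative stand-in */
	struct intel_gvt *gvt;		/* a pointer member compiles with
					 * just the forward declaration */
};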
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f40a35f2913a..af8715f31807 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t enabled_bits;
- enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+ enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
@@ -864,7 +864,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+ if (!HAS_PCH_SPLIT(dev_priv) && crtc->config->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@@ -879,9 +879,9 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
if (crtc->config->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
- else if (IS_CHERRYVIEW(dev))
+ else if (IS_CHERRYVIEW(dev_priv))
hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
@@ -911,9 +911,9 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & SDVO_ENABLE))
goto out;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
- else if (IS_CHERRYVIEW(dev))
+ else if (IS_CHERRYVIEW(dev_priv))
*pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -956,7 +956,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
- if (!HAS_PCH_SPLIT(dev) &&
+ if (!HAS_PCH_SPLIT(dev_priv) &&
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -1141,7 +1141,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -1241,7 +1241,7 @@ static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits)
{
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
if (clock < 25000)
return MODE_CLOCK_LOW;
@@ -1249,11 +1249,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
- if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+ if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
- if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+ if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
return MODE_CLOCK_RANGE;
return MODE_OK;
@@ -1265,6 +1265,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
{
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_mode_status status;
int clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -1287,7 +1288,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
status = hdmi_port_clock_valid(hdmi, clock, true);
/* if we can't do 8bpc we may still be able to do 12bpc */
- if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
+ if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK)
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
return status;
@@ -1297,7 +1298,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(to_i915(dev)))
return false;
/*
@@ -1312,7 +1313,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
int clock_12bpc = clock_8bpc * 3 / 2;
@@ -1339,7 +1340,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
clock_12bpc *= 2;
}
- if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
@@ -1799,6 +1800,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
+static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ u8 ddc_pin;
+
+ if (info->alternate_ddc_pin) {
+ DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
+ info->alternate_ddc_pin, port_name(port));
+ return info->alternate_ddc_pin;
+ }
+
+ switch (port) {
+ case PORT_B:
+ if (IS_BROXTON(dev_priv))
+ ddc_pin = GMBUS_PIN_1_BXT;
+ else
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ case PORT_C:
+ if (IS_BROXTON(dev_priv))
+ ddc_pin = GMBUS_PIN_2_BXT;
+ else
+ ddc_pin = GMBUS_PIN_DPC;
+ break;
+ case PORT_D:
+ if (IS_CHERRYVIEW(dev_priv))
+ ddc_pin = GMBUS_PIN_DPD_CHV;
+ else
+ ddc_pin = GMBUS_PIN_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ }
+
+ DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
+ ddc_pin, port_name(port));
+
+ return ddc_pin;
+}
+
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
@@ -1808,7 +1853,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
- uint8_t alternate_ddc_pin;
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));
@@ -1826,12 +1870,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
+ intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+
switch (port) {
case PORT_B:
- if (IS_BROXTON(dev_priv))
- intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
- else
- intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
/*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
@@ -1842,61 +1884,32 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->hpd_pin = HPD_PORT_B;
break;
case PORT_C:
- if (IS_BROXTON(dev_priv))
- intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
- else
- intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
- if (WARN_ON(IS_BROXTON(dev_priv)))
- intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
- else if (IS_CHERRYVIEW(dev_priv))
- intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
- else
- intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_E:
- /* On SKL PORT E doesn't have seperate GMBUS pin
- * We rely on VBT to set a proper alternate GMBUS pin. */
- alternate_ddc_pin =
- dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
- switch (alternate_ddc_pin) {
- case DDC_PIN_B:
- intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
- break;
- case DDC_PIN_C:
- intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
- break;
- case DDC_PIN_D:
- intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
- break;
- default:
- MISSING_CASE(alternate_ddc_pin);
- }
intel_encoder->hpd_pin = HPD_PORT_E;
break;
- case PORT_A:
- intel_encoder->hpd_pin = HPD_PORT_A;
- /* Internal port only for eDP. */
default:
- BUG();
+ MISSING_CASE(port);
+ return;
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
- } else if (IS_G4X(dev)) {
+ } else if (IS_G4X(dev_priv)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
- } else if (HAS_PCH_IBX(dev)) {
+ } else if (HAS_PCH_IBX(dev_priv)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
intel_hdmi->set_infoframes = ibx_set_infoframes;
intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
@@ -1906,7 +1919,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
}
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1920,7 +1933,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- if (IS_G4X(dev) && !IS_GM45(dev)) {
+ if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
@@ -1929,6 +1942,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
void intel_hdmi_init(struct drm_device *dev,
i915_reg_t hdmi_reg, enum port port)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
@@ -1949,7 +1963,7 @@ void intel_hdmi_init(struct drm_device *dev,
DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
intel_encoder->compute_config = intel_hdmi_compute_config;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_hdmi;
intel_encoder->post_disable = pch_post_disable_hdmi;
} else {
@@ -1957,29 +1971,30 @@ void intel_hdmi_init(struct drm_device *dev,
}
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
intel_encoder->enable = cpt_enable_hdmi;
- else if (HAS_PCH_IBX(dev))
+ else if (HAS_PCH_IBX(dev_priv))
intel_encoder->enable = ibx_enable_hdmi;
else
intel_encoder->enable = g4x_enable_hdmi;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
- if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->port = port;
+ if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
@@ -1993,7 +2008,7 @@ void intel_hdmi_init(struct drm_device *dev,
* to work on real hardware. And since g4x can send infoframes to
* only one port anyway, nothing is lost by allowing it.
*/
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
intel_dig_port->port = port;
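The intel_hdmi_ddc_pin() refactor centralizes pin selection: a VBT override wins, otherwise a per-port platform default applies, instead of open-coding the switch at the call site. A standalone sketch of that lookup, with illustrative pin values:

#include <stdio.h>

enum port { PORT_B, PORT_C, PORT_D, NUM_PORTS };

static unsigned char vbt_alternate_pin[NUM_PORTS];	/* 0 = no override */

static unsigned char hdmi_ddc_pin(enum port port, int is_broxton)
{
	static const unsigned char def[NUM_PORTS]     = { 0x5, 0x4, 0x6 };
	static const unsigned char bxt_def[NUM_PORTS] = { 0x1, 0x2, 0x6 };

	if (vbt_alternate_pin[port])	/* VBT override wins */
		return vbt_alternate_pin[port];

	return is_broxton ? bxt_def[port] : def[port];
}

int main(void)
{
	vbt_alternate_pin[PORT_D] = 0x3;	/* pretend VBT override */
	printf("pin for port C: 0x%x\n", hdmi_ddc_pin(PORT_C, 1));
	printf("pin for port D: 0x%x\n", hdmi_ddc_pin(PORT_D, 0));
	return 0;
}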
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 79aab9ad6faa..83f260bb4eef 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -138,11 +138,10 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
- struct drm_device *dev = &dev_priv->drm;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev) && !IS_845G(dev))
+ if (!IS_I830(dev_priv) && !IS_845G(dev_priv))
reserved = I915_READ_NOTRACE(bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@@ -468,13 +467,9 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- const unsigned int fw =
- intel_uncore_forcewake_for_reg(dev_priv, GMBUS0,
- FW_REG_READ | FW_REG_WRITE);
int i = 0, inc, try = 0;
int ret = 0;
- intel_uncore_forcewake_get(dev_priv, fw);
retry:
I915_WRITE_FW(GMBUS0, bus->reg0);
@@ -576,7 +571,6 @@ timeout:
ret = -EAGAIN;
out:
- intel_uncore_forcewake_put(dev_priv, fw);
return ret;
}
@@ -633,10 +627,10 @@ int intel_setup_gmbus(struct drm_device *dev)
unsigned int pin;
int ret;
- if (HAS_PCH_NOP(dev))
+ if (HAS_PCH_NOP(dev_priv))
return 0;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else if (!HAS_GMCH_DISPLAY(dev_priv))
dev_priv->gpio_mmio_base =
@@ -674,7 +668,7 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
- if (IS_I830(dev))
+ if (IS_I830(dev_priv))
bus->force_bit = 1;
intel_gpio_setup(bus, pin);
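get_reserved() above implements the usual read-modify-write rule: on most parts some GPIO bits must be carried over by software on every write. A standalone sketch of the pattern; the register layout here is illustrative only:

#include <stdio.h>

#define GPIO_DATA_PULLUP_DISABLE  (1u << 13)	/* illustrative bits */
#define GPIO_CLOCK_PULLUP_DISABLE (1u << 5)

static unsigned int gpio_reg = GPIO_CLOCK_PULLUP_DISABLE;	/* fake MMIO */

static unsigned int get_reserved(int must_preserve)
{
	if (!must_preserve)	/* i830/845G analogue: nothing to keep */
		return 0;

	return gpio_reg & (GPIO_DATA_PULLUP_DISABLE |
			   GPIO_CLOCK_PULLUP_DISABLE);
}

int main(void)
{
	unsigned int reserved = get_reserved(1);

	gpio_reg = reserved | 0x1;	/* new value keeps reserved bits */
	printf("reg = 0x%x\n", gpio_reg);
	return 0;
}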
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0adb879833ff..bc86585b9fbb 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -275,8 +275,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
engine->disable_lite_restore_wa =
- (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) &&
(engine->id == VCS || engine->id == VCS2);
engine->ctx_desc_template = GEN8_CTX_VALID;
@@ -853,13 +852,12 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
- * WaDisableLSQCROPERFforOCL:skl,kbl
+ * WaDisableLSQCROPERFforOCL:kbl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with the default value, we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
- IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1002,9 +1000,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
struct drm_i915_private *dev_priv = engine->i915;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
- /* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ /* WaDisableCtxRestoreArbitration:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1075,9 +1072,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
{
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index,
@@ -1104,9 +1100,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, MI_NOOP);
}
- /* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
+ /* WaDisableCtxRestoreArbitration:bxt */
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1250,8 +1245,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
intel_engine_init_hangcheck(engine);
- if (!execlists_elsp_idle(engine))
+ /* After a GPU reset, we may have requests to replay */
+ if (!execlists_elsp_idle(engine)) {
+ engine->execlist_port[0].count = 0;
+ engine->execlist_port[1].count = 0;
execlists_submit_ports(engine);
+ }
return 0;
}
@@ -1326,10 +1325,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
memset(&port[1], 0, sizeof(port[1]));
}
- /* CS is stopped, and we will resubmit both ports on resume */
GEM_BUG_ON(request->ctx != port[0].request->ctx);
- port[0].count = 0;
- port[1].count = 0;
/* Reset WaIdleLiteRestore:bdw,skl as well */
request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
@@ -1652,9 +1648,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
- if (!intel_engine_initialized(engine))
- return;
-
/*
* Tasklet cannot be active at this point due to intel_mark_active/idle,
* so this is just for documentation.
@@ -1681,13 +1674,16 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
lrc_destroy_wa_ctx_obj(engine);
engine->i915 = NULL;
+ dev_priv->engine[engine->id] = NULL;
+ kfree(engine);
}
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
engine->submit_request = execlists_submit_request;
}
@@ -1945,7 +1941,7 @@ static void execlists_init_reg_state(u32 *reg_state,
RING_START(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
RING_CTL(engine->mmio_base),
- ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+ RING_CTL_SIZE(ring->size) | RING_VALID);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
RING_BBADDR_UDW(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@@ -2153,30 +2149,43 @@ error_deref_obj:
void intel_lr_context_resume(struct drm_i915_private *dev_priv)
{
- struct i915_gem_context *ctx = dev_priv->kernel_context;
struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+
+ /* Because we emit WA_TAIL_DWORDS there may be a disparity
+ * between our bookkeeping in ce->ring->head and ce->ring->tail and
+ * that stored in context. As we only write new commands from
+ * ce->ring->tail onwards, everything before that is junk. If the GPU
+ * starts reading from its RING_HEAD from the context, it may try to
+ * execute that junk and die.
+ *
+ * So to avoid that we reset the context images upon resume. For
+ * simplicity, we just zero everything out.
+ */
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_context *ce = &ctx->engine[engine->id];
+ u32 *reg;
- for_each_engine(engine, dev_priv) {
- struct intel_context *ce = &ctx->engine[engine->id];
- void *vaddr;
- uint32_t *reg_state;
-
- if (!ce->state)
- continue;
-
- vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
- if (WARN_ON(IS_ERR(vaddr)))
- continue;
+ if (!ce->state)
+ continue;
- reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ reg = i915_gem_object_pin_map(ce->state->obj,
+ I915_MAP_WB);
+ if (WARN_ON(IS_ERR(reg)))
+ continue;
- reg_state[CTX_RING_HEAD+1] = 0;
- reg_state[CTX_RING_TAIL+1] = 0;
+ reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
+ reg[CTX_RING_HEAD+1] = 0;
+ reg[CTX_RING_TAIL+1] = 0;
- ce->state->obj->dirty = true;
- i915_gem_object_unpin_map(ce->state->obj);
+ ce->state->obj->dirty = true;
+ i915_gem_object_unpin_map(ce->state->obj);
- ce->ring->head = 0;
- ce->ring->tail = 0;
+ ce->ring->head = ce->ring->tail = 0;
+ ce->ring->last_retired_head = -1;
+ intel_ring_update_space(ce->ring);
+ }
}
}
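intel_lr_context_resume() now scrubs every context image, not just the kernel context's: the saved RING_HEAD/RING_TAIL and the software ring bookkeeping are reset so stale commands are never replayed. A standalone sketch of the per-context scrub, with illustrative reg-state offsets:

#include <stdio.h>

#define CTX_RING_HEAD 4		/* illustrative reg-state slots */
#define CTX_RING_TAIL 6

struct ring {
	unsigned int head, tail;
	int last_retired_head;
};

static void reset_context_image(unsigned int *reg_state, struct ring *ring)
{
	/* Zero the saved hardware head/tail... */
	reg_state[CTX_RING_HEAD + 1] = 0;
	reg_state[CTX_RING_TAIL + 1] = 0;

	/* ...and the software bookkeeping, so nothing stale is replayed. */
	ring->head = ring->tail = 0;
	ring->last_retired_head = -1;
}

int main(void)
{
	unsigned int reg_state[16] = { [CTX_RING_HEAD + 1] = 0x1234 };
	struct ring ring = { .tail = 0x1234 };

	reset_context_image(reg_state, &ring);
	printf("head=%u tail=%u\n", ring.head, ring.tail);
	return 0;
}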
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
new file mode 100644
index 000000000000..632149c6b3ad
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <drm/drm_edid.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_dual_mode_helper.h>
+#include "intel_drv.h"
+
+static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
+{
+ enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID;
+ struct i2c_adapter *adapter = &lspcon->aux->ddc;
+
+ if (drm_lspcon_get_mode(adapter, &current_mode))
+ DRM_ERROR("Error reading LSPCON mode\n");
+ else
+ DRM_DEBUG_KMS("Current LSPCON mode %s\n",
+ current_mode == DRM_LSPCON_MODE_PCON ? "PCON" : "LS");
+ return current_mode;
+}
+
+static int lspcon_change_mode(struct intel_lspcon *lspcon,
+ enum drm_lspcon_mode mode, bool force)
+{
+ int err;
+ enum drm_lspcon_mode current_mode;
+ struct i2c_adapter *adapter = &lspcon->aux->ddc;
+
+ err = drm_lspcon_get_mode(adapter, &current_mode);
+ if (err) {
+ DRM_ERROR("Error reading LSPCON mode\n");
+ return err;
+ }
+
+ if (current_mode == mode) {
+ DRM_DEBUG_KMS("Current mode = desired LSPCON mode\n");
+ return 0;
+ }
+
+ err = drm_lspcon_set_mode(adapter, mode);
+ if (err < 0) {
+ DRM_ERROR("LSPCON mode change failed\n");
+ return err;
+ }
+
+ lspcon->mode = mode;
+ DRM_DEBUG_KMS("LSPCON mode changed done\n");
+ return 0;
+}
+
+static bool lspcon_probe(struct intel_lspcon *lspcon)
+{
+ enum drm_dp_dual_mode_type adaptor_type;
+ struct i2c_adapter *adapter = &lspcon->aux->ddc;
+
+ /* Let's probe the adaptor and check its type */
+ adaptor_type = drm_dp_dual_mode_detect(adapter);
+ if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
+ DRM_DEBUG_KMS("No LSPCON detected, found %s\n",
+ drm_dp_get_dual_mode_type_name(adaptor_type));
+ return false;
+ }
+
+ /* Yay ... got an LSPCON device */
+ DRM_DEBUG_KMS("LSPCON detected\n");
+ lspcon->mode = lspcon_get_current_mode(lspcon);
+ lspcon->active = true;
+ return true;
+}
+
+void lspcon_resume(struct intel_lspcon *lspcon)
+{
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true))
+ DRM_ERROR("LSPCON resume failed\n");
+ else
+ DRM_DEBUG_KMS("LSPCON resume success\n");
+}
+
+bool lspcon_init(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (!IS_GEN9(dev_priv)) {
+ DRM_ERROR("LSPCON is supported on GEN9 only\n");
+ return false;
+ }
+
+ lspcon->active = false;
+ lspcon->mode = DRM_LSPCON_MODE_INVALID;
+ lspcon->aux = &dp->aux;
+
+ if (!lspcon_probe(lspcon)) {
+ DRM_ERROR("Failed to probe lspcon\n");
+ return false;
+ }
+
+ /*
+ * In the SW state machine, let's put the LSPCON in PCON mode only.
+ * That way it will work with both HDMI 1.4 and HDMI 2.0 sinks.
+ */
+ if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON,
+ true) < 0) {
+ DRM_ERROR("LSPCON mode change to PCON failed\n");
+ return false;
+ }
+ }
+
+ DRM_DEBUG_KMS("Success: LSPCON init\n");
+ return true;
+}
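A hypothetical caller of the new helpers might look like the following kernel-style fragment. It is not part of the patch and the name is made up; the real hookup lives in the DDI init path:

static int example_ddi_init_lspcon(struct intel_digital_port *dig_port)
{
	struct intel_lspcon *lspcon = &dig_port->lspcon;

	/* Probes the adaptor and forces PCON mode on success. */
	if (!lspcon_init(dig_port))
		return -ENODEV;

	/* After S3/S4 the adaptor may come back in LS mode. */
	lspcon_resume(lspcon);
	return 0;
}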
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e1d47d51ea47..199b90c7907a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -106,7 +106,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & LVDS_PORT_EN))
goto out;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -396,7 +396,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
@@ -406,7 +406,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
unsigned int lvds_bpp;
/* Should never happen!! */
- if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
+ if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
DRM_ERROR("Can't support LVDS on pipe A\n");
return false;
}
@@ -431,7 +431,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
pipe_config->has_pch_encoder = true;
intel_pch_panel_fitting(intel_crtc, pipe_config,
@@ -566,7 +566,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
- if (!HAS_PCH_SPLIT(dev))
+ if (!HAS_PCH_SPLIT(dev_priv))
intel_display_resume(dev);
dev_priv->modeset_restore = MODESET_DONE;
@@ -949,16 +949,17 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
-static bool intel_lvds_supported(struct drm_device *dev)
+static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
{
/* With the introduction of the PCH we gained a dedicated
* LVDS presence pin, use it. */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
return true;
/* Otherwise LVDS was only attached to mobile products,
* except for the inglorious 830gm */
- if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ if (INTEL_GEN(dev_priv) <= 4 &&
+ IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
return true;
return false;
@@ -990,21 +991,21 @@ void intel_lvds_init(struct drm_device *dev)
int pipe;
u8 pin;
- if (!intel_lvds_supported(dev))
+ if (!intel_lvds_supported(dev_priv))
return;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
lvds = I915_READ(lvds_reg);
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
if (dev_priv->vbt.edp.support) {
@@ -1064,12 +1065,13 @@ void intel_lvds_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector_attach_encoder(intel_connector, intel_encoder);
- intel_encoder->type = INTEL_OUTPUT_LVDS;
+ intel_encoder->type = INTEL_OUTPUT_LVDS;
+ intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- else if (IS_GEN4(dev))
+ else if (IS_GEN4(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
else
intel_encoder->crtc_mask = (1 << 1);
@@ -1157,7 +1159,7 @@ void intel_lvds_init(struct drm_device *dev)
*/
/* Ironlake: FIXME if still fail, not try pipe mode now */
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
goto failed;
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a24bc8c7889f..25bcd4a178d3 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -233,7 +233,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = dev_priv->engine[RCS];
return i915_gem_request_alloc(engine, dev_priv->kernel_context);
}
@@ -1470,6 +1470,8 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
kfree(dev_priv->overlay);
}
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
@@ -1587,3 +1589,5 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
P(UVSCALEV);
#undef P
}
+
+#endif
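The new guard compiles the overlay error-state code only when CONFIG_DRM_I915_CAPTURE_ERROR is enabled. A sketch of the companion header pattern: a static inline stub so callers need no #ifdefs of their own. The stub below is an assumption about the matching header, not part of this hunk:

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
#else
static inline struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
{
	return NULL;	/* capture compiled out */
}
#endif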
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index db24f898853c..560fc7af8267 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -252,8 +252,8 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
- int is_ddr3,
+static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
+ bool is_ddr3,
int fsb,
int mem)
{
@@ -322,11 +322,11 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
struct drm_device *dev = &dev_priv->drm;
u32 val;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
POSTING_READ(FW_BLC_SELF_VLV);
dev_priv->wm.vlv.cxsr = enable;
- } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+ } else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
POSTING_READ(FW_BLC_SELF);
} else if (IS_PINEVIEW(dev)) {
@@ -334,12 +334,12 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
I915_WRITE(DSPFW3, val);
POSTING_READ(DSPFW3);
- } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
I915_WRITE(FW_BLC_SELF, val);
POSTING_READ(FW_BLC_SELF);
- } else if (IS_I915GM(dev)) {
+ } else if (IS_I915GM(dev_priv)) {
/*
* FIXME can't find a bit like this for 915G,
* and yet it does have the related watermark in
@@ -648,8 +648,10 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
u32 reg;
unsigned long wm;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
- dev_priv->fsb_freq, dev_priv->mem_freq);
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
intel_set_memory_cxsr(dev_priv, false);
@@ -775,13 +777,13 @@ static bool g4x_check_srwm(struct drm_device *dev,
display_wm, cursor_wm);
if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+ DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
display_wm, display->max_wm);
return false;
}
if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+ DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
cursor_wm, cursor->max_wm);
return false;
}
@@ -1528,7 +1530,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_I945GM(dev))
wm_info = &i945_wm_info;
- else if (!IS_GEN2(dev))
+ else if (!IS_GEN2(dev_priv))
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
@@ -1538,7 +1540,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
cpp = 4;
adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
@@ -1552,7 +1554,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
planea_wm = wm_info->max_wm;
}
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
wm_info = &i830_bc_wm_info;
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
@@ -1560,7 +1562,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
cpp = 4;
adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
@@ -1579,7 +1581,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
- if (IS_I915GM(dev) && enabled) {
+ if (IS_I915GM(dev_priv) && enabled) {
struct drm_i915_gem_object *obj;
obj = intel_fb_obj(enabled->primary->state->fb);
@@ -1609,7 +1611,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
unsigned long line_time_us;
int entries;
- if (IS_I915GM(dev) || IS_I945GM(dev))
+ if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
cpp = 4;
line_time_us = max(htotal * 1000 / clock, 1);
@@ -1623,7 +1625,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (srwm < 0)
srwm = 1;
- if (IS_I945G(dev) || IS_I945GM(dev))
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
@@ -2080,10 +2082,10 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (IS_GEN9(dev)) {
+ if (IS_GEN9(dev_priv)) {
uint32_t val;
int ret, i;
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
@@ -2155,7 +2157,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
}
}
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
@@ -2182,42 +2184,44 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
}
}
-static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
wm[0] = 13;
}
-static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
wm[0] = 13;
/* WaDoubleCursorLP3Latency:ivb */
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
wm[3] *= 2;
}
-int ilk_wm_max_level(const struct drm_device *dev)
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
/* how many WM levels are we expecting */
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return 7;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 4;
- else if (INTEL_INFO(dev)->gen >= 6)
+ else if (INTEL_GEN(dev_priv) >= 6)
return 3;
else
return 2;
}
-static void intel_print_wm_latency(struct drm_device *dev,
+static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
const char *name,
const uint16_t wm[8])
{
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
for (level = 0; level <= max_level; level++) {
unsigned int latency = wm[level];
@@ -2232,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
* - latencies are in us on gen9.
* - before then, WM1+ latency values are in 0.5us units
*/
- if (IS_GEN9(dev))
+ if (IS_GEN9(dev_priv))
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -2246,7 +2250,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
uint16_t wm[5], uint16_t min)
{
- int level, max_level = ilk_wm_max_level(&dev_priv->drm);
+ int level, max_level = ilk_wm_max_level(dev_priv);
if (wm[0] >= min)
return false;
@@ -2275,9 +2279,9 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
return;
DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
static void ilk_setup_wm_latency(struct drm_device *dev)
@@ -2291,14 +2295,14 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
sizeof(dev_priv->wm.pri_latency));
- intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
- intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+ intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
- intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
snb_wm_latency_quirk(dev);
}
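intel_print_wm_latency() scales raw table values to 0.1 µs units before printing: gen9 tables are in µs, while older parts store WM1+ in 0.5 µs units. A standalone sketch of that conversion, with pre-gen9 WM0 passed through as the code does:

#include <stdio.h>

static unsigned int latency_in_0_1_us(int gen, int level, unsigned int raw)
{
	if (gen >= 9)
		return raw * 10;	/* gen9 tables are in 1 us units */
	if (level > 0)
		return raw * 5;		/* WM1+ stored in 0.5 us units */
	return raw;			/* WM0 passed through, as the code does */
}

int main(void)
{
	unsigned int l = latency_in_0_1_us(6, 3, 45);	/* e.g. SNB WM3 */

	printf("latency %u.%u usec\n", l / 10, l % 10);	/* 22.5 usec */
	return 0;
}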
@@ -2307,7 +2311,7 @@ static void skl_setup_wm_latency(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
- intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
+ intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
static bool ilk_validate_pipe_wm(struct drm_device *dev,
@@ -2345,7 +2349,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
struct intel_plane_state *pristate = NULL;
struct intel_plane_state *sprstate = NULL;
struct intel_plane_state *curstate = NULL;
- int level, max_level = ilk_wm_max_level(dev), usable_level;
+ int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
struct ilk_wm_maximums max;
pipe_wm = &cstate->wm.ilk.optimal;
@@ -2390,7 +2394,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
pipe_wm->wm[0] = pipe_wm->raw_wm[0];
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
if (!ilk_validate_pipe_wm(dev, pipe_wm))
@@ -2432,7 +2436,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
{
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(to_i915(dev));
/*
* Start with the final, target watermarks, then combine with the
@@ -2516,11 +2520,11 @@ static void ilk_wm_merge(struct drm_device *dev,
struct intel_pipe_wm *merged)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
int last_enabled_level = max_level;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
+ if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
@@ -2556,7 +2560,7 @@ static void ilk_wm_merge(struct drm_device *dev,
* What we should check here is whether FBC can be
* enabled sometime later.
*/
- if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
+ if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
intel_fbc_is_active(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -2577,7 +2581,7 @@ static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 2 * level;
else
return dev_priv->wm.pri_latency[level];
@@ -2656,7 +2660,7 @@ static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(to_i915(dev));
int level1 = 0, level2 = 0;
for (level = 1; level <= max_level; level++) {
@@ -2801,7 +2805,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
val = I915_READ(WM_MISC);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
@@ -2879,6 +2883,21 @@ skl_wm_plane_id(const struct intel_plane *plane)
}
}
+/*
+ * FIXME: We still don't have the proper code to detect if we need to apply
+ * the WA, so assume we'll always need it in order to avoid underruns.
+ */
+static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+ IS_KABYLAKE(dev_priv))
+ return true;
+
+ return false;
+}
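The helper above only gates the platforms; the actual effect, applied in later hunks, is a fixed latency bump for X-tiled surfaces. A minimal standalone sketch of that relationship (the helper name and types are illustrative; the 15 µs constant is taken from the hunks below):

#include <stdbool.h>
#include <stdint.h>

#define X_TILED_WA_US 15 /* extra latency this patch assumes for X-tiled */

static uint32_t effective_latency(uint32_t base_latency_us,
                                  bool needs_bw_wa, bool x_tiled)
{
        /* mirrors the "latency += 15" adjustments applied below */
        if (needs_bw_wa && x_tiled)
                return base_latency_us + X_TILED_WA_US;
        return base_latency_us;
}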
+
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
@@ -2999,9 +3018,12 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
+ struct intel_plane *plane;
+ struct intel_crtc_state *cstate;
+ struct skl_plane_wm *wm;
enum pipe pipe;
- int level, plane;
+ int level, latency;
if (!intel_has_sagv(dev_priv))
return false;
@@ -3019,27 +3041,37 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
/* Since we're now guaranteed to only have one active CRTC... */
pipe = ffs(intel_state->active_crtcs) - 1;
- crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ cstate = to_intel_crtc_state(crtc->base.state);
- if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
return false;
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ wm = &cstate->wm.skl.optimal.planes[skl_wm_plane_id(plane)];
+
/* Skip this plane if it's not enabled */
- if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+ if (!wm->wm[0].plane_en)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = ilk_wm_max_level(dev);
- intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+ for (level = ilk_wm_max_level(dev_priv);
+ !wm->wm[level].plane_en; --level)
{ }
+ latency = dev_priv->wm.skl_latency[level];
+
+ if (skl_needs_memory_bw_wa(intel_state) &&
+ plane->base.state->fb->modifier[0] ==
+ I915_FORMAT_MOD_X_TILED)
+ latency += 15;
+
/*
* If any of the planes on this pipe don't enable wm levels
* that incur memory latencies higher than 30µs we can't enable
* the SAGV
*/
- if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+ if (latency < SKL_SAGV_BLOCK_TIME)
return false;
}
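The loop above reduces to a simple per-plane predicate: find the highest enabled watermark level and require its workaround-adjusted latency to cover the SAGV block time. A self-contained sketch, under the assumptions that SKL_SAGV_BLOCK_TIME is 30 µs and that there are eight levels; all names here are illustrative:

#include <stdbool.h>
#include <stdint.h>

#define SAGV_BLOCK_TIME_US 30 /* assumed value of SKL_SAGV_BLOCK_TIME */
#define NUM_WM_LEVELS 8       /* assumed level count */

struct plane_wm_sketch {
        bool enabled[NUM_WM_LEVELS];
        uint32_t latency_us[NUM_WM_LEVELS];
};

static bool plane_allows_sagv(const struct plane_wm_sketch *p)
{
        int level = NUM_WM_LEVELS - 1;

        if (!p->enabled[0])
                return true;    /* disabled planes are skipped entirely */

        while (!p->enabled[level])
                level--;        /* highest enabled watermark level */

        return p->latency_us[level] >= SAGV_BLOCK_TIME_US;
}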
@@ -3058,7 +3090,6 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
struct drm_crtc *for_crtc = cstate->base.crtc;
unsigned int pipe_size, ddb_size;
int nth_active_pipe;
- int pipe = to_intel_crtc(for_crtc)->pipe;
if (WARN_ON(!state) || !cstate->base.active) {
alloc->start = 0;
@@ -3086,7 +3117,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
* we currently hold.
*/
if (!intel_state->active_pipe_changes) {
- *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
+ *alloc = to_intel_crtc(for_crtc)->hw_ddb;
return;
}
@@ -3173,7 +3204,7 @@ skl_plane_downscale_amount(const struct intel_plane_state *pstate)
src_h = drm_rect_height(&pstate->base.src);
dst_w = drm_rect_width(&pstate->base.dst);
dst_h = drm_rect_height(&pstate->base.dst);
- if (intel_rotation_90_or_270(pstate->base.rotation))
+ if (drm_rotation_90_or_270(pstate->base.rotation))
swap(dst_w, dst_h);
downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
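The downscale math is 16.16 fixed point: the source rectangle arrives in 16.16 units, the destination in integer pixels, and the ratio is clamped so upscales count as 1.0. A small sketch with hypothetical names; for readability the example takes both arguments in plain pixels:

#include <stdint.h>

#define FP16(x) ((uint32_t)(x) << 16)    /* 16.16 fixed point */
#define NO_SCALING FP16(1)

static uint32_t downscale_ratio(uint32_t src_px, uint32_t dst_px)
{
        uint32_t ratio = FP16(src_px) / dst_px;    /* 16.16 result */
        return ratio > NO_SCALING ? ratio : NO_SCALING;
}
/* downscale_ratio(1080, 540) == FP16(2): a 2x downscale; upscales
 * clamp to NO_SCALING, i.e. a ratio of exactly 1.0. */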
@@ -3204,7 +3235,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (intel_rotation_90_or_270(pstate->rotation))
+ if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
/* for planar format */
@@ -3304,7 +3335,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (intel_rotation_90_or_270(pstate->rotation))
+ if (drm_rotation_90_or_270(pstate->rotation))
swap(src_w, src_h);
/* Halve UV plane width and height for NV12 */
@@ -3318,7 +3349,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
else
plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
- if (intel_rotation_90_or_270(pstate->rotation)) {
+ if (drm_rotation_90_or_270(pstate->rotation)) {
switch (plane_bpp) {
case 1:
min_scanlines = 32;
@@ -3354,7 +3385,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_plane *plane;
struct drm_plane_state *pstate;
enum pipe pipe = intel_crtc->pipe;
- struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
+ struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
uint16_t alloc_size, start, cursor_blocks;
uint16_t *minimum = cstate->wm.skl.minimum_blocks;
uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
@@ -3370,7 +3401,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
if (!cstate->base.active) {
- ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
+ alloc->start = alloc->end = 0;
return 0;
}
@@ -3549,22 +3580,28 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t width = 0, height = 0;
uint32_t plane_pixel_rate;
uint32_t y_tile_minimum, y_min_scanlines;
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(cstate->base.state);
+ bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
*enabled = false;
return 0;
}
+ if (apply_memory_bw_wa && fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
+ latency += 15;
+
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (intel_rotation_90_or_270(pstate->rotation))
+ if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
- if (intel_rotation_90_or_270(pstate->rotation)) {
+ if (drm_rotation_90_or_270(pstate->rotation)) {
int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3576,11 +3613,12 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
case 2:
y_min_scanlines = 8;
break;
- default:
- WARN(1, "Unsupported pixel depth for rotation");
case 4:
y_min_scanlines = 4;
break;
+ default:
+ MISSING_CASE(cpp);
+ return -EINVAL;
}
} else {
y_min_scanlines = 4;
@@ -3606,12 +3644,17 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
plane_blocks_per_line);
y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
+ if (apply_memory_bw_wa)
+ y_tile_minimum *= 2;
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
selected_result = max(method2, y_tile_minimum);
} else {
- if ((ddb_allocation / plane_blocks_per_line) >= 1)
+ if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
+ (plane_bytes_per_line / 512 < 1))
+ selected_result = method2;
+ else if ((ddb_allocation / plane_blocks_per_line) >= 1)
selected_result = min(method1, method2);
else
selected_result = method1;
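The new branch covers surfaces too small to fill a single 512-byte block per line, which previously fell through to the wrong method. A sketch paraphrasing the resulting selection logic with illustrative names (assumes blocks_per_line is nonzero, as it must be in the real code):

#include <stdint.h>

static uint32_t select_linear_wm(uint32_t method1, uint32_t method2,
                                 uint32_t cpp, uint32_t crtc_htotal,
                                 uint32_t bytes_per_line,
                                 uint32_t ddb_alloc,
                                 uint32_t blocks_per_line)
{
        /* tiny surfaces: less than one 512-byte block per line */
        if (cpp * crtc_htotal / 512 < 1 && bytes_per_line / 512 < 1)
                return method2;
        /* at least one full line of DDB blocks: take the cheaper result */
        if (ddb_alloc / blocks_per_line >= 1)
                return method1 < method2 ? method1 : method2;
        return method1;
}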
@@ -3661,67 +3704,52 @@ static int
skl_compute_wm_level(const struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb,
struct intel_crtc_state *cstate,
+ struct intel_plane *intel_plane,
int level,
struct skl_wm_level *result)
{
struct drm_atomic_state *state = cstate->base.state;
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
- struct intel_plane_state *intel_pstate;
+ struct drm_plane *plane = &intel_plane->base;
+ struct intel_plane_state *intel_pstate = NULL;
uint16_t ddb_blocks;
enum pipe pipe = intel_crtc->pipe;
int ret;
+ int i = skl_wm_plane_id(intel_plane);
+
+ if (state)
+ intel_pstate =
+ intel_atomic_get_existing_plane_state(state,
+ intel_plane);
/*
- * We'll only calculate watermarks for planes that are actually
- * enabled, so make sure all other planes are set as disabled.
+ * Note: If we start supporting multiple pending atomic commits against
+ * the same planes/CRTC's in the future, plane->state will no longer be
+ * the correct pre-state to use for the calculations here and we'll
+ * need to change where we get the 'unchanged' plane data from.
+ *
+ * For now this is fine because we only allow one queued commit against
+ * a CRTC. Even if the plane isn't modified by this transaction and we
+ * don't have a plane lock, we still have the CRTC's lock, so we know
+ * that no other transactions are racing with us to update it.
*/
- memset(result, 0, sizeof(*result));
-
- for_each_intel_plane_mask(&dev_priv->drm,
- intel_plane,
- cstate->base.plane_mask) {
- int i = skl_wm_plane_id(intel_plane);
-
- plane = &intel_plane->base;
- intel_pstate = NULL;
- if (state)
- intel_pstate =
- intel_atomic_get_existing_plane_state(state,
- intel_plane);
-
- /*
- * Note: If we start supporting multiple pending atomic commits
- * against the same planes/CRTC's in the future, plane->state
- * will no longer be the correct pre-state to use for the
- * calculations here and we'll need to change where we get the
- * 'unchanged' plane data from.
- *
- * For now this is fine because we only allow one queued commit
- * against a CRTC. Even if the plane isn't modified by this
- * transaction and we don't have a plane lock, we still have
- * the CRTC's lock, so we know that no other transactions are
- * racing with us to update it.
- */
- if (!intel_pstate)
- intel_pstate = to_intel_plane_state(plane->state);
+ if (!intel_pstate)
+ intel_pstate = to_intel_plane_state(plane->state);
- WARN_ON(!intel_pstate->base.fb);
+ WARN_ON(!intel_pstate->base.fb);
- ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
+ ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
- ret = skl_compute_plane_wm(dev_priv,
- cstate,
- intel_pstate,
- ddb_blocks,
- level,
- &result->plane_res_b[i],
- &result->plane_res_l[i],
- &result->plane_en[i]);
- if (ret)
- return ret;
- }
+ ret = skl_compute_plane_wm(dev_priv,
+ cstate,
+ intel_pstate,
+ ddb_blocks,
+ level,
+ &result->plane_res_b,
+ &result->plane_res_l,
+ &result->plane_en);
+ if (ret)
+ return ret;
return 0;
}
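The lookup-with-fallback at the top of the rewritten function is the key pattern: prefer the plane state carried by the in-flight atomic state, else fall back to the committed plane->state, which the comment argues is safe while only one commit may be queued per CRTC. A standalone sketch of the pattern; every type and function here is an illustrative stand-in, not the real i915 API:

#include <stddef.h>

struct plane_state_sketch { int placeholder; };
struct plane_sketch { struct plane_state_sketch *state; };

/* stand-in for intel_atomic_get_existing_plane_state(): NULL means the
 * plane is untouched by the commit being computed */
static struct plane_state_sketch *
lookup_in_atomic_state(void *atomic_state, struct plane_sketch *plane)
{
        (void)atomic_state;
        (void)plane;
        return NULL;
}

static struct plane_state_sketch *
pick_plane_state(void *atomic_state, struct plane_sketch *plane)
{
        struct plane_state_sketch *ps = NULL;

        if (atomic_state)
                ps = lookup_in_atomic_state(atomic_state, plane);
        if (!ps)
                ps = plane->state; /* safe: one queued commit per CRTC */
        return ps;
}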
@@ -3729,32 +3757,28 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
+ uint32_t pixel_rate;
+
if (!cstate->base.active)
return 0;
- if (WARN_ON(ilk_pipe_pixel_rate(cstate) == 0))
+ pixel_rate = ilk_pipe_pixel_rate(cstate);
+
+ if (WARN_ON(pixel_rate == 0))
return 0;
return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
- ilk_pipe_pixel_rate(cstate));
+ pixel_rate);
}
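The linetime formula converts a scanline's duration into eighths of a microsecond: with htotal in pixels and the pixel rate in kHz, 8 * htotal * 1000 / rate gives the line period in 1/8 µs units. A worked sketch (the kHz unit is an assumption implied by the factor of 1000):

#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* e.g. 2200 px at 148500 kHz -> 119, i.e. a line of roughly 14.8 µs */
static uint32_t linetime_wm(uint32_t crtc_htotal, uint32_t pixel_rate_khz)
{
        return DIV_ROUND_UP(8 * crtc_htotal * 1000, pixel_rate_khz);
}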
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
struct skl_wm_level *trans_wm /* out */)
{
- struct drm_crtc *crtc = cstate->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
-
if (!cstate->base.active)
return;
/* Until we know more, just disable transition WMs */
- for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
- int i = skl_wm_plane_id(intel_plane);
-
- trans_wm->plane_en[i] = false;
- }
+ trans_wm->plane_en = false;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
@@ -3763,77 +3787,34 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ struct intel_plane *intel_plane;
+ struct skl_plane_wm *wm;
+ int level, max_level = ilk_wm_max_level(dev_priv);
int ret;
- for (level = 0; level <= max_level; level++) {
- ret = skl_compute_wm_level(dev_priv, ddb, cstate,
- level, &pipe_wm->wm[level]);
- if (ret)
- return ret;
- }
- pipe_wm->linetime = skl_compute_linetime_wm(cstate);
-
- skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
-
- return 0;
-}
-
-static void skl_compute_wm_results(struct drm_device *dev,
- struct skl_pipe_wm *p_wm,
- struct skl_wm_values *r,
- struct intel_crtc *intel_crtc)
-{
- int level, max_level = ilk_wm_max_level(dev);
- enum pipe pipe = intel_crtc->pipe;
- uint32_t temp;
- int i;
-
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = 0;
-
- temp |= p_wm->wm[level].plane_res_l[i] <<
- PLANE_WM_LINES_SHIFT;
- temp |= p_wm->wm[level].plane_res_b[i];
- if (p_wm->wm[level].plane_en[i])
- temp |= PLANE_WM_EN;
+ /*
+ * We'll only calculate watermarks for planes that are actually
+ * enabled, so make sure all other planes are set as disabled.
+ */
+ memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
- r->plane[pipe][i][level] = temp;
+ for_each_intel_plane_mask(&dev_priv->drm,
+ intel_plane,
+ cstate->base.plane_mask) {
+ wm = &pipe_wm->planes[skl_wm_plane_id(intel_plane)];
+
+ for (level = 0; level <= max_level; level++) {
+ ret = skl_compute_wm_level(dev_priv, ddb, cstate,
+ intel_plane, level,
+ &wm->wm[level]);
+ if (ret)
+ return ret;
}
-
- temp = 0;
-
- temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
- temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
-
- if (p_wm->wm[level].plane_en[PLANE_CURSOR])
- temp |= PLANE_WM_EN;
-
- r->plane[pipe][PLANE_CURSOR][level] = temp;
-
+ skl_compute_transition_wm(cstate, &wm->trans_wm);
}
+ pipe_wm->linetime = skl_compute_linetime_wm(cstate);
- /* transition WMs */
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = 0;
- temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
- temp |= p_wm->trans_wm.plane_res_b[i];
- if (p_wm->trans_wm.plane_en[i])
- temp |= PLANE_WM_EN;
-
- r->plane_trans[pipe][i] = temp;
- }
-
- temp = 0;
- temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
- temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
- if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
- temp |= PLANE_WM_EN;
-
- r->plane_trans[pipe][PLANE_CURSOR] = temp;
-
- r->wm_linetime[pipe] = p_wm->linetime;
+ return 0;
}
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
@@ -3846,53 +3827,77 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
I915_WRITE(reg, 0);
}
+static void skl_write_wm_level(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const struct skl_wm_level *level)
+{
+ uint32_t val = 0;
+
+ if (level->plane_en) {
+ val |= PLANE_WM_EN;
+ val |= level->plane_res_b;
+ val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
+ }
+
+ I915_WRITE(reg, val);
+}
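The helper packs one watermark level into a single register write: an enable bit, a block count in the low bits, and a line count at PLANE_WM_LINES_SHIFT. A hedged sketch of the encoding; only the field structure is visible in the patch, so every constant below is an assumption, not a confirmed register layout:

#include <stdbool.h>
#include <stdint.h>

#define WM_EN          (1u << 31)  /* assumed position of PLANE_WM_EN */
#define WM_BLOCKS_MASK 0x3ffu      /* assumed width of the blocks field */
#define WM_LINES_SHIFT 14          /* assumed PLANE_WM_LINES_SHIFT */
#define WM_LINES_MASK  0x1fu       /* assumed width of the lines field */

static uint32_t encode_wm_level(bool en, uint32_t blocks, uint32_t lines)
{
        uint32_t val = 0;

        if (en)
                val = WM_EN | (blocks & WM_BLOCKS_MASK) |
                      ((lines & WM_LINES_MASK) << WM_LINES_SHIFT);
        return val;     /* disabled levels write zero, as above */
}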
+
void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm,
+ const struct skl_plane_wm *wm,
+ const struct skl_ddb_allocation *ddb,
int plane)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
enum pipe pipe = intel_crtc->pipe;
for (level = 0; level <= max_level; level++) {
- I915_WRITE(PLANE_WM(pipe, plane, level),
- wm->plane[pipe][plane][level]);
+ skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane, level),
+ &wm->wm[level]);
}
- I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
+ skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane),
+ &wm->trans_wm);
skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
- &wm->ddb.plane[pipe][plane]);
+ &ddb->plane[pipe][plane]);
skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
- &wm->ddb.y_plane[pipe][plane]);
+ &ddb->y_plane[pipe][plane]);
}
void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_wm_values *wm)
+ const struct skl_plane_wm *wm,
+ const struct skl_ddb_allocation *ddb)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
enum pipe pipe = intel_crtc->pipe;
for (level = 0; level <= max_level; level++) {
- I915_WRITE(CUR_WM(pipe, level),
- wm->plane[pipe][PLANE_CURSOR][level]);
+ skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
+ &wm->wm[level]);
}
- I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
+ skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
- &wm->ddb.plane[pipe][PLANE_CURSOR]);
+ &ddb->plane[pipe][PLANE_CURSOR]);
}
-bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe)
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2)
{
- return new->pipe[pipe].start == old->pipe[pipe].start &&
- new->pipe[pipe].end == old->pipe[pipe].end;
+ if (l1->plane_en != l2->plane_en)
+ return false;
+
+ /* If both planes aren't enabled, the rest shouldn't matter */
+ if (!l1->plane_en)
+ return true;
+
+ return (l1->plane_res_l == l2->plane_res_l &&
+ l1->plane_res_b == l2->plane_res_b);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -3902,22 +3907,22 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
}
bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
- const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe)
+ struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = state->dev;
- struct intel_crtc *intel_crtc;
- enum pipe otherp;
+ struct drm_crtc *other_crtc;
+ struct drm_crtc_state *other_cstate;
+ struct intel_crtc *other_intel_crtc;
+ const struct skl_ddb_entry *ddb =
+ &to_intel_crtc_state(intel_crtc->base.state)->wm.skl.ddb;
+ int i;
- for_each_intel_crtc(dev, intel_crtc) {
- otherp = intel_crtc->pipe;
+ for_each_crtc_in_state(state, other_crtc, other_cstate, i) {
+ other_intel_crtc = to_intel_crtc(other_crtc);
- if (otherp == pipe)
+ if (other_intel_crtc == intel_crtc)
continue;
- if (skl_ddb_entries_overlap(&new->pipe[pipe],
- &old->pipe[otherp]))
+ if (skl_ddb_entries_overlap(ddb, &other_intel_crtc->hw_ddb))
return true;
}
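The skl_ddb_entries_overlap() check this loop relies on is, in all likelihood, the standard half-open interval test: two DDB allocations collide iff each starts before the other ends. A minimal sketch with illustrative types:

#include <stdbool.h>
#include <stdint.h>

struct ddb_entry_sketch { uint16_t start, end; }; /* half-open [start, end) */

static bool ddb_overlap(const struct ddb_entry_sketch *a,
                        const struct ddb_entry_sketch *b)
{
        return a->start < b->end && b->start < a->end;
}
/* [0,100) vs [100,200) -> false; [0,120) vs [100,200) -> true */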
@@ -3958,7 +3963,7 @@ pipes_modified(struct drm_atomic_state *state)
return ret;
}
-int
+static int
skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
{
struct drm_atomic_state *state = cstate->base.state;
@@ -4076,19 +4081,64 @@ skl_copy_wm_for_pipe(struct skl_wm_values *dst,
struct skl_wm_values *src,
enum pipe pipe)
{
- dst->wm_linetime[pipe] = src->wm_linetime[pipe];
- memcpy(dst->plane[pipe], src->plane[pipe],
- sizeof(dst->plane[pipe]));
- memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
- sizeof(dst->plane_trans[pipe]));
-
- dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
sizeof(dst->ddb.y_plane[pipe]));
memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
sizeof(dst->ddb.plane[pipe]));
}
+static void
+skl_print_wm_changes(const struct drm_atomic_state *state)
+{
+ const struct drm_device *dev = state->dev;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
+ const struct drm_crtc *crtc;
+ const struct drm_crtc_state *cstate;
+ const struct drm_plane *plane;
+ const struct intel_plane *intel_plane;
+ const struct drm_plane_state *pstate;
+ const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
+ const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ enum pipe pipe;
+ int id;
+ int i, j;
+
+ for_each_crtc_in_state(state, crtc, cstate, i) {
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ for_each_plane_in_state(state, plane, pstate, j) {
+ const struct skl_ddb_entry *old, *new;
+
+ intel_plane = to_intel_plane(plane);
+ id = skl_wm_plane_id(intel_plane);
+ old = &old_ddb->plane[pipe][id];
+ new = &new_ddb->plane[pipe][id];
+
+ if (intel_plane->pipe != pipe)
+ continue;
+
+ if (skl_ddb_entry_equal(old, new))
+ continue;
+
+ if (id != PLANE_CURSOR) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:plane %d%c] ddb (%d - %d) -> (%d - %d)\n",
+ plane->base.id, id + 1,
+ pipe_name(pipe),
+ old->start, old->end,
+ new->start, new->end);
+ } else {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:cursor %c] ddb (%d - %d) -> (%d - %d)\n",
+ plane->base.id,
+ pipe_name(pipe),
+ old->start, old->end,
+ new->start, new->end);
+ }
+ }
+ }
+}
+
static int
skl_compute_wm(struct drm_atomic_state *state)
{
@@ -4131,7 +4181,6 @@ skl_compute_wm(struct drm_atomic_state *state)
* no suitable watermark values can be found.
*/
for_each_crtc_in_state(state, crtc, cstate, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *intel_cstate =
to_intel_crtc_state(cstate);
@@ -4149,9 +4198,10 @@ skl_compute_wm(struct drm_atomic_state *state)
continue;
intel_cstate->update_wm_pre = true;
- skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
}
+ skl_print_wm_changes(state);
+
return 0;
}
@@ -4183,13 +4233,17 @@ static void skl_update_wm(struct drm_crtc *crtc)
int plane;
for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
- skl_write_plane_wm(intel_crtc, results, plane);
+ skl_write_plane_wm(intel_crtc, &pipe_wm->planes[plane],
+ &results->ddb, plane);
- skl_write_cursor_wm(intel_crtc, results);
+ skl_write_cursor_wm(intel_crtc, &pipe_wm->planes[PLANE_CURSOR],
+ &results->ddb);
}
skl_copy_wm_for_pipe(hw_vals, results, pipe);
+ intel_crtc->hw_ddb = cstate->wm.skl.ddb;
+
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -4268,114 +4322,77 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void skl_pipe_wm_active_state(uint32_t val,
- struct skl_pipe_wm *active,
- bool is_transwm,
- bool is_cursor,
- int i,
- int level)
+static inline void skl_wm_level_from_reg_val(uint32_t val,
+ struct skl_wm_level *level)
{
- bool is_enabled = (val & PLANE_WM_EN) != 0;
-
- if (!is_transwm) {
- if (!is_cursor) {
- active->wm[level].plane_en[i] = is_enabled;
- active->wm[level].plane_res_b[i] =
- val & PLANE_WM_BLOCKS_MASK;
- active->wm[level].plane_res_l[i] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- } else {
- active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
- active->wm[level].plane_res_b[PLANE_CURSOR] =
- val & PLANE_WM_BLOCKS_MASK;
- active->wm[level].plane_res_l[PLANE_CURSOR] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- }
- } else {
- if (!is_cursor) {
- active->trans_wm.plane_en[i] = is_enabled;
- active->trans_wm.plane_res_b[i] =
- val & PLANE_WM_BLOCKS_MASK;
- active->trans_wm.plane_res_l[i] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- } else {
- active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
- active->trans_wm.plane_res_b[PLANE_CURSOR] =
- val & PLANE_WM_BLOCKS_MASK;
- active->trans_wm.plane_res_l[PLANE_CURSOR] =
- (val >> PLANE_WM_LINES_SHIFT) &
- PLANE_WM_LINES_MASK;
- }
- }
+ level->plane_en = val & PLANE_WM_EN;
+ level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
+ level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
+ PLANE_WM_LINES_MASK;
}
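The new helper is the exact inverse of skl_write_wm_level() earlier in this patch: the same three fields, read back out of the register value. A self-contained round-trip check, using the same assumed constants as the encode sketch above:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define WM_EN          (1u << 31)  /* assumed register layout, as before */
#define WM_BLOCKS_MASK 0x3ffu
#define WM_LINES_SHIFT 14
#define WM_LINES_MASK  0x1fu

struct wm_level_sketch {
        bool plane_en;
        uint32_t plane_res_b;
        uint32_t plane_res_l;
};

static void wm_level_from_reg(uint32_t val, struct wm_level_sketch *l)
{
        l->plane_en = val & WM_EN;
        l->plane_res_b = val & WM_BLOCKS_MASK;
        l->plane_res_l = (val >> WM_LINES_SHIFT) & WM_LINES_MASK;
}

int main(void)
{
        struct wm_level_sketch l;

        wm_level_from_reg(WM_EN | 42 | (3u << WM_LINES_SHIFT), &l);
        assert(l.plane_en && l.plane_res_b == 42 && l.plane_res_l == 3);
        return 0;
}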
-static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+ struct skl_pipe_wm *out)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
+ struct intel_plane *intel_plane;
+ struct skl_plane_wm *wm;
enum pipe pipe = intel_crtc->pipe;
- int level, i, max_level;
- uint32_t temp;
-
- max_level = ilk_wm_max_level(dev);
-
- hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
-
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(intel_crtc); i++)
- hw->plane[pipe][i][level] =
- I915_READ(PLANE_WM(pipe, i, level));
- hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
- }
+ int level, id, max_level;
+ uint32_t val;
- for (i = 0; i < intel_num_planes(intel_crtc); i++)
- hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
- hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
+ max_level = ilk_wm_max_level(dev_priv);
- if (!intel_crtc->active)
- return;
-
- hw->dirty_pipes |= drm_crtc_mask(crtc);
+ for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ id = skl_wm_plane_id(intel_plane);
+ wm = &out->planes[id];
- active->linetime = hw->wm_linetime[pipe];
+ for (level = 0; level <= max_level; level++) {
+ if (id != PLANE_CURSOR)
+ val = I915_READ(PLANE_WM(pipe, id, level));
+ else
+ val = I915_READ(CUR_WM(pipe, level));
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = hw->plane[pipe][i][level];
- skl_pipe_wm_active_state(temp, active, false,
- false, i, level);
+ skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
- temp = hw->plane[pipe][PLANE_CURSOR][level];
- skl_pipe_wm_active_state(temp, active, false, true, i, level);
- }
- for (i = 0; i < intel_num_planes(intel_crtc); i++) {
- temp = hw->plane_trans[pipe][i];
- skl_pipe_wm_active_state(temp, active, true, false, i, 0);
+ if (id != PLANE_CURSOR)
+ val = I915_READ(PLANE_WM_TRANS(pipe, id));
+ else
+ val = I915_READ(CUR_WM_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->trans_wm);
}
- temp = hw->plane_trans[pipe][PLANE_CURSOR];
- skl_pipe_wm_active_state(temp, active, true, true, i, 0);
+ if (!intel_crtc->active)
+ return;
- intel_crtc->wm.active.skl = *active;
+ out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
void skl_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *cstate;
skl_ddb_get_hw_state(dev_priv, ddb);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- skl_pipe_wm_get_hw_state(crtc);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_crtc = to_intel_crtc(crtc);
+ cstate = to_intel_crtc_state(crtc->state);
+
+ skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
+
+ if (intel_crtc->active) {
+ hw->dirty_pipes |= drm_crtc_mask(crtc);
+ intel_crtc->wm.active.skl = cstate->wm.skl.optimal;
+ }
+ }
if (dev_priv->active_crtcs) {
/* Fully recompute DDB on first atomic commit */
@@ -4402,7 +4419,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
};
hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
memset(active, 0, sizeof(*active));
@@ -4424,7 +4441,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
active->linetime = hw->wm_linetime[pipe];
} else {
- int level, max_level = ilk_wm_max_level(dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
/*
* For inactive pipes, all watermark levels
@@ -4610,10 +4627,10 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
}
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev))
+ else if (IS_IVYBRIDGE(dev_priv))
hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
@@ -5357,6 +5374,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
@@ -5378,7 +5396,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
if (HAS_GUC(dev_priv))
@@ -5394,9 +5412,8 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
- /* WaRsUseTimeoutMode */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ /* WaRsUseTimeoutMode:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
@@ -5424,6 +5441,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
@@ -5440,7 +5458,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev_priv))
@@ -5500,6 +5518,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 rc6vids, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
@@ -5533,7 +5552,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -5570,10 +5589,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
- if (ret)
- DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
-
reset_rps(dev_priv, gen6_set_rps);
rc6vids = 0;
@@ -5982,6 +5997,7 @@ static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -6008,7 +6024,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -6070,6 +6086,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 gtfifodbg, val, rc6_mode = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -6109,7 +6126,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_engine(engine, dev_priv)
+ for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
@@ -6792,7 +6809,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
if (READ_ONCE(dev_priv->rps.enabled))
goto out;
- rcs = &dev_priv->engine[RCS];
+ rcs = dev_priv->engine[RCS];
if (rcs->last_context)
goto out;
@@ -6929,7 +6946,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
* The bit 22 of 0x42004
* The bit 7,8,9 of 0x42020.
*/
- if (IS_IRONLAKE_M(dev)) {
+ if (IS_IRONLAKE_M(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ilk */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -7131,7 +7148,7 @@ static void lpt_init_clock_gating(struct drm_device *dev)
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
- if (HAS_PCH_LPT_LP(dev))
+ if (HAS_PCH_LPT_LP(dev_priv))
I915_WRITE(SOUTH_DSPCLK_GATE_D,
I915_READ(SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
@@ -7146,7 +7163,7 @@ static void lpt_suspend_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (HAS_PCH_LPT_LP(dev)) {
+ if (HAS_PCH_LPT_LP(dev_priv)) {
uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
@@ -7339,7 +7356,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisablePSDDualDispatchEnable:ivb */
- if (IS_IVB_GT1(dev))
+ if (IS_IVB_GT1(dev_priv))
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
@@ -7355,7 +7372,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
GEN7_WA_L3_CHICKEN_MODE);
- if (IS_IVB_GT1(dev))
+ if (IS_IVB_GT1(dev_priv))
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
@@ -7412,7 +7429,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- if (!HAS_PCH_NOP(dev))
+ if (!HAS_PCH_NOP(dev_priv))
cpt_init_clock_gating(dev);
gen6_check_mch_setup(dev);
@@ -7549,7 +7566,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
OVRUNIT_CLOCK_GATE_DISABLE |
OVCUNIT_CLOCK_GATE_DISABLE;
- if (IS_GM45(dev))
+ if (IS_GM45(dev_priv))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
@@ -7655,7 +7672,7 @@ void intel_init_clock_gating(struct drm_device *dev)
void intel_suspend_hw(struct drm_device *dev)
{
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(to_i915(dev)))
lpt_suspend_hw(dev);
}
@@ -7723,7 +7740,7 @@ void intel_init_pm(struct drm_device *dev)
/* For cxsr */
if (IS_PINEVIEW(dev))
i915_pineview_get_mem_freq(dev);
- else if (IS_GEN5(dev))
+ else if (IS_GEN5(dev_priv))
i915_ironlake_get_mem_freq(dev);
/* For FIFO watermark updates */
@@ -7731,12 +7748,12 @@ void intel_init_pm(struct drm_device *dev)
skl_setup_wm_latency(dev);
dev_priv->display.update_wm = skl_update_wm;
dev_priv->display.compute_global_watermarks = skl_compute_wm;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev);
- if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
+ if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
- (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
+ (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.compute_intermediate_wm =
@@ -7749,14 +7766,14 @@ void intel_init_pm(struct drm_device *dev)
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
}
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev);
dev_priv->display.update_wm = vlv_update_wm;
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev);
dev_priv->display.update_wm = vlv_update_wm;
} else if (IS_PINEVIEW(dev)) {
- if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
@@ -7770,14 +7787,14 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
- } else if (IS_G4X(dev)) {
+ } else if (IS_G4X(dev_priv)) {
dev_priv->display.update_wm = g4x_update_wm;
- } else if (IS_GEN4(dev)) {
+ } else if (IS_GEN4(dev_priv)) {
dev_priv->display.update_wm = i965_update_wm;
- } else if (IS_GEN3(dev)) {
+ } else if (IS_GEN3(dev_priv)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- } else if (IS_GEN2(dev)) {
+ } else if (IS_GEN2(dev_priv)) {
if (INTEL_INFO(dev)->num_pipes == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 108ba1e5d658..271a3e29ff23 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -268,7 +268,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (dev_priv->psr.link_standby)
@@ -344,7 +344,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
* ones. Since by display design the transcoder EDP is tied to port A,
* we can safely escape based on port A.
*/
- if (HAS_DDI(dev) && dig_port->port != PORT_A) {
+ if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return false;
}
@@ -354,20 +354,20 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
!dev_priv->psr.link_standby) {
DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
return false;
}
- if (IS_HASWELL(dev) &&
+ if (IS_HASWELL(dev_priv) &&
I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
- if (IS_HASWELL(dev) &&
+ if (IS_HASWELL(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
@@ -402,7 +402,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->psr.lock);
/* Enable/Re-enable PSR on the host */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
/* On HSW+ after we enable PSR on source it will activate it
* as soon as it matches the configured idle_frame count. So
* we just actually enable it here at activation time.
@@ -448,7 +448,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
dev_priv->psr.busy_frontbuffer_bits = 0;
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
hsw_psr_setup_vsc(intel_dp);
if (dev_priv->psr.psr2_support) {
@@ -580,7 +580,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
}
/* Disable PSR on Source */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
hsw_psr_disable(intel_dp);
else
vlv_psr_disable(intel_dp);
@@ -827,17 +827,17 @@ void intel_psr_init(struct drm_device *dev)
/* Per platform default */
if (i915.enable_psr == -1) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
i915.enable_psr = 1;
else
i915.enable_psr = 0;
}
/* Set link_standby vs. link_off defaults */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
/* On VLV and CHV only standby mode is supported. */
dev_priv->psr.link_standby = true;
else
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ed9955dce156..32786ba199b9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -405,22 +405,6 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
return gen8_emit_pipe_control(req, flags, scratch_addr);
}
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- u64 acthd;
-
- if (INTEL_GEN(dev_priv) >= 8)
- acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
- RING_ACTHD_UDW(engine->mmio_base));
- else if (INTEL_GEN(dev_priv) >= 4)
- acthd = I915_READ(RING_ACTHD(engine->mmio_base));
- else
- acthd = I915_READ(ACTHD);
-
- return acthd;
-}
-
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -585,9 +569,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
I915_WRITE_TAIL(engine, ring->tail);
(void)I915_READ_TAIL(engine);
- I915_WRITE_CTL(engine,
- ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
- | RING_VALID);
+ I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
@@ -851,15 +833,13 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
- /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
@@ -869,10 +849,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
*/
}
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_YV12_BUGFIX |
GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
@@ -884,9 +862,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
- /* WaDisableMaskBasedCammingInRCC:skl,bxt */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ /* WaDisableMaskBasedCammingInRCC:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
@@ -1003,47 +980,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
* until D0 which is the default case so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
- if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
- }
-
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
- /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
- }
-
- /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
- * involving this register should also be added to WA batch as required.
- */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
- /* WaDisableLSQCROPERFforOCL:skl */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_RO_PERF_DIS);
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
/* WaEnableGapsTsvCreditFix:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
- I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
- GEN9_GAPS_TSV_CREDIT_DISABLE));
- }
-
- /* WaDisablePowerCompilerClockGating:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
- WA_SET_BIT_MASKED(HIZ_CHICKEN,
- BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
-
- /* WaBarrierPerformanceFixDisable:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE |
- HDC_BARRIER_PERFORMANCE_DISABLE);
-
- /* WaDisableSbeCacheDispatchPortSharing:skl */
- if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
- WA_SET_BIT_MASKED(
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
/* WaDisableGafsUnitClkGating:skl */
WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
@@ -1284,7 +1226,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *req)
if (ret)
return ret;
- for_each_engine_id(waiter, dev_priv, id) {
+ for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
@@ -1321,7 +1263,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *req)
if (ret)
return ret;
- for_each_engine_id(waiter, dev_priv, id) {
+ for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
@@ -1348,6 +1290,7 @@ static int gen6_signal(struct drm_i915_gem_request *req)
struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret, num_rings;
num_rings = INTEL_INFO(dev_priv)->num_rings;
@@ -1355,7 +1298,7 @@ static int gen6_signal(struct drm_i915_gem_request *req)
if (ret)
return ret;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
i915_reg_t mbox_reg;
if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
@@ -1989,6 +1932,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
struct i915_vma *vma;
GEM_BUG_ON(!is_power_of_2(size));
+ GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
@@ -2146,9 +2090,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
- if (!intel_engine_initialized(engine))
- return;
-
dev_priv = engine->i915;
if (engine->buffer) {
@@ -2175,13 +2116,16 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
intel_ring_context_unpin(dev_priv->kernel_context, engine);
engine->i915 = NULL;
+ dev_priv->engine[engine->id] = NULL;
+ kfree(engine);
}
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine(engine, dev_priv, id) {
engine->buffer->head = engine->buffer->tail;
engine->buffer->last_retired_head = -1;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ec0b4a0c605d..32b2e6332ccf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -73,13 +73,40 @@ enum intel_engine_hangcheck_action {
#define HANGCHECK_SCORE_RING_HUNG 31
+#define I915_MAX_SLICES 3
+#define I915_MAX_SUBSLICES 3
+
+#define instdone_slice_mask(dev_priv__) \
+ (INTEL_GEN(dev_priv__) == 7 ? \
+ 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
+
+#define instdone_subslice_mask(dev_priv__) \
+ (INTEL_GEN(dev_priv__) == 7 ? \
+ 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)
+
+#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
+ for ((slice__) = 0, (subslice__) = 0; \
+ (slice__) < I915_MAX_SLICES; \
+ (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
+ (slice__) += ((subslice__) == 0)) \
+ for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
+ (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
+
+struct intel_instdone {
+ u32 instdone;
+ /* The following exist only in the RCS engine */
+ u32 slice_common;
+ u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+ u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+};
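The macro above walks every (slice, subslice) pair permitted by the device's masks, with gen7 hard-coded to a single slice and subslice. An unrolled, self-contained equivalent of one pass over the sampler array; the masks here are example inputs, not real device data:

#include <stdint.h>
#include <stdio.h>

#define MAX_SLICES 3
#define MAX_SUBSLICES 3

static void dump_sampler(const uint32_t s[MAX_SLICES][MAX_SUBSLICES],
                         uint32_t slice_mask, uint32_t subslice_mask)
{
        for (int sl = 0; sl < MAX_SLICES; sl++) {
                if (!(slice_mask & (1u << sl)))
                        continue;
                for (int ss = 0; ss < MAX_SUBSLICES; ss++) {
                        if (!(subslice_mask & (1u << ss)))
                                continue;
                        printf("slice %d subslice %d: %#x\n",
                               sl, ss, (unsigned)s[sl][ss]);
                }
        }
}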
+
struct intel_engine_hangcheck {
u64 acthd;
u32 seqno;
int score;
enum intel_engine_hangcheck_action action;
int deadlock;
- u32 instdone[I915_NUM_INSTDONE_REG];
+ struct intel_instdone instdone;
};
struct intel_ring {
@@ -368,12 +395,6 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
-static inline bool
-intel_engine_initialized(const struct intel_engine_cs *engine)
-{
- return engine->i915 != NULL;
-}
-
static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
@@ -394,7 +415,7 @@ intel_engine_sync_index(struct intel_engine_cs *engine,
* vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
- idx = (other - engine) - 1;
+ idx = (other->id - engine->id) - 1;
if (idx < 0)
idx += I915_NUM_ENGINES;
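Computing the index from engine ids rather than pointer arithmetic keeps the mapping stable now that engines are allocated individually elsewhere in this patch. A worked sketch of the arithmetic, assuming the five-engine layout named in the comment above:

enum engine_id_sketch { RCS = 0, VCS, BCS, VECS, VCS2, NUM_ENGINES };

static int sync_index(enum engine_id_sketch self, enum engine_id_sketch other)
{
        int idx = (other - self) - 1;

        if (idx < 0)
                idx += NUM_ENGINES;
        return idx;
}
/* from VCS2: RCS -> 0, VCS -> 1, BCS -> 2, VECS -> 3, matching the
 * comment in the hunk above */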
@@ -514,6 +535,8 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
+
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
@@ -521,6 +544,9 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
int init_workarounds_ring(struct intel_engine_cs *engine);
+void intel_engine_get_instdone(struct intel_engine_cs *engine,
+ struct intel_instdone *instdone);
+
/*
* Arbitrary size for largest possible 'add request' sequence. The code paths
* are complex and variable. Empirical measurement shows that the worst case
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6c11168facd6..ee56a8756c07 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -288,7 +288,6 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_device *dev = &dev_priv->drm;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -304,7 +303,7 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
gen8_irq_power_well_post_enable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
@@ -2590,20 +2589,19 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
- struct drm_device *dev = &dev_priv->drm;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
power_domains->initializing = true;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
skl_display_core_init(dev_priv, resume);
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
bxt_display_core_init(dev_priv, resume);
- } else if (IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(dev_priv);
mutex_unlock(&power_domains->lock);
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
@@ -2758,7 +2756,6 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_device *dev = &dev_priv->drm;
struct device *kdev = &pdev->dev;
pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
@@ -2770,7 +2767,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
* so the driver's own RPM reference tracking asserts also work on
* platforms without RPM support.
*/
- if (!HAS_RUNTIME_PM(dev)) {
+ if (!HAS_RUNTIME_PM(dev_priv)) {
pm_runtime_dont_use_autosuspend(kdev);
pm_runtime_get_sync(kdev);
} else {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c551024d4871..49fb95d03d74 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -251,7 +251,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
* HW workaround: need to write this twice, as the first
* write may otherwise get masked.
*/
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
I915_WRITE(intel_sdvo->sdvo_reg, val);
POSTING_READ(intel_sdvo->sdvo_reg);
}
@@ -307,7 +307,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
static const struct _sdvo_cmd_name {
u8 cmd;
const char *name;
-} sdvo_cmd_names[] = {
+} __attribute__ ((packed)) sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
@@ -1133,7 +1133,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
- if (HAS_PCH_SPLIT(encoder->base.dev))
+ if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
pipe_config->has_pch_encoder = true;
/* We need to construct preferred input timings based on our
@@ -1273,7 +1273,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (!HAS_PCH_SPLIT(dev) && crtc_state->limited_color_range)
+ if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
@@ -1286,7 +1286,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_CPT)
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
@@ -1296,7 +1296,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (INTEL_INFO(dev)->gen >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
- } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
sdvox |= (crtc_state->pixel_multiplier - 1)
@@ -1339,7 +1340,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
return false;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
@@ -1389,7 +1390,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
* encoder->get_config, so we already have a valid pixel multiplier on all
* other platforms.
*/
- if (IS_I915G(dev) || IS_I915GM(dev)) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
pipe_config->pixel_multiplier =
((sdvox & SDVO_PORT_MULTIPLY_MASK)
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
@@ -1595,15 +1596,15 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
- struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
uint16_t hotplug;
- if (!I915_HAS_HOTPLUG(dev))
+ if (!I915_HAS_HOTPLUG(dev_priv))
return 0;
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
- if (IS_I945G(dev) || IS_I945GM(dev))
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
return 0;
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
@@ -2981,6 +2982,7 @@ bool intel_sdvo_init(struct drm_device *dev,
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
+ intel_encoder->port = port;
drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
"SDVO %c", port_name(port));
@@ -2996,7 +2998,7 @@ bool intel_sdvo_init(struct drm_device *dev,
}
intel_encoder->compute_config = intel_sdvo_compute_config;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_sdvo;
intel_encoder->post_disable = pch_post_disable_sdvo;
} else {
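
Most of the intel_sdvo.c churn above is one mechanical conversion: the platform-check macros (HAS_PCH_*, IS_*, INTEL_PCH_TYPE) now take the driver-private pointer rather than the DRM device. A minimal sketch of the idiom — the function name is illustrative, while to_i915() and the macros are the real i915 symbols used in this patch:

static void example_check(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);

	/* was: HAS_PCH_SPLIT(encoder->dev) */
	if (HAS_PCH_SPLIT(dev_priv))
		; /* PCH-specific path */
}
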
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 73a521fdf1bd..43d0350856e7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -208,6 +208,8 @@ skl_update_plane(struct drm_plane *drm_plane,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
+ const struct skl_plane_wm *p_wm =
+ &crtc_state->wm.skl.optimal.planes[plane];
u32 plane_ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->main.offset;
@@ -232,7 +234,7 @@ skl_update_plane(struct drm_plane *drm_plane,
plane_ctl |= skl_plane_ctl_rotation(rotation);
if (wm->dirty_pipes & drm_crtc_mask(crtc))
- skl_write_plane_wm(intel_crtc, wm, plane);
+ skl_write_plane_wm(intel_crtc, p_wm, &wm->ddb, plane);
if (key->flags) {
I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
@@ -289,6 +291,7 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
+ struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
@@ -298,7 +301,8 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
*/
if (!dplane->state->visible)
skl_write_plane_wm(to_intel_crtc(crtc),
- &dev_priv->wm.skl_results, plane);
+ &cstate->wm.skl.optimal.planes[plane],
+ &dev_priv->wm.skl_results.ddb, plane);
I915_WRITE(PLANE_CTL(pipe, plane), 0);
@@ -450,7 +454,7 @@ vlv_update_plane(struct drm_plane *dplane,
if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SP_SOURCE_KEY;
- if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(intel_plane, fb->pixel_format);
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
@@ -542,12 +546,12 @@ ivb_update_plane(struct drm_plane *plane,
if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
sprctl |= SPRITE_TILED;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
/* Sizes are 0 based */
@@ -566,7 +570,7 @@ ivb_update_plane(struct drm_plane *plane,
sprctl |= SPRITE_ROTATE_180;
/* HSW and BDW do this automagically in hardware */
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
x += src_w;
y += src_h;
}
@@ -590,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
else if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
@@ -680,7 +684,7 @@ ilk_update_plane(struct drm_plane *plane,
if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
dvscntr |= DVS_TILED;
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
/* Sizes are 0 based */
@@ -753,7 +757,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
- struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_crtc *crtc = state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -797,7 +801,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
}
/* setup can_scale, min_scale, max_scale */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
/* use scaler when colorkey is not required */
if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
can_scale = 1;
@@ -913,7 +917,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
- if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
+ if (INTEL_GEN(dev_priv) < 9 && (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096)) {
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
return -EINVAL;
@@ -932,7 +936,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
dst->y1 = crtc_y;
dst->y2 = crtc_y + crtc_h;
- if (INTEL_GEN(dev) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
ret = skl_check_plane_surface(state);
if (ret)
return ret;
@@ -944,6 +948,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
@@ -955,7 +960,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
- if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
@@ -987,9 +992,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
drm_modeset_backoff(&ctx);
}
- if (ret)
- drm_atomic_state_free(state);
-
+ drm_atomic_state_put(state);
out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -1042,10 +1045,12 @@ static uint32_t skl_plane_formats[] = {
int
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = NULL;
struct intel_plane_state *state = NULL;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
+ unsigned int supported_rotations;
int num_plane_formats;
int ret;
@@ -1073,7 +1078,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
} else {
@@ -1084,7 +1089,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
case 7:
case 8:
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
} else {
@@ -1092,7 +1097,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 1;
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
@@ -1121,6 +1126,15 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
goto fail;
}
+ if (INTEL_GEN(dev_priv) >= 9) {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270;
+ } else {
+ supported_rotations =
+ DRM_ROTATE_0 | DRM_ROTATE_180;
+ }
+
intel_plane->pipe = pipe;
intel_plane->plane = plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
@@ -1143,7 +1157,9 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (ret)
goto fail;
- intel_create_rotation_property(dev, intel_plane);
+ drm_plane_create_rotation_property(&intel_plane->base,
+ DRM_ROTATE_0,
+ supported_rotations);
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
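
The rotation hunks above drop the driver's private rotation-property helper in favor of the DRM core one, and make the supported set per-plane. Condensed from the hunks above (same names as in the patch), the new registration flow is:

	unsigned int supported_rotations = DRM_ROTATE_0 | DRM_ROTATE_180;

	if (INTEL_GEN(dev_priv) >= 9)
		supported_rotations |= DRM_ROTATE_90 | DRM_ROTATE_270;

	/* DRM_ROTATE_0 is the initial value exposed to userspace */
	drm_plane_create_rotation_property(&intel_plane->base,
					   DRM_ROTATE_0,
					   supported_rotations);
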
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d960e4866595..7118fb55f57f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -86,7 +86,8 @@ struct intel_tv {
};
struct video_levels {
- int blank, black, burst;
+ u16 blank, black;
+ u8 burst;
};
struct color_conversion {
@@ -339,34 +340,43 @@ static const struct video_levels component_levels = {
struct tv_mode {
const char *name;
- int clock;
- int refresh; /* in millihertz (for precision) */
+
+ u32 clock;
+ u16 refresh; /* in millihertz (for precision) */
u32 oversample;
- int hsync_end, hblank_start, hblank_end, htotal;
- bool progressive, trilevel_sync, component_only;
- int vsync_start_f1, vsync_start_f2, vsync_len;
- bool veq_ena;
- int veq_start_f1, veq_start_f2, veq_len;
- int vi_end_f1, vi_end_f2, nbr_end;
- bool burst_ena;
- int hburst_start, hburst_len;
- int vburst_start_f1, vburst_end_f1;
- int vburst_start_f2, vburst_end_f2;
- int vburst_start_f3, vburst_end_f3;
- int vburst_start_f4, vburst_end_f4;
+ u8 hsync_end;
+ u16 hblank_start, hblank_end, htotal;
+ bool progressive : 1, trilevel_sync : 1, component_only : 1;
+ u8 vsync_start_f1, vsync_start_f2, vsync_len;
+ bool veq_ena : 1;
+ u8 veq_start_f1, veq_start_f2, veq_len;
+ u8 vi_end_f1, vi_end_f2;
+ u16 nbr_end;
+ bool burst_ena : 1;
+ u8 hburst_start, hburst_len;
+ u8 vburst_start_f1;
+ u16 vburst_end_f1;
+ u8 vburst_start_f2;
+ u16 vburst_end_f2;
+ u8 vburst_start_f3;
+ u16 vburst_end_f3;
+ u8 vburst_start_f4;
+ u16 vburst_end_f4;
/*
* subcarrier programming
*/
- int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
+ u16 dda2_size, dda3_size;
+ u8 dda1_inc;
+ u16 dda2_inc, dda3_inc;
u32 sc_reset;
- bool pal_burst;
+ bool pal_burst : 1;
/*
* blank/black levels
*/
const struct video_levels *composite_levels, *svideo_levels;
const struct color_conversion *composite_color, *svideo_color;
const u32 *filter_table;
- int max_srcw;
+ u16 max_srcw;
};
@@ -1095,7 +1105,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
/* Enable two fixes for the chips that need them. */
- if (IS_I915GM(dev))
+ if (IS_I915GM(dev_priv))
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
@@ -1220,7 +1230,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
* The TV sense state should be cleared to zero on the Cantiga platform; otherwise
* the TV is misdetected. This is a hardware requirement.
*/
- if (IS_GM45(dev))
+ if (IS_GM45(dev_priv))
tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
@@ -1610,7 +1620,9 @@ intel_tv_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector_attach_encoder(intel_connector, intel_encoder);
+
intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->port = PORT_NONE;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->cloneable = 0;
intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
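
The intel_tv.c struct changes trade int fields for explicitly sized types and single-bit bools; because the TV mode tables are large static arrays, the savings multiply per entry. A toy comparison, assuming typical x86 alignment rules (exact sizes are ABI-dependent):

struct wide  { int blank, black, burst; };	/* 12 bytes */
struct tight { u16 blank, black; u8 burst; };	/* 6 bytes, 2-byte aligned */
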
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ee2306a79747..e2b188dcf908 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -231,19 +231,21 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
+ struct drm_i915_private *dev_priv = domain->i915;
unsigned long irqflags;
- assert_rpm_device_not_suspended(domain->i915);
+ assert_rpm_device_not_suspended(dev_priv);
- spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (WARN_ON(domain->wake_count == 0))
domain->wake_count++;
- if (--domain->wake_count == 0)
- domain->i915->uncore.funcs.force_wake_put(domain->i915,
- 1 << domain->id);
+ if (--domain->wake_count == 0) {
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
+ dev_priv->uncore.fw_domains_active &= ~domain->mask;
+ }
- spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return HRTIMER_NORESTART;
}
@@ -254,7 +256,7 @@ void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
int retry_count = 100;
- enum forcewake_domains fw = 0, active_domains;
+ enum forcewake_domains fw, active_domains;
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly. Wait until all pending
@@ -291,10 +293,7 @@ void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
WARN_ON(active_domains);
- for_each_fw_domain(domain, dev_priv)
- if (domain->wake_count)
- fw |= domain->mask;
-
+ fw = dev_priv->uncore.fw_domains_active;
if (fw)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
@@ -443,9 +442,6 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
{
struct intel_uncore_forcewake_domain *domain;
- if (!dev_priv->uncore.funcs.force_wake_get)
- return;
-
fw_domains &= dev_priv->uncore.fw_domains;
for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
@@ -453,8 +449,10 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
fw_domains &= ~domain->mask;
}
- if (fw_domains)
+ if (fw_domains) {
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ dev_priv->uncore.fw_domains_active |= fw_domains;
+ }
}
/**
@@ -509,9 +507,6 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
{
struct intel_uncore_forcewake_domain *domain;
- if (!dev_priv->uncore.funcs.force_wake_put)
- return;
-
fw_domains &= dev_priv->uncore.fw_domains;
for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
@@ -567,13 +562,10 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
- struct intel_uncore_forcewake_domain *domain;
-
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- for_each_fw_domain(domain, dev_priv)
- WARN_ON(domain->wake_count);
+ WARN_ON(dev_priv->uncore.fw_domains_active);
}
/* We give fast paths for the really cool registers */
@@ -589,49 +581,146 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
__fwd; \
})
-#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
+static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
+{
+ if (offset < entry->start)
+ return -1;
+ else if (offset > entry->end)
+ return 1;
+ else
+ return 0;
+}
+
+/* Copied and "macroized" from lib/bsearch.c */
+#define BSEARCH(key, base, num, cmp) ({ \
+ unsigned int start__ = 0, end__ = (num); \
+ typeof(base) result__ = NULL; \
+ while (start__ < end__) { \
+ unsigned int mid__ = start__ + (end__ - start__) / 2; \
+ int ret__ = (cmp)((key), (base) + mid__); \
+ if (ret__ < 0) { \
+ end__ = mid__; \
+ } else if (ret__ > 0) { \
+ start__ = mid__ + 1; \
+ } else { \
+ result__ = (base) + mid__; \
+ break; \
+ } \
+ } \
+ result__; \
+})
+
+static enum forcewake_domains
+find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
+{
+ const struct intel_forcewake_range *entry;
+
+ entry = BSEARCH(offset,
+ dev_priv->uncore.fw_domains_table,
+ dev_priv->uncore.fw_domains_table_entries,
+ fw_range_cmp);
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x2000, 0x4000) || \
- REG_RANGE((reg), 0x5000, 0x8000) || \
- REG_RANGE((reg), 0xB000, 0x12000) || \
- REG_RANGE((reg), 0x2E000, 0x30000))
+ return entry ? entry->domains : 0;
+}
+
+static void
+intel_fw_table_check(struct drm_i915_private *dev_priv)
+{
+ const struct intel_forcewake_range *ranges;
+ unsigned int num_ranges;
+ s32 prev;
+ unsigned int i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ return;
+
+ ranges = dev_priv->uncore.fw_domains_table;
+ if (!ranges)
+ return;
+
+ num_ranges = dev_priv->uncore.fw_domains_table_entries;
+
+ for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
+ WARN_ON_ONCE(prev >= (s32)ranges->start);
+ prev = ranges->start;
+ WARN_ON_ONCE(prev >= (s32)ranges->end);
+ prev = ranges->end;
+ }
+}
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x12000, 0x14000) || \
- REG_RANGE((reg), 0x22000, 0x24000) || \
- REG_RANGE((reg), 0x30000, 0x40000))
+#define GEN_FW_RANGE(s, e, d) \
+ { .start = (s), .end = (e), .domains = (d) }
-#define __vlv_reg_read_fw_domains(offset) \
+#define HAS_FWTABLE(dev_priv) \
+ (IS_GEN9(dev_priv) || \
+ IS_CHERRYVIEW(dev_priv) || \
+ IS_VALLEYVIEW(dev_priv))
+
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __vlv_fw_ranges[] = {
+ GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
+};
+
+#define __fwtable_reg_read_fw_domains(offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (!NEEDS_FORCE_WAKE(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
+ if (NEEDS_FORCE_WAKE((offset))) \
+ __fwd = find_fw_domain(dev_priv, offset); \
__fwd; \
})
+/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
- GEN6_RPNSWREQ,
- GEN6_RC_VIDEO_FREQ,
- RING_TAIL(RENDER_RING_BASE),
- RING_TAIL(GEN6_BSD_RING_BASE),
- RING_TAIL(VEBOX_RING_BASE),
- RING_TAIL(BLT_RING_BASE),
+ RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
+ GEN6_RPNSWREQ, /* 0xA008 */
+ GEN6_RC_VIDEO_FREQ, /* 0xA00C */
+ RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */
+ RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */
+ RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
/* TODO: Other registers are not yet used */
};
+static void intel_shadow_table_check(void)
+{
+ const i915_reg_t *reg = gen8_shadowed_regs;
+ s32 prev;
+ u32 offset;
+ unsigned int i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ return;
+
+ for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
+ offset = i915_mmio_reg_offset(*reg);
+ WARN_ON_ONCE(prev >= (s32)offset);
+ prev = offset;
+ }
+}
+
+static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
+{
+ u32 offset = i915_mmio_reg_offset(*reg);
+
+ if (key < offset)
+ return -1;
+ else if (key > offset)
+ return 1;
+ else
+ return 0;
+}
+
static bool is_gen8_shadowed(u32 offset)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
- if (offset == gen8_shadowed_regs[i].reg)
- return true;
+ const i915_reg_t *regs = gen8_shadowed_regs;
- return false;
+ return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
+ mmio_reg_cmp);
}
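
The BSEARCH() macro introduced above is generic over the entry type: the comparator receives the key and a pointer to the probed entry and returns the usual negative/zero/positive ordering, exactly like fw_range_cmp() and mmio_reg_cmp(). A minimal illustrative specialization for a plain sorted u32 array (hypothetical helpers, not part of the patch):

static int u32_cmp(u32 key, const u32 *entry)
{
	if (key < *entry)
		return -1;
	if (key > *entry)
		return 1;
	return 0;
}

static bool sorted_contains(u32 key, const u32 *table, unsigned int num)
{
	return BSEARCH(key, table, num, u32_cmp) != NULL;
}
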
#define __gen8_reg_write_fw_domains(offset) \
@@ -644,143 +733,70 @@ static bool is_gen8_shadowed(u32 offset)
__fwd; \
})
-#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x2000, 0x4000) || \
- REG_RANGE((reg), 0x5200, 0x8000) || \
- REG_RANGE((reg), 0x8300, 0x8500) || \
- REG_RANGE((reg), 0xB000, 0xB480) || \
- REG_RANGE((reg), 0xE000, 0xE800))
-
-#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x8800, 0x8900) || \
- REG_RANGE((reg), 0xD000, 0xD800) || \
- REG_RANGE((reg), 0x12000, 0x14000) || \
- REG_RANGE((reg), 0x1A000, 0x1C000) || \
- REG_RANGE((reg), 0x1E800, 0x1EA00) || \
- REG_RANGE((reg), 0x30000, 0x38000))
-
-#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x4000, 0x5000) || \
- REG_RANGE((reg), 0x8000, 0x8300) || \
- REG_RANGE((reg), 0x8500, 0x8600) || \
- REG_RANGE((reg), 0x9000, 0xB000) || \
- REG_RANGE((reg), 0xF000, 0x10000))
-
-#define __chv_reg_read_fw_domains(offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- if (!NEEDS_FORCE_WAKE(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- __fwd; \
-})
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __chv_fw_ranges[] = {
+ GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
+};
-#define __chv_reg_write_fw_domains(offset) \
+#define __fwtable_reg_write_fw_domains(offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- __fwd; \
-})
-
-#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
- REG_RANGE((reg), 0xB00, 0x2000)
-
-#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x2000, 0x2700) || \
- REG_RANGE((reg), 0x3000, 0x4000) || \
- REG_RANGE((reg), 0x5200, 0x8000) || \
- REG_RANGE((reg), 0x8140, 0x8160) || \
- REG_RANGE((reg), 0x8300, 0x8500) || \
- REG_RANGE((reg), 0x8C00, 0x8D00) || \
- REG_RANGE((reg), 0xB000, 0xB480) || \
- REG_RANGE((reg), 0xE000, 0xE900) || \
- REG_RANGE((reg), 0x24400, 0x24800))
-
-#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
- (REG_RANGE((reg), 0x8130, 0x8140) || \
- REG_RANGE((reg), 0x8800, 0x8A00) || \
- REG_RANGE((reg), 0xD000, 0xD800) || \
- REG_RANGE((reg), 0x12000, 0x14000) || \
- REG_RANGE((reg), 0x1A000, 0x1EA00) || \
- REG_RANGE((reg), 0x30000, 0x40000))
-
-#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
- REG_RANGE((reg), 0x9400, 0x9800)
-
-#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
- ((reg) < 0x40000 && \
- !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
- !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
- !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
- !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
-
-#define SKL_NEEDS_FORCE_WAKE(reg) \
- ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
-
-#define __gen9_reg_read_fw_domains(offset) \
-({ \
- enum forcewake_domains __fwd; \
- if (!SKL_NEEDS_FORCE_WAKE(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- else \
- __fwd = FORCEWAKE_BLITTER; \
+ if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
+ __fwd = find_fw_domain(dev_priv, offset); \
__fwd; \
})
-static const i915_reg_t gen9_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE),
- RING_TAIL(GEN6_BSD_RING_BASE),
- RING_TAIL(VEBOX_RING_BASE),
- RING_TAIL(BLT_RING_BASE),
- GEN6_RPNSWREQ,
- GEN6_RC_VIDEO_FREQ,
- /* TODO: Other registers are not yet used */
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __gen9_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb480, 0xbfff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
-static bool is_gen9_shadowed(u32 offset)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
- if (offset == gen9_shadowed_regs[i].reg)
- return true;
-
- return false;
-}
-
-#define __gen9_reg_write_fw_domains(offset) \
-({ \
- enum forcewake_domains __fwd; \
- if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \
- __fwd = 0; \
- else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
- __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- else \
- __fwd = FORCEWAKE_BLITTER; \
- __fwd; \
-})
-
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -869,26 +885,30 @@ __gen2_read(64)
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
-static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
- enum forcewake_domains fw_domains)
+static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv)
+ fw_domain_arm_timer(domain);
+
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ dev_priv->uncore.fw_domains_active |= fw_domains;
+}
+
+static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
if (WARN_ON(!fw_domains))
return;
- /* Ideally GCC would be constant-fold and eliminate this loop */
- for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
- if (domain->wake_count) {
- fw_domains &= ~domain->mask;
- continue;
- }
-
- fw_domain_arm_timer(domain);
- }
+ /* Turn on all requested but inactive supported forcewake domains. */
+ fw_domains &= dev_priv->uncore.fw_domains;
+ fw_domains &= ~dev_priv->uncore.fw_domains_active;
if (fw_domains)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ ___force_wake_auto(dev_priv, fw_domains);
}
#define __gen6_read(x) \
@@ -903,62 +923,28 @@ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
GEN6_READ_FOOTER; \
}
-#define __vlv_read(x) \
-static u##x \
-vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- enum forcewake_domains fw_engine; \
- GEN6_READ_HEADER(x); \
- fw_engine = __vlv_reg_read_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
- GEN6_READ_FOOTER; \
-}
-
-#define __chv_read(x) \
+#define __fwtable_read(x) \
static u##x \
-chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __chv_reg_read_fw_domains(offset); \
+ fw_engine = __fwtable_reg_read_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
-#define __gen9_read(x) \
-static u##x \
-gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- enum forcewake_domains fw_engine; \
- GEN6_READ_HEADER(x); \
- fw_engine = __gen9_reg_read_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
- GEN6_READ_FOOTER; \
-}
-
-__gen9_read(8)
-__gen9_read(16)
-__gen9_read(32)
-__gen9_read(64)
-__chv_read(8)
-__chv_read(16)
-__chv_read(32)
-__chv_read(64)
-__vlv_read(8)
-__vlv_read(16)
-__vlv_read(32)
-__vlv_read(64)
+__fwtable_read(8)
+__fwtable_read(16)
+__fwtable_read(32)
+__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
-#undef __gen9_read
-#undef __chv_read
-#undef __vlv_read
+#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
@@ -1054,21 +1040,6 @@ gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
GEN6_WRITE_FOOTER; \
}
-#define __hsw_write(x) \
-static void \
-hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
- u32 __fifo_ret = 0; \
- GEN6_WRITE_HEADER; \
- if (NEEDS_FORCE_WAKE(offset)) { \
- __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
- } \
- __raw_i915_write##x(dev_priv, reg, val); \
- if (unlikely(__fifo_ret)) { \
- gen6_gt_check_fifodbg(dev_priv); \
- } \
- GEN6_WRITE_FOOTER; \
-}
-
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
@@ -1081,51 +1052,30 @@ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
GEN6_WRITE_FOOTER; \
}
-#define __chv_write(x) \
-static void \
-chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
- enum forcewake_domains fw_engine; \
- GEN6_WRITE_HEADER; \
- fw_engine = __chv_reg_write_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- __raw_i915_write##x(dev_priv, reg, val); \
- GEN6_WRITE_FOOTER; \
-}
-
-#define __gen9_write(x) \
+#define __fwtable_write(x) \
static void \
-gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
- bool trace) { \
+fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __gen9_reg_write_fw_domains(offset); \
+ fw_engine = __fwtable_reg_write_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
-__gen9_write(8)
-__gen9_write(16)
-__gen9_write(32)
-__chv_write(8)
-__chv_write(16)
-__chv_write(32)
+__fwtable_write(8)
+__fwtable_write(16)
+__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
-__hsw_write(8)
-__hsw_write(16)
-__hsw_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
-#undef __gen9_write
-#undef __chv_write
+#undef __fwtable_write
#undef __gen8_write
-#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
@@ -1314,6 +1264,13 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
WARN_ON(dev_priv->uncore.fw_domains == 0);
}
+#define ASSIGN_FW_DOMAINS_TABLE(d) \
+{ \
+ dev_priv->uncore.fw_domains_table = \
+ (struct intel_forcewake_range *)(d); \
+ dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
+}
+
void intel_uncore_init(struct drm_i915_private *dev_priv)
{
i915_check_vgpu(dev_priv);
@@ -1327,13 +1284,15 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
switch (INTEL_INFO(dev_priv)->gen) {
default:
case 9:
- ASSIGN_WRITE_MMIO_VFUNCS(gen9);
- ASSIGN_READ_MMIO_VFUNCS(gen9);
+ ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
break;
case 8:
if (IS_CHERRYVIEW(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(chv);
- ASSIGN_READ_MMIO_VFUNCS(chv);
+ ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
} else {
ASSIGN_WRITE_MMIO_VFUNCS(gen8);
@@ -1342,14 +1301,11 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
break;
case 7:
case 6:
- if (IS_HASWELL(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(hsw);
- } else {
- ASSIGN_WRITE_MMIO_VFUNCS(gen6);
- }
+ ASSIGN_WRITE_MMIO_VFUNCS(gen6);
if (IS_VALLEYVIEW(dev_priv)) {
- ASSIGN_READ_MMIO_VFUNCS(vlv);
+ ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
} else {
ASSIGN_READ_MMIO_VFUNCS(gen6);
}
@@ -1366,6 +1322,10 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
break;
}
+ intel_fw_table_check(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 8)
+ intel_shadow_table_check();
+
if (intel_vgpu_active(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
ASSIGN_READ_MMIO_VFUNCS(vgpu);
@@ -1815,35 +1775,16 @@ static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
+ u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv))
- return 0;
-
- switch (INTEL_GEN(dev_priv)) {
- case 9:
- fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 8:
- if (IS_CHERRYVIEW(dev_priv))
- fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- else
- fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 7:
- case 6:
- if (IS_VALLEYVIEW(dev_priv))
- fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- else
- fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
- break;
- default:
- MISSING_CASE(INTEL_INFO(dev_priv)->gen);
- case 5: /* forcewake was introduced with gen6 */
- case 4:
- case 3:
- case 2:
- return 0;
+ if (HAS_FWTABLE(dev_priv)) {
+ fw_domains = __fwtable_reg_read_fw_domains(offset);
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ fw_domains = __gen6_reg_read_fw_domains(offset);
+ } else {
+ WARN_ON(!IS_GEN(dev_priv, 2, 5));
+ fw_domains = 0;
}
WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
@@ -1855,32 +1796,18 @@ static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
+ u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv))
- return 0;
-
- switch (INTEL_GEN(dev_priv)) {
- case 9:
- fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 8:
- if (IS_CHERRYVIEW(dev_priv))
- fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg));
- else
- fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg));
- break;
- case 7:
- case 6:
+ if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
+ fw_domains = __fwtable_reg_write_fw_domains(offset);
+ } else if (IS_GEN8(dev_priv)) {
+ fw_domains = __gen8_reg_write_fw_domains(offset);
+ } else if (IS_GEN(dev_priv, 6, 7)) {
fw_domains = FORCEWAKE_RENDER;
- break;
- default:
- MISSING_CASE(INTEL_INFO(dev_priv)->gen);
- case 5:
- case 4:
- case 3:
- case 2:
- return 0;
+ } else {
+ WARN_ON(!IS_GEN(dev_priv, 2, 5));
+ fw_domains = 0;
}
WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
@@ -1910,6 +1837,9 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
WARN_ON(!op);
+ if (intel_vgpu_active(dev_priv))
+ return 0;
+
if (op & FW_REG_READ)
fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
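
Taken together, the uncore hunks replace per-access iteration over every forcewake domain with one cached bitmask, fw_domains_active, updated wherever a wakeref is taken or released. The MMIO fast path then reduces to mask arithmetic; a sketch of the invariant, mirroring __force_wake_auto() above with the timer arming elided:

static void force_wake_auto_sketch(struct drm_i915_private *dev_priv,
				   enum forcewake_domains wanted)
{
	wanted &= dev_priv->uncore.fw_domains;		/* supported here */
	wanted &= ~dev_priv->uncore.fw_domains_active;	/* already awake */
	if (wanted) {
		/* timer arming elided; see ___force_wake_auto() */
		dev_priv->uncore.funcs.force_wake_get(dev_priv, wanted);
		dev_priv->uncore.fw_domains_active |= wanted;
	}
}
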
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index cf83f6507ec8..296f541fbe2f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
#include <linux/component.h>
#include <linux/iommu.h>
#include <linux/of_address.h>
@@ -83,7 +84,7 @@ static void mtk_atomic_complete(struct mtk_drm_private *private,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
static void mtk_atomic_work(struct work_struct *work)
@@ -110,6 +111,7 @@ static int mtk_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (async)
mtk_atomic_schedule(private, state);
else
@@ -415,7 +417,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp_type == MTK_DPI) {
dev_info(dev, "Adding component match for %s\n",
node->full_name);
- component_match_add(dev, &match, compare_of, node);
+ drm_of_component_match_add(dev, &match, compare_of,
+ node);
} else {
struct mtk_ddp_comp *comp;
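
The drm_atomic_state_free() to get/put conversion in this file (and in msm below) moves commit lifetime onto a reference count: the committer takes an extra reference before handing the state to the possibly-asynchronous tail, and every consumer drops its own. A sketch of the pairing — the two helper names are illustrative, the drm_atomic_state_*() calls are the real API:

static void commit_sketch(struct drm_atomic_state *state, bool async)
{
	drm_atomic_state_get(state);	/* ref owned by the commit tail */
	if (async)
		schedule_commit_work(state);	/* tail ends in drm_atomic_state_put() */
	else
		commit_tail(state);		/* likewise */
}
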
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index dcf7d11ac380..5e20220ef4c6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
.ttm_tt_populate = mgag200_ttm_tt_populate,
.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
.init_mem_type = mgag200_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = mgag200_bo_evict_flags,
.move = NULL,
.verify_access = mgag200_bo_verify_access,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 5127b75dbf40..7250ffc6322f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -25,9 +25,6 @@ bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
-
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index a54f6e036b4a..07d99bdf7c99 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -311,4 +311,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+
#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 951c002b05df..cf50d3ec8d1b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -75,15 +75,12 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev,
!(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
return;
- if (!dev->mode_config.rotation_property)
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y);
-
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&plane->base,
- dev->mode_config.rotation_property,
- DRM_ROTATE_0);
+ drm_plane_create_rotation_property(plane,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 |
+ DRM_ROTATE_180 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
}
/* helper to install properties which are common to planes and crtcs */
@@ -289,6 +286,8 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
plane_enabled(old_state), plane_enabled(state));
if (plane_enabled(state)) {
+ unsigned int rotation;
+
format = to_mdp_format(msm_framebuffer_format(state->fb));
if (MDP_FORMAT_IS_YUV(format) &&
!pipe_supports_yuv(mdp5_plane->caps)) {
@@ -309,8 +308,13 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- hflip = !!(state->rotation & DRM_REFLECT_X);
- vflip = !!(state->rotation & DRM_REFLECT_Y);
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_ROTATE_0 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
+ hflip = !!(rotation & DRM_REFLECT_X);
+ vflip = !!(rotation & DRM_REFLECT_Y);
+
if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
dev_err(plane->dev->dev,
@@ -681,6 +685,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
+ unsigned int rotation;
bool vflip, hflip;
unsigned long flags;
int ret;
@@ -743,8 +748,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
config |= get_scale_config(format, src_h, crtc_h, false);
DBG("scale config = %x", config);
- hflip = !!(pstate->rotation & DRM_REFLECT_X);
- vflip = !!(pstate->rotation & DRM_REFLECT_Y);
+ rotation = drm_rotation_simplify(pstate->rotation,
+ DRM_ROTATE_0 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y);
+ hflip = !!(rotation & DRM_REFLECT_X);
+ vflip = !!(rotation & DRM_REFLECT_Y);
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
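
drm_rotation_simplify() rewrites a requested rotation into an equivalent one expressible with the supported flags; in particular a 180-degree rotate equals reflecting both axes, which is why mdp5 can now honor DRM_ROTATE_180 with flip-only hardware. For example:

	unsigned int rot = drm_rotation_simplify(DRM_ROTATE_180,
						 DRM_ROTATE_0 |
						 DRM_REFLECT_X |
						 DRM_REFLECT_Y);
	/* rot == DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y */
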
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 73bae382eac3..db193f835298 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -141,7 +141,7 @@ static void complete_commit(struct msm_commit *c, bool async)
kms->funcs->complete_commit(kms, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
commit_destroy(c);
}
@@ -256,6 +256,7 @@ int msm_atomic_commit(struct drm_device *dev,
* current layout.
*/
+ drm_atomic_state_get(state);
if (nonblock) {
queue_work(priv->atomic_wq, &c->work);
return 0;
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 663f2b6ef091..3c853733c99a 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -18,6 +18,7 @@
#ifdef CONFIG_DEBUG_FS
#include "msm_drv.h"
#include "msm_gpu.h"
+#include "msm_debugfs.h"
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index fb5c0b0a7594..84d38eaea585 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_of.h>
+
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
@@ -919,8 +921,8 @@ static int add_components_mdp(struct device *mdp_dev,
continue;
}
- component_match_add(master_dev, matchptr, compare_of, intf);
-
+ drm_of_component_match_add(master_dev, matchptr, compare_of,
+ intf);
of_node_put(intf);
of_node_put(ep_node);
}
@@ -962,8 +964,8 @@ static int add_display_components(struct device *dev,
put_device(mdp_dev);
/* add the MDP component itself */
- component_match_add(dev, matchptr, compare_of,
- mdp_dev->of_node);
+ drm_of_component_match_add(dev, matchptr, compare_of,
+ mdp_dev->of_node);
} else {
/* MDP4 */
mdp_dev = dev;
@@ -996,7 +998,7 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;
- component_match_add(dev, matchptr, compare_of, np);
+ drm_of_component_match_add(dev, matchptr, compare_of, np);
of_node_put(np);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d0da52f2a806..940bf4992fe2 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -217,7 +217,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct fence *fence);
+ struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index a9b9b1c95a2e..3f299c537b77 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -15,7 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include "msm_drv.h"
#include "msm_fence.h"
@@ -32,7 +32,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
fctx->dev = dev;
fctx->name = name;
- fctx->context = fence_context_alloc(1);
+ fctx->context = dma_fence_context_alloc(1);
init_waitqueue_head(&fctx->event);
spin_lock_init(&fctx->spinlock);
@@ -100,52 +100,52 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
struct msm_fence {
struct msm_fence_context *fctx;
- struct fence base;
+ struct dma_fence base;
};
-static inline struct msm_fence *to_msm_fence(struct fence *fence)
+static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
{
return container_of(fence, struct msm_fence, base);
}
-static const char *msm_fence_get_driver_name(struct fence *fence)
+static const char *msm_fence_get_driver_name(struct dma_fence *fence)
{
return "msm";
}
-static const char *msm_fence_get_timeline_name(struct fence *fence)
+static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return f->fctx->name;
}
-static bool msm_fence_enable_signaling(struct fence *fence)
+static bool msm_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static bool msm_fence_signaled(struct fence *fence)
+static bool msm_fence_signaled(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return fence_completed(f->fctx, f->base.seqno);
}
-static void msm_fence_release(struct fence *fence)
+static void msm_fence_release(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
kfree_rcu(f, base.rcu);
}
-static const struct fence_ops msm_fence_ops = {
+static const struct dma_fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
.enable_signaling = msm_fence_enable_signaling,
.signaled = msm_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = msm_fence_release,
};
-struct fence *
+struct dma_fence *
msm_fence_alloc(struct msm_fence_context *fctx)
{
struct msm_fence *f;
@@ -156,8 +156,8 @@ msm_fence_alloc(struct msm_fence_context *fctx)
f->fctx = fctx;
- fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
- fctx->context, ++fctx->last_fence);
+ dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
+ fctx->context, ++fctx->last_fence);
return &f->base;
}
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index ceb5b3d314b4..56061aa1959d 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -41,6 +41,6 @@ int msm_queue_fence_cb(struct msm_fence_context *fctx,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
-struct fence * msm_fence_alloc(struct msm_fence_context *fctx);
+struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
#endif
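
The msm fence changes are the tree-wide struct fence to struct dma_fence rename: every type, helper and callback gains a dma_ prefix with unchanged semantics. A sketch of the converted call pattern, using msm_fence_alloc() from this header (the wrapper function is illustrative):

static void fence_sketch(struct msm_fence_context *fctx)
{
	struct dma_fence *fence = msm_fence_alloc(fctx);

	if (IS_ERR(fence))
		return;
	if (!dma_fence_is_signaled(fence))
		dma_fence_wait(fence, true);	/* interruptible */
	dma_fence_put(fence);
}
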
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b6ac27e31929..57db7dbbb618 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -521,7 +521,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i, ret;
if (!exclusive) {
@@ -540,7 +540,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
fence = reservation_object_get_excl(msm_obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -553,7 +553,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(msm_obj->resv));
if (fence->context != fctx->context) {
- ret = fence_wait(fence, true);
+ ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
@@ -563,7 +563,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct fence *fence)
+ struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
@@ -616,10 +616,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
}
#ifdef CONFIG_DEBUG_FS
-static void describe_fence(struct fence *fence, const char *type,
+static void describe_fence(struct dma_fence *fence, const char *type,
struct seq_file *m)
{
- if (!fence_is_signaled(fence))
+ if (!dma_fence_is_signaled(fence))
seq_printf(m, "\t%9s: %s %s seq %u\n", type,
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
@@ -631,7 +631,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object *robj = msm_obj->resv;
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index b2f13cfe945e..2cb8551fda70 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -104,7 +104,7 @@ struct msm_gem_submit {
struct list_head node; /* node in gpu submit_list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
- struct fence *fence;
+ struct dma_fence *fence;
struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */
unsigned int nr_cmds;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b6a0f37a65f3..25e8786fa4ca 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -60,7 +60,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
void msm_gem_submit_free(struct msm_gem_submit *submit)
{
- fence_put(submit->fence);
+ dma_fence_put(submit->fence);
list_del(&submit->node);
put_pid(submit->pid);
kfree(submit);
@@ -380,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
struct msm_gpu *gpu = priv->gpu;
- struct fence *in_fence = NULL;
+ struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
int out_fence_fd = -1;
unsigned i;
@@ -439,7 +439,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
*/
if (in_fence->context != gpu->fctx->context) {
- ret = fence_wait(in_fence, true);
+ ret = dma_fence_wait(in_fence, true);
if (ret)
goto out;
}
@@ -542,7 +542,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
if (in_fence)
- fence_put(in_fence);
+ dma_fence_put(in_fence);
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5bb09838b5ae..3249707e6834 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -476,7 +476,7 @@ static void retire_submits(struct msm_gpu *gpu)
submit = list_first_entry(&gpu->submit_list,
struct msm_gem_submit, node);
- if (fence_is_signaled(submit->fence)) {
+ if (dma_fence_is_signaled(submit->fence)) {
retire_submit(gpu, submit);
} else {
break;
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2527bf4ca5d9..fde6e3656636 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -22,6 +22,7 @@ nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
nouveau-y += nouveau_drm.o
nouveau-y += nouveau_hwmon.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+nouveau-$(CONFIG_LEDS_CLASS) += nouveau_led.o
nouveau-y += nouveau_nvif.o
nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
nouveau-y += nouveau_usif.o # userspace <-> nvif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index a47d46dda704..b7a54e605469 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -6,6 +6,7 @@ enum dcb_gpio_func_name {
DCB_GPIO_TVDAC1 = 0x2d,
DCB_GPIO_FAN = 0x09,
DCB_GPIO_FAN_SENSE = 0x3d,
+ DCB_GPIO_LOGO_LED_PWM = 0x84,
DCB_GPIO_UNUSED = 0xff,
DCB_GPIO_VID0 = 0x04,
DCB_GPIO_VID1 = 0x05,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
index 9cb97477248b..e933d3eede70 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
@@ -1,10 +1,16 @@
#ifndef __NVBIOS_ICCSENSE_H__
#define __NVBIOS_ICCSENSE_H__
+struct pwr_rail_resistor_t {
+ u8 mohm;
+ bool enabled;
+};
+
struct pwr_rail_t {
u8 mode;
u8 extdev_id;
- u8 resistor_mohm;
- u8 rail;
+ u8 resistor_count;
+ struct pwr_rail_resistor_t resistors[3];
+ u16 config;
};
struct nvbios_iccsense {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
index 6633c6db9281..8fa1294c27b7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
@@ -1,6 +1,9 @@
#ifndef __NVBIOS_VMAP_H__
#define __NVBIOS_VMAP_H__
struct nvbios_vmap {
+ u8 max0;
+ u8 max1;
+ u8 max2;
};
u16 nvbios_vmap_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
@@ -8,7 +11,7 @@ u16 nvbios_vmap_parse(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_vmap *);
struct nvbios_vmap_entry {
- u8 unk0;
+ u8 mode;
u8 link;
u32 min;
u32 max;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
index b0df610cec2b..23f3d1b93ebb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
@@ -13,8 +13,9 @@ struct nvbios_volt {
u32 base;
/* GPIO mode */
- u8 vidmask;
- s16 step;
+ bool ranged;
+ u8 vidmask;
+ s16 step;
/* PWM mode */
u32 pwm_freq;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
new file mode 100644
index 000000000000..87f804fc3a88
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
@@ -0,0 +1,24 @@
+#ifndef __NVBIOS_VPSTATE_H__
+#define __NVBIOS_VPSTATE_H__
+struct nvbios_vpstate_header {
+ u32 offset;
+
+ u8 version;
+ u8 hlen;
+ u8 ecount;
+ u8 elen;
+ u8 scount;
+ u8 slen;
+
+ u8 base_id;
+ u8 boost_id;
+ u8 tdp_id;
+};
+struct nvbios_vpstate_entry {
+ u8 pstate;
+ u16 clock_mhz;
+};
+int nvbios_vpstate_parse(struct nvkm_bios *, struct nvbios_vpstate_header *);
+int nvbios_vpstate_entry(struct nvkm_bios *, struct nvbios_vpstate_header *,
+ u8 idx, struct nvbios_vpstate_entry *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index fb54417bc458..e5275f742977 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -6,6 +6,10 @@
struct nvbios_pll;
struct nvkm_pll_vals;
+#define NVKM_CLK_CSTATE_DEFAULT -1 /* POSTed default */
+#define NVKM_CLK_CSTATE_BASE -2 /* pstate base */
+#define NVKM_CLK_CSTATE_HIGHEST -3 /* highest possible */
+
enum nv_clk_src {
nv_clk_src_crystal,
nv_clk_src_href,
@@ -52,6 +56,7 @@ struct nvkm_cstate {
struct list_head head;
u8 voltage;
u32 domain[nv_clk_src_max];
+ u8 id;
};
struct nvkm_pstate {
@@ -67,7 +72,8 @@ struct nvkm_pstate {
struct nvkm_domain {
enum nv_clk_src name;
u8 bios; /* 0xff for none */
-#define NVKM_CLK_DOM_FLAG_CORE 0x01
+#define NVKM_CLK_DOM_FLAG_CORE 0x01
+#define NVKM_CLK_DOM_FLAG_VPSTATE 0x02
u8 flags;
const char *mname;
int mdiv;
@@ -93,10 +99,16 @@ struct nvkm_clk {
int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */
int astate; /* perfmon adjustment (base) */
- int tstate; /* thermal adjustment (max-) */
int dstate; /* display adjustment (min+) */
+ u8 temp;
bool allow_reclock;
+#define NVKM_CLK_BOOST_NONE 0x0
+#define NVKM_CLK_BOOST_BIOS 0x1
+#define NVKM_CLK_BOOST_FULL 0x2
+ u8 boost_mode;
+ u32 base_khz;
+ u32 boost_khz;
/*XXX: die, these are here *only* to support the completely
* bat-shit insane what-was-nouveau_hw.c code
@@ -110,7 +122,7 @@ int nvkm_clk_read(struct nvkm_clk *, enum nv_clk_src);
int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
-int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel);
+int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature);
int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index b765f4ffcde6..08ef9983c643 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -15,12 +15,28 @@ struct nvkm_volt {
u32 max_uv;
u32 min_uv;
+
+ /*
+ * These are fully functional voltage-map entries creating a software
+ * ceiling for the voltage. Each can describe a different kind of curve,
+ * so that for any given temperature a different one may yield the
+ * lowest value of the three.
+ */
+ u8 max0_id;
+ u8 max1_id;
+ u8 max2_id;
+
+ int speedo;
};
+int nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temperature);
+int nvkm_volt_map_min(struct nvkm_volt *volt, u8 id);
int nvkm_volt_get(struct nvkm_volt *);
-int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
+int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
+ int condition);
int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
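How the three ceiling entries combine is easiest to see in isolation; a hedged sketch (the same min-of-three logic appears in nvkm_cstate_find_best() in clk/base.c below; 0xff marks an absent entry, and the helper name is illustrative):

static int volt_ceiling_uv(struct nvkm_volt *volt, u8 temp)
{
	int max = volt->max_uv;

	/* each id selects a vmap entry describing a temperature curve */
	if (volt->max0_id != 0xff)
		max = min(max, nvkm_volt_map(volt, volt->max0_id, temp));
	if (volt->max1_id != 0xff)
		max = min(max, nvkm_volt_map(volt, volt->max1_id, temp));
	if (volt->max2_id != 0xff)
		max = min(max, nvkm_volt_map(volt, volt->max2_id, temp));
	return max;
}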
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index f5101be806cb..5e2c5685b4dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -232,6 +232,7 @@ nouveau_backlight_init(struct drm_device *dev)
case NV_DEVICE_INFO_V0_TESLA:
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
+ case NV_DEVICE_INFO_V0_MAXWELL:
return nv50_backlight_init(connector);
default:
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 0067586eb015..18eb061ccafb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -31,10 +31,8 @@
#define DCB_LOC_ON_CHIP 0
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
-#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
-#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROM16(x) get_unaligned_le16(&(x))
+#define ROM32(x) get_unaligned_le32(&(x))
#define ROMPTR(d,x) ({ \
struct nouveau_drm *drm = nouveau_drm((d)); \
ROM16(x) ? &drm->vbios.data[ROM16(x)] : NULL; \
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 343b8659472c..e0c0007689e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -83,13 +83,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
- struct fence *fence)
+ struct dma_fence *fence)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (tile) {
spin_lock(&drm->tile.lock);
- tile->fence = (struct nouveau_fence *)fence_get(fence);
+ tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
tile->used = false;
spin_unlock(&drm->tile.lock);
}
@@ -1243,7 +1243,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct fence *fence = reservation_object_get_excl(bo->resv);
+ struct dma_fence *fence = reservation_object_get_excl(bo->resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -1561,6 +1561,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
.move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3100fd88a015..6adf94789417 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -47,6 +47,7 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_vga.h"
+#include "nouveau_led.h"
#include "nouveau_hwmon.h"
#include "nouveau_acpi.h"
#include "nouveau_bios.h"
@@ -475,6 +476,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_hwmon_init(dev);
nouveau_accel_init(drm);
nouveau_fbcon_init(dev);
+ nouveau_led_init(dev);
if (nouveau_runtime_pm != 0) {
pm_runtime_use_autosuspend(dev->dev);
@@ -510,6 +512,7 @@ nouveau_drm_unload(struct drm_device *dev)
pm_runtime_forbid(dev->dev);
}
+ nouveau_led_fini(dev);
nouveau_fbcon_fini(dev);
nouveau_accel_fini(drm);
nouveau_hwmon_fini(dev);
@@ -561,6 +564,8 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
struct nouveau_cli *cli;
int ret;
+ nouveau_led_suspend(dev);
+
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "suspending console...\n");
nouveau_fbcon_set_suspend(dev, 1);
@@ -649,6 +654,8 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
nouveau_fbcon_set_suspend(dev, 0);
}
+ nouveau_led_resume(dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 822a0212cd48..c0e2b3207503 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -166,6 +166,9 @@ struct nouveau_drm {
struct nouveau_hwmon *hwmon;
struct nouveau_debugfs *debugfs;
+ /* led management */
+ struct nouveau_led *led;
+
/* display power reference */
bool have_disp_power_ref;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4bb9ab892ae1..e9529ee6bc23 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,7 +28,7 @@
#include <linux/ktime.h>
#include <linux/hrtimer.h>
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
#include <nvif/cl826e.h>
#include <nvif/notify.h>
@@ -38,11 +38,11 @@
#include "nouveau_dma.h"
#include "nouveau_fence.h"
-static const struct fence_ops nouveau_fence_ops_uevent;
-static const struct fence_ops nouveau_fence_ops_legacy;
+static const struct dma_fence_ops nouveau_fence_ops_uevent;
+static const struct dma_fence_ops nouveau_fence_ops_legacy;
static inline struct nouveau_fence *
-from_fence(struct fence *fence)
+from_fence(struct dma_fence *fence)
{
return container_of(fence, struct nouveau_fence, base);
}
@@ -58,23 +58,23 @@ nouveau_fence_signal(struct nouveau_fence *fence)
{
int drop = 0;
- fence_signal_locked(&fence->base);
+ dma_fence_signal_locked(&fence->base);
list_del(&fence->head);
rcu_assign_pointer(fence->channel, NULL);
- if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
+ if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
if (!--fctx->notify_ref)
drop = 1;
}
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
return drop;
}
static struct nouveau_fence *
-nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) {
+nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) {
struct nouveau_fence_priv *priv = (void*)drm->fence;
if (fence->ops != &nouveau_fence_ops_legacy &&
@@ -201,7 +201,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
struct nouveau_fence_work {
struct work_struct work;
- struct fence_cb cb;
+ struct dma_fence_cb cb;
void (*func)(void *);
void *data;
};
@@ -214,7 +214,7 @@ nouveau_fence_work_handler(struct work_struct *kwork)
kfree(work);
}
-static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
+static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
@@ -222,12 +222,12 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
}
void
-nouveau_fence_work(struct fence *fence,
+nouveau_fence_work(struct dma_fence *fence,
void (*func)(void *), void *data)
{
struct nouveau_fence_work *work;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto err;
work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -245,7 +245,7 @@ nouveau_fence_work(struct fence *fence,
work->func = func;
work->data = data;
- if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
+ if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
goto err_free;
return;
@@ -266,17 +266,17 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
fence->timeout = jiffies + (15 * HZ);
if (priv->uevent)
- fence_init(&fence->base, &nouveau_fence_ops_uevent,
- &fctx->lock, fctx->context, ++fctx->sequence);
+ dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
+ &fctx->lock, fctx->context, ++fctx->sequence);
else
- fence_init(&fence->base, &nouveau_fence_ops_legacy,
- &fctx->lock, fctx->context, ++fctx->sequence);
+ dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
+ &fctx->lock, fctx->context, ++fctx->sequence);
kref_get(&fctx->fence_ref);
- trace_fence_emit(&fence->base);
+ trace_dma_fence_emit(&fence->base);
ret = fctx->emit(fence);
if (!ret) {
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
spin_lock_irq(&fctx->lock);
if (nouveau_fence_update(chan, fctx))
@@ -298,7 +298,7 @@ nouveau_fence_done(struct nouveau_fence *fence)
struct nouveau_channel *chan;
unsigned long flags;
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return true;
spin_lock_irqsave(&fctx->lock, flags);
@@ -307,11 +307,11 @@ nouveau_fence_done(struct nouveau_fence *fence)
nvif_notify_put(&fctx->notify);
spin_unlock_irqrestore(&fctx->lock, flags);
}
- return fence_is_signaled(&fence->base);
+ return dma_fence_is_signaled(&fence->base);
}
static long
-nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
+nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
struct nouveau_fence *fence = from_fence(f);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
@@ -378,7 +378,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
if (!lazy)
return nouveau_fence_wait_busy(fence, intr);
- ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
+ ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
if (ret < 0)
return ret;
else if (!ret)
@@ -391,7 +391,7 @@ int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct fence *fence;
+ struct dma_fence *fence;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
struct nouveau_fence *f;
@@ -421,7 +421,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
}
if (must_wait)
- ret = fence_wait(fence, intr);
+ ret = dma_fence_wait(fence, intr);
return ret;
}
@@ -446,7 +446,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
}
if (must_wait)
- ret = fence_wait(fence, intr);
+ ret = dma_fence_wait(fence, intr);
}
return ret;
@@ -456,7 +456,7 @@ void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
if (*pfence)
- fence_put(&(*pfence)->base);
+ dma_fence_put(&(*pfence)->base);
*pfence = NULL;
}
@@ -484,12 +484,12 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
return ret;
}
-static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
+static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
return "nouveau";
}
-static const char *nouveau_fence_get_timeline_name(struct fence *f)
+static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -503,7 +503,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
* result. The drm node should still be there, so we can derive the index from
* the fence context.
*/
-static bool nouveau_fence_is_signaled(struct fence *f)
+static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -519,7 +519,7 @@ static bool nouveau_fence_is_signaled(struct fence *f)
return ret;
}
-static bool nouveau_fence_no_signaling(struct fence *f)
+static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
@@ -530,30 +530,30 @@ static bool nouveau_fence_no_signaling(struct fence *f)
WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
/*
- * This needs uevents to work correctly, but fence_add_callback relies on
+ * This needs uevents to work correctly, but dma_fence_add_callback relies on
* being able to enable signaling. It will still get signaled eventually,
* just not right away.
*/
if (nouveau_fence_is_signaled(f)) {
list_del(&fence->head);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
return false;
}
return true;
}
-static void nouveau_fence_release(struct fence *f)
+static void nouveau_fence_release(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
-static const struct fence_ops nouveau_fence_ops_legacy = {
+static const struct dma_fence_ops nouveau_fence_ops_legacy = {
.get_driver_name = nouveau_fence_get_get_driver_name,
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_no_signaling,
@@ -562,7 +562,7 @@ static const struct fence_ops nouveau_fence_ops_legacy = {
.release = nouveau_fence_release
};
-static bool nouveau_fence_enable_signaling(struct fence *f)
+static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
@@ -573,18 +573,18 @@ static bool nouveau_fence_enable_signaling(struct fence *f)
ret = nouveau_fence_no_signaling(f);
if (ret)
- set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
+ set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
else if (!--fctx->notify_ref)
nvif_notify_put(&fctx->notify);
return ret;
}
-static const struct fence_ops nouveau_fence_ops_uevent = {
+static const struct dma_fence_ops nouveau_fence_ops_uevent = {
.get_driver_name = nouveau_fence_get_get_driver_name,
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_enable_signaling,
.signaled = nouveau_fence_is_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = NULL
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64c4ce7115ad..41f3c019e534 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,14 +1,14 @@
#ifndef __NOUVEAU_FENCE_H__
#define __NOUVEAU_FENCE_H__
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <nvif/notify.h>
struct nouveau_drm;
struct nouveau_bo;
struct nouveau_fence {
- struct fence base;
+ struct dma_fence base;
struct list_head head;
@@ -24,7 +24,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct fence *, void (*)(void *), void *);
+void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 72e2399bce39..7f083c95f422 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -119,7 +119,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
- struct fence *fence = NULL;
+ struct dma_fence *fence = NULL;
fobj = reservation_object_get_list(resv);
@@ -861,6 +861,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo;
bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
+ long lret;
int ret;
gem = drm_gem_object_lookup(file_priv, req->handle);
@@ -868,19 +869,15 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- if (no_wait)
- ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
- else {
- long lret;
+ lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true,
+ no_wait ? 0 : 30 * HZ);
+ if (!lret)
+ ret = -EBUSY;
+ else if (lret > 0)
+ ret = 0;
+ else
+ ret = lret;
- lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
- if (!lret)
- ret = -EBUSY;
- else if (lret > 0)
- ret = 0;
- else
- ret = lret;
- }
nouveau_bo_sync_for_cpu(nvbo);
drm_gem_object_unreference_unlocked(gem);
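The rework above folds the NOWAIT path into the timed wait by passing a timeout of 0. Why that works depends on the return convention of reservation_object_wait_timeout_rcu(), which the new code assumes:

/* Assumed convention of reservation_object_wait_timeout_rcu():
 *   < 0  error (e.g. -ERESTARTSYS), propagated unchanged
 *   == 0 timed out, i.e. still busy            -> -EBUSY
 *   > 0  all fences signaled; a zero timeout
 *        still yields a positive value here    -> 0
 */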
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c
new file mode 100644
index 000000000000..3e2f1b6cd4df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_led.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2016 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Martin Peres <martin.peres@free.fr>
+ */
+
+#include <linux/leds.h>
+
+#include "nouveau_led.h"
+#include <nvkm/subdev/gpio.h>
+
+static enum led_brightness
+nouveau_led_get_brightness(struct led_classdev *led)
+{
+ struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nvif_object *device = &drm->device.object;
+ u32 div, duty;
+
+ div = nvif_rd32(device, 0x61c880) & 0x00ffffff;
+ duty = nvif_rd32(device, 0x61c884) & 0x00ffffff;
+
+ if (div > 0)
+ return duty * LED_FULL / div;
+ else
+ return 0;
+}
+
+static void
+nouveau_led_set_brightness(struct led_classdev *led, enum led_brightness value)
+{
+ struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nvif_object *device = &drm->device.object;
+
+ u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */
+ u32 freq = 100; /* this is what nvidia uses and it should be good enough */
+ u32 div, duty;
+
+ div = input_clk / freq;
+ duty = value * div / LED_FULL;
+
+ /* for now, it is safe to poke these registers directly because:
+ * - A: nvidia never drives the logo LED from any PWM controller
+ * other than PDISPLAY.SOR[1].PWM.
+ * - B: nouveau does not touch these registers anywhere else
+ */
+ */
+ nvif_wr32(device, 0x61c880, div);
+ nvif_wr32(device, 0x61c884, 0xc0000000 | duty);
+}
+
+
+int
+nouveau_led_init(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+ struct dcb_gpio_func logo_led;
+ int ret;
+
+ if (!gpio)
+ return 0;
+
+ /* check that there is a GPIO controlling the logo LED */
+ if (nvkm_gpio_find(gpio, 0, DCB_GPIO_LOGO_LED_PWM, 0xff, &logo_led))
+ return 0;
+
+ drm->led = kzalloc(sizeof(*drm->led), GFP_KERNEL);
+ if (!drm->led)
+ return -ENOMEM;
+ drm->led->dev = dev;
+
+ drm->led->led.name = "nvidia-logo";
+ drm->led->led.max_brightness = 255;
+ drm->led->led.brightness_get = nouveau_led_get_brightness;
+ drm->led->led.brightness_set = nouveau_led_set_brightness;
+
+ ret = led_classdev_register(dev->dev, &drm->led->led);
+ if (ret) {
+ kfree(drm->led);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_led_suspend(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led)
+ led_classdev_suspend(&drm->led->led);
+}
+
+void
+nouveau_led_resume(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led)
+ led_classdev_resume(&drm->led->led);
+}
+
+void
+nouveau_led_fini(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (drm->led) {
+ led_classdev_unregister(&drm->led->led);
+ kfree(drm->led);
+ drm->led = NULL;
+ }
+}
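As a quick sanity check of the PWM arithmetic in nouveau_led_set_brightness() above (numbers are illustrative): with the 27 MHz crystal and a 100 Hz target, div = 27000000 / 100 = 270000; a brightness of 128 out of LED_FULL (255) then gives duty = 128 * 270000 / 255 ≈ 135529, i.e. roughly a 50% duty cycle.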
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
new file mode 100644
index 000000000000..187ecdb82002
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@free.fr>
+ */
+
+#ifndef __NOUVEAU_LED_H__
+#define __NOUVEAU_LED_H__
+
+#include "nouveau_drv.h"
+
+struct led_classdev;
+
+struct nouveau_led {
+ struct drm_device *dev;
+
+ struct led_classdev led;
+};
+
+static inline struct nouveau_led *
+nouveau_led(struct drm_device *dev)
+{
+ return nouveau_drm(dev)->led;
+}
+
+/* nouveau_led.c */
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
+int nouveau_led_init(struct drm_device *dev);
+void nouveau_led_suspend(struct drm_device *dev);
+void nouveau_led_resume(struct drm_device *dev);
+void nouveau_led_fini(struct drm_device *dev);
+#else
+static inline int nouveau_led_init(struct drm_device *dev) { return 0; };
+static inline void nouveau_led_suspend(struct drm_device *dev) { };
+static inline void nouveau_led_resume(struct drm_device *dev) { };
+static inline void nouveau_led_fini(struct drm_device *dev) { };
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 1915b7b82a59..fa8f2375c398 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -110,6 +110,6 @@ nv04_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv04_fence_context_new;
priv->base.context_del = nv04_fence_context_del;
priv->base.contexts = 15;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 4e3de34ff6f4..f99fcf56928a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -107,7 +107,7 @@ nv10_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv10_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 31;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 7d5e562a55c5..79bc01111351 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -126,7 +126,7 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv17_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 31;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 4d6f202b7770..8c5295414578 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -97,7 +97,7 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv50_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
priv->base.contexts = 127;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
spin_lock_init(&priv->lock);
ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 18bde9d8e6d6..23ef04b4e0b2 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -229,7 +229,7 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv84_fence_context_del;
priv->base.contexts = fifo->nr;
- priv->base.context_base = fence_context_alloc(priv->base.contexts);
+ priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
/* Use VRAM if there is any ; otherwise fallback to system memory */
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 34ecd4a7e0c1..058ff46b5f16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -20,6 +20,7 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <core/device.h>
+#include <core/firmware.h>
/**
* nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7218a067a6c5..53d171729353 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1357,7 +1357,7 @@ nvc0_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1394,7 +1394,7 @@ nvc1_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1430,7 +1430,7 @@ nvc3_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1466,7 +1466,7 @@ nvc4_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1503,7 +1503,7 @@ nvc8_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1540,7 +1540,7 @@ nvce_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.ce[1] = gf100_ce_new,
.disp = gt215_disp_new,
@@ -1577,7 +1577,7 @@ nvcf_chipset = {
.pmu = gf100_pmu_new,
.therm = gt215_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gt215_disp_new,
.dma = gf100_dma_new,
@@ -1612,6 +1612,7 @@ nvd7_chipset = {
.pci = gf106_pci_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gf119_disp_new,
.dma = gf119_dma_new,
@@ -1647,7 +1648,7 @@ nvd9_chipset = {
.pmu = gf119_pmu_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
- .volt = nv40_volt_new,
+ .volt = gf100_volt_new,
.ce[0] = gf100_ce_new,
.disp = gf119_disp_new,
.dma = gf119_dma_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 62ad0300cfa5..0030cd9543b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1665,14 +1665,31 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
*pdevice = &pdev->device;
pdev->pdev = pci_dev;
- return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
- pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
- pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
- NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
- (u64)pci_domain_nr(pci_dev->bus) << 32 |
- pci_dev->bus->number << 16 |
- PCI_SLOT(pci_dev->devfn) << 8 |
- PCI_FUNC(pci_dev->devfn), name,
- cfg, dbg, detect, mmio, subdev_mask,
- &pdev->device);
+ ret = nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
+ pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
+ pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
+ NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
+ (u64)pci_domain_nr(pci_dev->bus) << 32 |
+ pci_dev->bus->number << 16 |
+ PCI_SLOT(pci_dev->devfn) << 8 |
+ PCI_FUNC(pci_dev->devfn), name,
+ cfg, dbg, detect, mmio, subdev_mask,
+ &pdev->device);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Set a preliminary DMA mask based on the .dma_bits member of the
+ * MMU subdevice. This allows other subdevices to create DMA mappings
+ * in their init() or oneinit() methods, which may be called before the
+ * TTM layer sets the DMA mask definitively.
+ * This is necessary for platforms where the default DMA mask of
+ * 32 bits does not cover any system memory, i.e., when all RAM is
+ * located above the 4 GB boundary.
+ */
+ if (subdev_mask & BIT(NVKM_SUBDEV_MMU))
+ dma_set_mask_and_coherent(&pci_dev->dev,
+ DMA_BIT_MASK(pdev->device.mmu->dma_bits));
+
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 1bb9d661e9b3..4510cb6e10a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -45,14 +45,6 @@ static const struct nvkm_output_func
g94_sor_output_func = {
};
-int
-g94_sor_output_new(struct nvkm_disp *disp, int index,
- struct dcb_output *dcbE, struct nvkm_output **poutp)
-{
- return nvkm_output_new_(&g94_sor_output_func, disp,
- index, dcbE, poutp);
-}
-
/*******************************************************************************
* DisplayPort
******************************************************************************/
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index f1e15a4d4f64..b4e3c50badc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -187,6 +187,7 @@ nv30_gr = {
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 300f5ed5de0b..e7ed04b935cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -123,6 +123,7 @@ nv34_gr = {
{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{ -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
{}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 740df0f52c38..5e8abacbacc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -124,6 +124,7 @@ nv35_gr = {
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
+ { -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 370dcd8ff7b5..6eff637ac301 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -84,7 +84,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
start = 0x0100000000ULL;
limit = start + device->func->resource_size(device, 3);
- ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
+ ret = nvkm_vm_new(device, start, limit - start, start, &bar3_lock, &vm);
if (ret)
return ret;
@@ -117,7 +117,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
start = 0x0000000000ULL;
limit = start + device->func->resource_size(device, 1);
- ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
+ ret = nvkm_vm_new(device, start, limit-- - start, start, &bar1_lock, &vm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
index dbcb0ef21587..be57220a2e01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
@@ -31,6 +31,7 @@ nvkm-y += nvkm/subdev/bios/timing.o
nvkm-y += nvkm/subdev/bios/therm.o
nvkm-y += nvkm/subdev/bios/vmap.o
nvkm-y += nvkm/subdev/bios/volt.o
+nvkm-y += nvkm/subdev/bios/vpstate.o
nvkm-y += nvkm/subdev/bios/xpio.o
nvkm-y += nvkm/subdev/bios/M0203.o
nvkm-y += nvkm/subdev/bios/M0205.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
index 084328028af1..aafd5e17b1c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -23,6 +23,7 @@
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/extdev.h>
#include <subdev/bios/iccsense.h>
static u16
@@ -77,23 +78,47 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
return -ENOMEM;
for (i = 0; i < cnt; ++i) {
+ struct nvbios_extdev_func extdev;
struct pwr_rail_t *rail = &iccsense->rail[i];
+ u8 res_start = 0;
+ int r;
+
entry = table + hdr + i * len;
switch(ver) {
case 0x10:
rail->mode = nvbios_rd08(bios, entry + 0x1);
rail->extdev_id = nvbios_rd08(bios, entry + 0x2);
- rail->resistor_mohm = nvbios_rd08(bios, entry + 0x3);
- rail->rail = nvbios_rd08(bios, entry + 0x4);
+ res_start = 0x3;
break;
case 0x20:
rail->mode = nvbios_rd08(bios, entry);
rail->extdev_id = nvbios_rd08(bios, entry + 0x1);
- rail->resistor_mohm = nvbios_rd08(bios, entry + 0x5);
- rail->rail = nvbios_rd08(bios, entry + 0x6);
+ res_start = 0x5;
+ break;
+ };
+
+ if (nvbios_extdev_parse(bios, rail->extdev_id, &extdev))
+ continue;
+
+ switch (extdev.type) {
+ case NVBIOS_EXTDEV_INA209:
+ case NVBIOS_EXTDEV_INA219:
+ rail->resistor_count = 1;
+ break;
+ case NVBIOS_EXTDEV_INA3221:
+ rail->resistor_count = 3;
+ break;
+ default:
+ rail->resistor_count = 0;
break;
};
+
+ for (r = 0; r < rail->resistor_count; ++r) {
+ rail->resistors[r].mohm = nvbios_rd08(bios, entry + res_start + r * 2);
+ rail->resistors[r].enabled = !(nvbios_rd08(bios, entry + res_start + r * 2 + 1) & 0x40);
+ }
+ rail->config = nvbios_rd16(bios, entry + res_start + rail->resistor_count * 2);
}
return 0;
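With rails now carrying up to three shunt resistors (e.g. for the INA3221), a consumer presumably combines the enabled ones in parallel; a hedged sketch of that reduction (the helper name and fixed-point scale are illustrative):

static int rail_effective_mohm(const struct pwr_rail_t *rail)
{
	u32 recip = 0;	/* accumulated 1/R, scaled to avoid floats */
	int i;

	for (i = 0; i < rail->resistor_count; i++) {
		const struct pwr_rail_resistor_t *r = &rail->resistors[i];
		if (r->enabled && r->mohm)
			recip += 100000 / r->mohm;
	}
	/* parallel combination: R = 1 / sum(1/Ri) */
	return recip ? 100000 / recip : 0;
}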
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
index 2f13db745948..32bd8b1d154f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
@@ -61,7 +61,17 @@ nvbios_vmap_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
memset(info, 0x00, sizeof(*info));
switch (!!vmap * *ver) {
case 0x10:
+ info->max0 = 0xff;
+ info->max1 = 0xff;
+ info->max2 = 0xff;
+ break;
case 0x20:
+ info->max0 = nvbios_rd08(bios, vmap + 0x7);
+ info->max1 = nvbios_rd08(bios, vmap + 0x8);
+ if (*len >= 0xc)
+ info->max2 = nvbios_rd08(bios, vmap + 0xc);
+ else
+ info->max2 = 0xff;
break;
}
return vmap;
@@ -95,7 +105,7 @@ nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
info->arg[2] = nvbios_rd32(bios, vmap + 0x10);
break;
case 0x20:
- info->unk0 = nvbios_rd08(bios, vmap + 0x00);
+ info->mode = nvbios_rd08(bios, vmap + 0x00);
info->link = nvbios_rd08(bios, vmap + 0x01);
info->min = nvbios_rd32(bios, vmap + 0x02);
info->max = nvbios_rd32(bios, vmap + 0x06);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 6e0a33648be9..4504822ace51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -75,20 +75,24 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
case 0x12:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x04);
+ info->ranged = false;
break;
case 0x20:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x05);
+ info->ranged = false;
break;
case 0x30:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x04);
+ info->ranged = false;
break;
case 0x40:
info->type = NVBIOS_VOLT_GPIO;
info->base = nvbios_rd32(bios, volt + 0x04);
info->step = nvbios_rd16(bios, volt + 0x08);
info->vidmask = nvbios_rd08(bios, volt + 0x0b);
+ info->ranged = true; /* XXX: find the flag byte */
/*XXX*/
info->min = 0;
info->max = info->base;
@@ -104,9 +108,11 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
info->pwm_freq = nvbios_rd32(bios, volt + 0x5) / 1000;
info->pwm_range = nvbios_rd32(bios, volt + 0x16);
} else {
- info->type = NVBIOS_VOLT_GPIO;
- info->vidmask = nvbios_rd08(bios, volt + 0x06);
- info->step = nvbios_rd16(bios, volt + 0x16);
+ info->type = NVBIOS_VOLT_GPIO;
+ info->vidmask = nvbios_rd08(bios, volt + 0x06);
+ info->step = nvbios_rd16(bios, volt + 0x16);
+ info->ranged =
+ !!(nvbios_rd08(bios, volt + 0x4) & 0x2);
}
break;
}
@@ -142,7 +148,10 @@ nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
info->vid = nvbios_rd08(bios, volt + 0x01) >> 2;
break;
case 0x40:
+ break;
case 0x50:
+ info->voltage = nvbios_rd32(bios, volt) & 0x001fffff;
+ info->vid = (nvbios_rd32(bios, volt) >> 23) & 0xff;
break;
}
return volt;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
new file mode 100644
index 000000000000..f199270163d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/vpstate.h>
+
+static u32
+nvbios_vpstate_offset(struct nvkm_bios *b)
+{
+ struct bit_entry bit_P;
+
+ if (!bit_entry(b, 'P', &bit_P)) {
+ if (bit_P.version == 2)
+ return nvbios_rd32(b, bit_P.offset + 0x38);
+ }
+
+ return 0x0000;
+}
+
+int
+nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h)
+{
+ if (!h)
+ return -EINVAL;
+
+ h->offset = nvbios_vpstate_offset(b);
+ if (!h->offset)
+ return -ENODEV;
+
+ h->version = nvbios_rd08(b, h->offset);
+ switch (h->version) {
+ case 0x10:
+ h->hlen = nvbios_rd08(b, h->offset + 0x1);
+ h->elen = nvbios_rd08(b, h->offset + 0x2);
+ h->slen = nvbios_rd08(b, h->offset + 0x3);
+ h->scount = nvbios_rd08(b, h->offset + 0x4);
+ h->ecount = nvbios_rd08(b, h->offset + 0x5);
+
+ h->base_id = nvbios_rd08(b, h->offset + 0x0f);
+ h->boost_id = nvbios_rd08(b, h->offset + 0x10);
+ h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+nvbios_vpstate_entry(struct nvkm_bios *b, struct nvbios_vpstate_header *h,
+ u8 idx, struct nvbios_vpstate_entry *e)
+{
+ u32 offset;
+
+ if (!e || !h || idx > h->ecount)
+ return -EINVAL;
+
+ offset = h->offset + h->hlen + idx * (h->elen + (h->slen * h->scount));
+ e->pstate = nvbios_rd08(b, offset);
+ e->clock_mhz = nvbios_rd16(b, offset + 0x5);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 7102c25320fc..fa1c12185e19 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -27,6 +27,7 @@
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
+#include <subdev/bios/vpstate.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>
@@ -74,6 +75,88 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
/******************************************************************************
* C-States
*****************************************************************************/
+static bool
+nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
+ u32 max_volt, int temp)
+{
+ const struct nvkm_domain *domain = clk->domains;
+ struct nvkm_volt *volt = clk->subdev.device->volt;
+ int voltage;
+
+ while (domain && domain->name != nv_clk_src_max) {
+ if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
+ u32 freq = cstate->domain[domain->name];
+ switch (clk->boost_mode) {
+ case NVKM_CLK_BOOST_NONE:
+ if (clk->base_khz && freq > clk->base_khz)
+ return false;
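+ /* fall through: the base clamp implies the boost clamp as well */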
+ case NVKM_CLK_BOOST_BIOS:
+ if (clk->boost_khz && freq > clk->boost_khz)
+ return false;
+ }
+ }
+ domain++;
+ }
+
+ if (!volt)
+ return true;
+
+ voltage = nvkm_volt_map(volt, cstate->voltage, temp);
+ if (voltage < 0)
+ return false;
+ return voltage <= min(max_volt, volt->max_uv);
+}
+
+static struct nvkm_cstate *
+nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
+ struct nvkm_cstate *start)
+{
+ struct nvkm_device *device = clk->subdev.device;
+ struct nvkm_volt *volt = device->volt;
+ struct nvkm_cstate *cstate;
+ int max_volt;
+
+ if (!pstate || !start)
+ return NULL;
+
+ if (!volt)
+ return start;
+
+ max_volt = volt->max_uv;
+ if (volt->max0_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max0_id, clk->temp));
+ if (volt->max1_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max1_id, clk->temp));
+ if (volt->max2_id != 0xff)
+ max_volt = min(max_volt,
+ nvkm_volt_map(volt, volt->max2_id, clk->temp));
+
+ for (cstate = start; &cstate->head != &pstate->list;
+ cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) {
+ if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
+ break;
+ }
+
+ return cstate;
+}
+
+static struct nvkm_cstate *
+nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
+{
+ struct nvkm_cstate *cstate;
+ if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
+ return list_last_entry(&pstate->list, typeof(*cstate), head);
+ else {
+ list_for_each_entry(cstate, &pstate->list, head) {
+ if (cstate->id == cstatei)
+ return cstate;
+ }
+ }
+ return NULL;
+}
+
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
@@ -85,7 +168,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
int ret;
if (!list_empty(&pstate->list)) {
- cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
+ cstate = nvkm_cstate_get(clk, pstate, cstatei);
+ cstate = nvkm_cstate_find_best(clk, pstate, cstate);
} else {
cstate = &pstate->base;
}
@@ -99,7 +183,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
}
if (volt) {
- ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage,
+ pstate->base.voltage, clk->temp, +1);
if (ret && ret != -ENODEV) {
nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
return ret;
@@ -113,7 +198,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
}
if (volt) {
- ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
+ ret = nvkm_volt_set_id(volt, cstate->voltage,
+ pstate->base.voltage, clk->temp, -1);
if (ret && ret != -ENODEV)
nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
}
@@ -138,6 +224,7 @@ static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
struct nvkm_bios *bios = clk->subdev.device->bios;
+ struct nvkm_volt *volt = clk->subdev.device->volt;
const struct nvkm_domain *domain = clk->domains;
struct nvkm_cstate *cstate = NULL;
struct nvbios_cstepX cstepX;
@@ -148,12 +235,16 @@ nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
if (!data)
return -ENOENT;
+ if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
+ return -EINVAL;
+
cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
if (!cstate)
return -ENOMEM;
*cstate = pstate->base;
cstate->voltage = cstepX.voltage;
+ cstate->id = idx;
while (domain && domain->name != nv_clk_src_max) {
if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
@@ -175,7 +266,7 @@ static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
struct nvkm_subdev *subdev = &clk->subdev;
- struct nvkm_ram *ram = subdev->device->fb->ram;
+ struct nvkm_fb *fb = subdev->device->fb;
struct nvkm_pci *pci = subdev->device->pci;
struct nvkm_pstate *pstate;
int ret, idx = 0;
@@ -190,7 +281,8 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);
- if (ram && ram->func->calc) {
+ if (fb && fb->ram && fb->ram->func->calc) {
+ struct nvkm_ram *ram = fb->ram;
int khz = pstate->base.domain[nv_clk_src_mem];
do {
ret = ram->func->calc(ram, khz);
@@ -200,7 +292,7 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
ram->func->tidy(ram);
}
- return nvkm_cstate_prog(clk, pstate, 0);
+ return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
}
static void
@@ -214,14 +306,14 @@ nvkm_pstate_work(struct work_struct *work)
return;
clk->pwrsrc = power_supply_is_system_supplied();
- nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+ nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
- clk->astate, clk->tstate, clk->dstate);
+ clk->astate, clk->temp, clk->dstate);
pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
if (clk->state_nr && pstate != -1) {
pstate = (pstate < 0) ? clk->astate : pstate;
- pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
+ pstate = min(pstate, clk->state_nr - 1);
pstate = max(pstate, clk->dstate);
} else {
pstate = clk->pstate = -1;
@@ -448,13 +540,12 @@ nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
}
int
-nvkm_clk_tstate(struct nvkm_clk *clk, int req, int rel)
+nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
{
- if (!rel) clk->tstate = req;
- if ( rel) clk->tstate += rel;
- clk->tstate = min(clk->tstate, 0);
- clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
- return nvkm_pstate_calc(clk, true);
+ if (clk->temp == temp)
+ return 0;
+ clk->temp = temp;
+ return nvkm_pstate_calc(clk, false);
}
int
@@ -524,9 +615,9 @@ nvkm_clk_init(struct nvkm_subdev *subdev)
return clk->func->init(clk);
clk->astate = clk->state_nr - 1;
- clk->tstate = 0;
clk->dstate = 0;
clk->pstate = -1;
+ clk->temp = 90; /* reasonable default value */
nvkm_pstate_calc(clk, true);
return 0;
}
@@ -561,10 +652,22 @@ int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
int index, bool allow_reclock, struct nvkm_clk *clk)
{
+ struct nvkm_subdev *subdev = &clk->subdev;
+ struct nvkm_bios *bios = device->bios;
int ret, idx, arglen;
const char *mode;
+ struct nvbios_vpstate_header h;
+
+ nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);
+
+ if (bios && !nvbios_vpstate_parse(bios, &h)) {
+ struct nvbios_vpstate_entry base, boost;
+ if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
+ clk->boost_khz = boost.clock_mhz * 1000;
+ if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
+ clk->base_khz = base.clock_mhz * 1000;
+ }
- nvkm_subdev_ctor(&nvkm_clk, device, index, &clk->subdev);
clk->func = func;
INIT_LIST_HEAD(&clk->states);
clk->domains = func->domains;
@@ -607,6 +710,8 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
if (mode)
clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
+ clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
+ NVKM_CLK_BOOST_NONE);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 89d5543118cf..7f67f9f5a550 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -457,7 +457,7 @@ gf100_clk = {
{ nv_clk_src_hubk06 , 0x00 },
{ nv_clk_src_hubk01 , 0x01 },
{ nv_clk_src_copy , 0x02 },
- { nv_clk_src_gpc , 0x03, 0, "core", 2000 },
+ { nv_clk_src_gpc , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
{ nv_clk_src_rop , 0x04 },
{ nv_clk_src_mem , 0x05, 0, "memory", 1000 },
{ nv_clk_src_vdec , 0x06 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 06bc0d2d6ae1..0b37e3da7feb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -491,7 +491,7 @@ gk104_clk = {
.domains = {
{ nv_clk_src_crystal, 0xff },
{ nv_clk_src_href , 0xff },
- { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+ { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE | NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE },
{ nv_clk_src_mem , 0x03, 0, "memory", 500 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 76433cc66fff..3841ad6be99e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -50,24 +50,33 @@ gf100_fb_intr(struct nvkm_fb *base)
}
int
-gf100_fb_oneinit(struct nvkm_fb *fb)
+gf100_fb_oneinit(struct nvkm_fb *base)
{
- struct nvkm_device *device = fb->subdev.device;
+ struct gf100_fb *fb = gf100_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
int ret, size = 0x1000;
size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
size = min(size, 0x1000);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->mmu_rd);
+ false, &fb->base.mmu_rd);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->mmu_wr);
+ false, &fb->base.mmu_wr);
if (ret)
return ret;
+ fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (fb->r100c10_page) {
+ fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c10))
+ return -EFAULT;
+ }
+
return 0;
}
@@ -123,14 +132,6 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
nvkm_fb_ctor(func, device, index, &fb->base);
*pfb = &fb->base;
- fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c10_page) {
- fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device->dev, fb->r100c10))
- return -EFAULT;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 1b5fb02eab2a..0595e0722bfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -210,6 +210,23 @@ nv50_fb_intr(struct nvkm_fb *base)
nvkm_fifo_chan_put(fifo, flags, &chan);
}
+static int
+nv50_fb_oneinit(struct nvkm_fb *base)
+{
+ struct nv50_fb *fb = nv50_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
+
+ fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (fb->r100c08_page) {
+ fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c08))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static void
nv50_fb_init(struct nvkm_fb *base)
{
@@ -245,6 +262,7 @@ nv50_fb_dtor(struct nvkm_fb *base)
static const struct nvkm_fb_func
nv50_fb_ = {
.dtor = nv50_fb_dtor,
+ .oneinit = nv50_fb_oneinit,
.init = nv50_fb_init,
.intr = nv50_fb_intr,
.ram_new = nv50_fb_ram_new,
@@ -263,16 +281,6 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
fb->func = func;
*pfb = &fb->base;
- fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c08_page) {
- fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device->dev, fb->r100c08))
- return -EFAULT;
- } else {
- nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index b9ec0ae6723a..b60068b7d8f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -24,6 +24,7 @@ int gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+int gk104_ram_ctor(struct nvkm_fb *, struct nvkm_ram **, u32);
int gk104_ram_init(struct nvkm_ram *ram);
/* RAM type-specific MR calculation routines */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 1fa3ade468ae..7904fa41acef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -259,7 +259,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0000);
/* MR1: turn termination on early, for some reason.. */
if ((ram->base.mr[1] & 0x03c) != 0x030) {
@@ -658,7 +660,9 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
gk104_ram_train(fuc, 0x80020000, 0x01000000);
ram_unblock(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
data = 0x00000800;
@@ -706,7 +710,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0000);
if (vc == 1 && ram_have(fuc, gpio2E)) {
u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
@@ -936,7 +942,9 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_nsec(fuc, 1000);
ram_unblock(fuc);
- ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+ if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
data = 0x00000800;
@@ -1530,6 +1538,12 @@ gk104_ram_func = {
int
gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
+ return gk104_ram_ctor(fb, pram, 0x022554);
+}
+
+int
+gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
+{
struct nvkm_subdev *subdev = &fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
@@ -1544,7 +1558,7 @@ gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
return -ENOMEM;
*pram = &ram->base;
- ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
+ ret = gf100_ram_ctor(&gk104_ram_func, fb, maskaddr, &ram->base);
if (ret)
return ret;
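
The four ramgk104.c hunks above guard every write to 0x62c000 (the display blanking poke done around reclocking) behind nvkm_device_engine(..., NVKM_ENGINE_DISP), so boards without a display engine skip the register entirely; gk104_ram_new() is also split so its body becomes gk104_ram_ctor() with the mask register address as a parameter. A hypothetical helper that would fold the repeated guard (not part of the patch, shown only to make the pattern explicit):

    static void gk104_ram_disp_wr32(struct gk104_ram *ram,
                                    struct gk104_ramfuc *fuc, u32 data)
    {
            /* Only touch 0x62c000 when a display engine exists. */
            if (nvkm_device_engine(ram->base.fb->subdev.device,
                                   NVKM_ENGINE_DISP))
                    ram_wr32(fuc, 0x62c000, data);
    }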
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 43d807f6ca71..ac862d1d77bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -23,18 +23,8 @@
*/
#include "ram.h"
-static const struct nvkm_ram_func
-gm107_ram_func = {
- .init = gk104_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
-};
-
int
gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
- return -ENOMEM;
-
- return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram);
+ return gk104_ram_ctor(fb, pram, 0x021c14);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index b7159b338fac..1a4ab825852c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -29,7 +29,7 @@ gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
nvkm_mask(device, 0x137250, 0x3f, 0);
nvkm_mask(device, 0x000200, 0x20, 0);
- usleep_range(20, 30);
+ udelay(20);
nvkm_mask(device, 0x000200, 0x20, 0x20);
nvkm_wr32(device, 0x12004c, 0x4);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index 41bd5d0f7692..658355fc9354 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -96,60 +96,12 @@ nvkm_iccsense_ina3221_read(struct nvkm_iccsense *iccsense,
}
static void
-nvkm_iccsense_ina209_config(struct nvkm_iccsense *iccsense,
- struct nvkm_iccsense_sensor *sensor)
-{
- struct nvkm_subdev *subdev = &iccsense->subdev;
- /* configuration:
- * 0x0007: 0x0007 shunt and bus continous
- * 0x0078: 0x0078 128 samples shunt
- * 0x0780: 0x0780 128 samples bus
- * 0x1800: 0x0000 +-40 mV shunt range
- * 0x2000: 0x0000 16V FSR
- */
- u16 value = 0x07ff;
- nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
- nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
-}
-
-static void
-nvkm_iccsense_ina3221_config(struct nvkm_iccsense *iccsense,
- struct nvkm_iccsense_sensor *sensor)
-{
- struct nvkm_subdev *subdev = &iccsense->subdev;
- /* configuration:
- * 0x0007: 0x0007 shunt and bus continous
- * 0x0031: 0x0000 140 us conversion time shunt
- * 0x01c0: 0x0000 140 us conversion time bus
- * 0x0f00: 0x0f00 1024 samples
- * 0x7000: 0x?000 channels
- */
- u16 value = 0x0e07;
- if (sensor->rail_mask & 0x1)
- value |= 0x1 << 14;
- if (sensor->rail_mask & 0x2)
- value |= 0x1 << 13;
- if (sensor->rail_mask & 0x4)
- value |= 0x1 << 12;
- nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
- nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
-}
-
-static void
nvkm_iccsense_sensor_config(struct nvkm_iccsense *iccsense,
struct nvkm_iccsense_sensor *sensor)
{
- switch (sensor->type) {
- case NVBIOS_EXTDEV_INA209:
- case NVBIOS_EXTDEV_INA219:
- nvkm_iccsense_ina209_config(iccsense, sensor);
- break;
- case NVBIOS_EXTDEV_INA3221:
- nvkm_iccsense_ina3221_config(iccsense, sensor);
- break;
- default:
- break;
- }
+ struct nvkm_subdev *subdev = &iccsense->subdev;
+ nvkm_trace(subdev, "write config of extdev %i: 0x%04x\n", sensor->id, sensor->config);
+ nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, sensor->config);
}
int
@@ -196,7 +148,6 @@ nvkm_iccsense_dtor(struct nvkm_subdev *subdev)
static struct nvkm_iccsense_sensor*
nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
{
-
struct nvkm_subdev *subdev = &iccsense->subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct nvkm_i2c *i2c = subdev->device->i2c;
@@ -245,7 +196,7 @@ nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
sensor->type = extdev.type;
sensor->i2c = &i2c_bus->i2c;
sensor->addr = addr;
- sensor->rail_mask = 0x0;
+ sensor->config = 0x0;
return sensor;
}
@@ -273,48 +224,56 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
iccsense->data_valid = true;
for (i = 0; i < stbl.nr_entry; ++i) {
- struct pwr_rail_t *r = &stbl.rail[i];
- struct nvkm_iccsense_rail *rail;
+ struct pwr_rail_t *pwr_rail = &stbl.rail[i];
struct nvkm_iccsense_sensor *sensor;
- int (*read)(struct nvkm_iccsense *,
- struct nvkm_iccsense_rail *);
+ int r;
- if (!r->mode || r->resistor_mohm == 0)
+ if (pwr_rail->mode != 1 || !pwr_rail->resistor_count)
continue;
- sensor = nvkm_iccsense_get_sensor(iccsense, r->extdev_id);
+ sensor = nvkm_iccsense_get_sensor(iccsense, pwr_rail->extdev_id);
if (!sensor)
continue;
- switch (sensor->type) {
- case NVBIOS_EXTDEV_INA209:
- if (r->rail != 0)
- continue;
- read = nvkm_iccsense_ina209_read;
- break;
- case NVBIOS_EXTDEV_INA219:
- if (r->rail != 0)
+ if (!sensor->config)
+ sensor->config = pwr_rail->config;
+ else if (sensor->config != pwr_rail->config)
+ nvkm_error(subdev, "config mismatch found for extdev %i\n", pwr_rail->extdev_id);
+
+ for (r = 0; r < pwr_rail->resistor_count; ++r) {
+ struct nvkm_iccsense_rail *rail;
+ struct pwr_rail_resistor_t *res = &pwr_rail->resistors[r];
+ int (*read)(struct nvkm_iccsense *,
+ struct nvkm_iccsense_rail *);
+
+ if (!res->mohm || !res->enabled)
continue;
- read = nvkm_iccsense_ina219_read;
- break;
- case NVBIOS_EXTDEV_INA3221:
- if (r->rail >= 3)
+
+ switch (sensor->type) {
+ case NVBIOS_EXTDEV_INA209:
+ read = nvkm_iccsense_ina209_read;
+ break;
+ case NVBIOS_EXTDEV_INA219:
+ read = nvkm_iccsense_ina219_read;
+ break;
+ case NVBIOS_EXTDEV_INA3221:
+ read = nvkm_iccsense_ina3221_read;
+ break;
+ default:
continue;
- read = nvkm_iccsense_ina3221_read;
- break;
- default:
- continue;
+ }
+
+ rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+ if (!rail)
+ return -ENOMEM;
+
+ rail->read = read;
+ rail->sensor = sensor;
+ rail->idx = r;
+ rail->mohm = res->mohm;
+ nvkm_debug(subdev, "create rail for extdev %i: { idx: %i, mohm: %i }\n", pwr_rail->extdev_id, r, rail->mohm);
+ list_add_tail(&rail->head, &iccsense->rails);
}
-
- rail = kmalloc(sizeof(*rail), GFP_KERNEL);
- if (!rail)
- return -ENOMEM;
- sensor->rail_mask |= 1 << r->rail;
- rail->read = read;
- rail->sensor = sensor;
- rail->idx = r->rail;
- rail->mohm = r->resistor_mohm;
- list_add_tail(&rail->head, &iccsense->rails);
}
return 0;
}
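
The iccsense rework stores the full 16-bit sensor configuration word taken from the power-rail table (replacing the per-sensor u8 rail_mask) and creates one rail object per enabled shunt resistor, so a single table entry can now describe several resistors. For the INA3221 the channel-enable bits live in that same config register; the deleted helper assembled them by hand, roughly as in this standalone reconstruction:

    /* How the removed nvkm_iccsense_ina3221_config() derived the config
     * word from a rail mask: bits 14:12 enable channels 1..3 on top of a
     * base value (continuous shunt+bus, 1024 samples). */
    static unsigned short ina3221_config_from_mask(unsigned char rail_mask)
    {
            unsigned short value = 0x0e07;

            if (rail_mask & 0x1)
                    value |= 1 << 14;       /* channel 1 */
            if (rail_mask & 0x2)
                    value |= 1 << 13;       /* channel 2 */
            if (rail_mask & 0x4)
                    value |= 1 << 12;       /* channel 3 */
            return value;
    }

After this patch that derivation is unnecessary: the BIOS table supplies the config word directly, and nvkm_iccsense_sensor_config() just writes it out.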
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index b72c31d2f908..e90e0f6ed008 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -10,7 +10,7 @@ struct nvkm_iccsense_sensor {
enum nvbios_extdev_type type;
struct i2c_adapter *i2c;
u8 addr;
- u8 rail_mask;
+ u16 config;
};
struct nvkm_iccsense_rail {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
index 45a2f8e784f9..9abfa5e2fe9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
@@ -23,8 +23,8 @@
*/
#include "mxms.h"
-#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
-#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM16(x) get_unaligned_le16(&(x))
+#define ROM32(x) get_unaligned_le32(&(x))
static u8 *
mxms_data(struct nvkm_mxm *mxm)
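
The old ROM16/ROM32 macros cast a byte pointer to u16/u32 and dereferenced it, which is undefined behaviour for misaligned addresses and faults on strict-alignment architectures; get_unaligned_le16/32 performs the access safely regardless of alignment. A portable userspace equivalent of what the new macros guarantee:

    #include <stdint.h>

    /* Byte-wise little-endian reads: safe at any alignment, unlike
     * dereferencing a cast pointer as the old macros did. */
    static inline uint16_t rd_le16(const uint8_t *p)
    {
            return (uint16_t)p[0] | (uint16_t)p[1] << 8;
    }

    static inline uint32_t rd_le32(const uint8_t *p)
    {
            return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
                   (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }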
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
index c34076223b7b..bcd179ba11d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -1,6 +1,7 @@
nvkm-y += nvkm/subdev/volt/base.o
nvkm-y += nvkm/subdev/volt/gpio.o
nvkm-y += nvkm/subdev/volt/nv40.o
+nvkm-y += nvkm/subdev/volt/gf100.o
nvkm-y += nvkm/subdev/volt/gk104.o
nvkm-y += nvkm/subdev/volt/gk20a.o
nvkm-y += nvkm/subdev/volt/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 1c3d23b0e84a..e8569b04b55d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -26,6 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/vmap.h>
#include <subdev/bios/volt.h>
+#include <subdev/therm.h>
int
nvkm_volt_get(struct nvkm_volt *volt)
@@ -50,23 +51,35 @@ static int
nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
{
struct nvkm_subdev *subdev = &volt->subdev;
- int i, ret = -EINVAL;
+ int i, ret = -EINVAL, best_err = volt->max_uv, best = -1;
if (volt->func->volt_set)
return volt->func->volt_set(volt, uv);
for (i = 0; i < volt->vid_nr; i++) {
- if (volt->vid[i].uv == uv) {
- ret = volt->func->vid_set(volt, volt->vid[i].vid);
- nvkm_debug(subdev, "set %duv: %d\n", uv, ret);
+ int err = volt->vid[i].uv - uv;
+ if (err < 0 || err > best_err)
+ continue;
+
+ best_err = err;
+ best = i;
+ if (best_err == 0)
break;
- }
}
+
+ if (best == -1) {
+ nvkm_error(subdev, "couldn't set %iuv\n", uv);
+ return ret;
+ }
+
+ ret = volt->func->vid_set(volt, volt->vid[best].vid);
+ nvkm_debug(subdev, "set req %duv to %duv: %d\n", uv,
+ volt->vid[best].uv, ret);
return ret;
}
-static int
-nvkm_volt_map(struct nvkm_volt *volt, u8 id)
+int
+nvkm_volt_map_min(struct nvkm_volt *volt, u8 id)
{
struct nvkm_bios *bios = volt->subdev.device->bios;
struct nvbios_vmap_entry info;
@@ -76,7 +89,7 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id)
vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
if (vmap) {
if (info.link != 0xff) {
- int ret = nvkm_volt_map(volt, info.link);
+ int ret = nvkm_volt_map_min(volt, info.link);
if (ret < 0)
return ret;
info.min += ret;
@@ -88,19 +101,79 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id)
}
int
-nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
+nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temp)
+{
+ struct nvkm_bios *bios = volt->subdev.device->bios;
+ struct nvbios_vmap_entry info;
+ u8 ver, len;
+ u16 vmap;
+
+ vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
+ if (vmap) {
+ s64 result;
+
+ if (volt->speedo < 0)
+ return volt->speedo;
+
+ if (ver == 0x10 || (ver == 0x20 && info.mode == 0)) {
+ result = div64_s64((s64)info.arg[0], 10);
+ result += div64_s64((s64)info.arg[1] * volt->speedo, 10);
+ result += div64_s64((s64)info.arg[2] * volt->speedo * volt->speedo, 100000);
+ } else if (ver == 0x20) {
+ switch (info.mode) {
+ /* 0x0 handled above! */
+ case 0x1:
+ result = ((s64)info.arg[0] * 15625) >> 18;
+ result += ((s64)info.arg[1] * volt->speedo * 15625) >> 18;
+ result += ((s64)info.arg[2] * temp * 15625) >> 10;
+ result += ((s64)info.arg[3] * volt->speedo * temp * 15625) >> 18;
+ result += ((s64)info.arg[4] * volt->speedo * volt->speedo * 15625) >> 30;
+ result += ((s64)info.arg[5] * temp * temp * 15625) >> 18;
+ break;
+ case 0x3:
+ result = (info.min + info.max) / 2;
+ break;
+ case 0x2:
+ default:
+ result = info.min;
+ break;
+ }
+ } else {
+ return -ENODEV;
+ }
+
+ result = min(max(result, (s64)info.min), (s64)info.max);
+
+ if (info.link != 0xff) {
+ int ret = nvkm_volt_map(volt, info.link, temp);
+ if (ret < 0)
+ return ret;
+ result += ret;
+ }
+ return result;
+ }
+
+ return id ? id * 10000 : -ENODEV;
+}
+
+int
+nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, u8 min_id, u8 temp,
+ int condition)
{
int ret;
if (volt->func->set_id)
return volt->func->set_id(volt, id, condition);
- ret = nvkm_volt_map(volt, id);
+ ret = nvkm_volt_map(volt, id, temp);
if (ret >= 0) {
int prev = nvkm_volt_get(volt);
if (!condition || prev < 0 ||
(condition < 0 && ret < prev) ||
(condition > 0 && ret > prev)) {
+ int min = nvkm_volt_map(volt, min_id, temp);
+ if (min >= 0)
+ ret = max(min, ret);
ret = nvkm_volt_set(volt, ret);
} else {
ret = 0;
@@ -112,6 +185,7 @@ nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
static void
nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
{
+ struct nvkm_subdev *subdev = &bios->subdev;
struct nvbios_volt_entry ivid;
struct nvbios_volt info;
u8 ver, hdr, cnt, len;
@@ -119,7 +193,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
int i;
data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
- if (data && info.vidmask && info.base && info.step) {
+ if (data && info.vidmask && info.base && info.step && info.ranged) {
+ nvkm_debug(subdev, "found range-based VIDs\n");
volt->min_uv = info.min;
volt->max_uv = info.max;
for (i = 0; i < info.vidmask + 1; i++) {
@@ -132,7 +207,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
info.base += info.step;
}
volt->vid_mask = info.vidmask;
- } else if (data && info.vidmask) {
+ } else if (data && info.vidmask && !info.ranged) {
+ nvkm_debug(subdev, "found entry-based VIDs\n");
volt->min_uv = 0xffffffff;
volt->max_uv = 0;
for (i = 0; i < cnt; i++) {
@@ -154,6 +230,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
}
static int
+nvkm_volt_speedo_read(struct nvkm_volt *volt)
+{
+ if (volt->func->speedo_read)
+ return volt->func->speedo_read(volt);
+ return -EINVAL;
+}
+
+static int
nvkm_volt_init(struct nvkm_subdev *subdev)
{
struct nvkm_volt *volt = nvkm_volt(subdev);
@@ -167,6 +251,21 @@ nvkm_volt_init(struct nvkm_subdev *subdev)
return 0;
}
+static int
+nvkm_volt_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_volt *volt = nvkm_volt(subdev);
+
+ volt->speedo = nvkm_volt_speedo_read(volt);
+ if (volt->speedo > 0)
+ nvkm_debug(&volt->subdev, "speedo %x\n", volt->speedo);
+
+ if (volt->func->oneinit)
+ return volt->func->oneinit(volt);
+
+ return 0;
+}
+
static void *
nvkm_volt_dtor(struct nvkm_subdev *subdev)
{
@@ -177,6 +276,7 @@ static const struct nvkm_subdev_func
nvkm_volt = {
.dtor = nvkm_volt_dtor,
.init = nvkm_volt_init,
+ .oneinit = nvkm_volt_oneinit,
};
void
@@ -191,9 +291,22 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
/* Assuming the non-bios device should build the voltage table later */
if (bios) {
+ u8 ver, hdr, cnt, len;
+ struct nvbios_vmap vmap;
+
nvkm_volt_parse_bios(bios, volt);
nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
volt->min_uv, volt->max_uv);
+
+ if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) {
+ volt->max0_id = vmap.max0;
+ volt->max1_id = vmap.max1;
+ volt->max2_id = vmap.max2;
+ } else {
+ volt->max0_id = 0xff;
+ volt->max1_id = 0xff;
+ volt->max2_id = 0xff;
+ }
}
if (volt->vid_nr) {
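
nvkm_volt_set() above no longer demands an exact VID table match: it keeps the entry with the smallest non-negative distance to the request, that is, the lowest table voltage at or above the target, and logs both the requested and the chosen microvolt values. A standalone sketch of that selection loop (the kernel version seeds best_err with volt->max_uv rather than a sentinel):

    #include <stdio.h>

    /* Pick the smallest table voltage >= the request; returns the index,
     * or -1 if every entry is below the request. */
    static int pick_vid(const int *uv_table, int nr, int uv)
    {
            int i, best = -1, best_err = 1 << 30;

            for (i = 0; i < nr; i++) {
                    int err = uv_table[i] - uv;
                    if (err < 0 || err > best_err)
                            continue;
                    best_err = err;
                    best = i;
                    if (best_err == 0)
                            break;
            }
            return best;
    }

    int main(void)
    {
            int table[] = { 800000, 850000, 900000, 1000000 };
            printf("%d\n", pick_vid(table, 4, 870000)); /* prints 2 */
            return 0;
    }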
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
new file mode 100644
index 000000000000..d9ed6925ca64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include "priv.h"
+
+#include <subdev/fuse.h>
+
+static int
+gf100_volt_speedo_read(struct nvkm_volt *volt)
+{
+ struct nvkm_device *device = volt->subdev.device;
+ struct nvkm_fuse *fuse = device->fuse;
+
+ if (!fuse)
+ return -EINVAL;
+
+ return nvkm_fuse_read(fuse, 0x1cc);
+}
+
+int
+gf100_volt_oneinit(struct nvkm_volt *volt)
+{
+ struct nvkm_subdev *subdev = &volt->subdev;
+ if (volt->speedo <= 0)
+ nvkm_error(subdev, "couldn't find speedo value, volting not "
+ "possible\n");
+ return 0;
+}
+
+static const struct nvkm_volt_func
+gf100_volt = {
+ .oneinit = gf100_volt_oneinit,
+ .vid_get = nvkm_voltgpio_get,
+ .vid_set = nvkm_voltgpio_set,
+ .speedo_read = gf100_volt_speedo_read,
+};
+
+int
+gf100_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+{
+ struct nvkm_volt *volt;
+ int ret;
+
+ ret = nvkm_volt_new_(&gf100_volt, device, index, &volt);
+ *pvolt = volt;
+ if (ret)
+ return ret;
+
+ return nvkm_voltgpio_init(volt);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index 420bd84d8483..b2c5d1166a13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -27,6 +27,7 @@
#include <subdev/gpio.h>
#include <subdev/bios.h>
#include <subdev/bios/volt.h>
+#include <subdev/fuse.h>
#define gk104_volt(p) container_of((p), struct gk104_volt, base)
struct gk104_volt {
@@ -64,13 +65,33 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv)
return 0;
}
+static int
+gk104_volt_speedo_read(struct nvkm_volt *volt)
+{
+ struct nvkm_device *device = volt->subdev.device;
+ struct nvkm_fuse *fuse = device->fuse;
+ int ret;
+
+ if (!fuse)
+ return -EINVAL;
+
+ nvkm_wr32(device, 0x122634, 0x0);
+ ret = nvkm_fuse_read(fuse, 0x3a8);
+ nvkm_wr32(device, 0x122634, 0x41);
+ return ret;
+}
+
static const struct nvkm_volt_func
gk104_volt_pwm = {
+ .oneinit = gf100_volt_oneinit,
.volt_get = gk104_volt_get,
.volt_set = gk104_volt_set,
+ .speedo_read = gk104_volt_speedo_read,
}, gk104_volt_gpio = {
+ .oneinit = gf100_volt_oneinit,
.vid_get = nvkm_voltgpio_get,
.vid_set = nvkm_voltgpio_set,
+ .speedo_read = gk104_volt_speedo_read,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index d5140d991161..354bafe4b4e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -9,11 +9,13 @@ int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
int index, struct nvkm_volt **);
struct nvkm_volt_func {
+ int (*oneinit)(struct nvkm_volt *);
int (*volt_get)(struct nvkm_volt *);
int (*volt_set)(struct nvkm_volt *, u32 uv);
int (*vid_get)(struct nvkm_volt *);
int (*vid_set)(struct nvkm_volt *, u8 vid);
int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+ int (*speedo_read)(struct nvkm_volt *);
};
int nvkm_voltgpio_init(struct nvkm_volt *);
@@ -23,4 +25,6 @@ int nvkm_voltgpio_set(struct nvkm_volt *, u8);
int nvkm_voltpwm_init(struct nvkm_volt *volt);
int nvkm_voltpwm_get(struct nvkm_volt *volt);
int nvkm_voltpwm_set(struct nvkm_volt *volt, u32 uv);
+
+int gf100_volt_oneinit(struct nvkm_volt *);
#endif
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 3485d1ecd655..aaa8a58390f1 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -24,23 +24,24 @@ struct panel_drv_data {
struct device *dev;
- struct omap_video_timings timings;
+ struct videomode vm;
bool invert_polarity;
};
-static const struct omap_video_timings tvc_pal_timings = {
- .x_res = 720,
- .y_res = 574,
+static const struct videomode tvc_pal_vm = {
+ .hactive = 720,
+ .vactive = 574,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 12,
- .hbp = 68,
- .vsw = 5,
- .vfp = 5,
- .vbp = 41,
-
- .interlace = true,
+ .hsync_len = 64,
+ .hfront_porch = 12,
+ .hback_porch = 68,
+ .vsync_len = 5,
+ .vfront_porch = 5,
+ .vback_porch = 41,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW,
};
static const struct of_device_id tvc_of_match[];
@@ -92,7 +93,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->timings);
+ in->ops.atv->set_timings(in, &ddata->vm);
if (!ddata->dev->of_node) {
in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
@@ -126,32 +127,32 @@ static void tvc_disable(struct omap_dss_device *dssdev)
}
static void tvc_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.atv->set_timings(in, timings);
+ in->ops.atv->set_timings(in, vm);
}
static void tvc_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tvc_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.atv->check_timings(in, timings);
+ return in->ops.atv->check_timings(in, vm);
}
static u32 tvc_get_wss(struct omap_dss_device *dssdev)
@@ -253,14 +254,14 @@ static int tvc_probe(struct platform_device *pdev)
return -ENODEV;
}
- ddata->timings = tvc_pal_timings;
+ ddata->vm = tvc_pal_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &tvc_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = tvc_pal_timings;
+ dssdev->panel.vm = tvc_pal_vm;
r = omapdss_register_display(dssdev);
if (r) {
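
This connector-analog-tv.c diff is the first of a long series below converting omapdrm from the driver-private struct omap_video_timings to the generic struct videomode: x_res/y_res become hactive/vactive, the hsw/hfp/hbp and vsw/vfp/vbp fields become the *sync_len/*front_porch/*back_porch fields, and the discrete polarity/edge fields collapse into the DISPLAY_FLAGS_* bit mask. A sketch of that mechanical mapping (omap_video_timings as defined before this patch, shown for illustration only):

    #include <video/videomode.h>

    static void legacy_to_videomode(const struct omap_video_timings *t,
                                    struct videomode *vm)
    {
            vm->hactive      = t->x_res;
            vm->vactive      = t->y_res;
            vm->pixelclock   = t->pixelclock;
            vm->hsync_len    = t->hsw;
            vm->hfront_porch = t->hfp;
            vm->hback_porch  = t->hbp;
            vm->vsync_len    = t->vsw;
            vm->vfront_porch = t->vfp;
            vm->vback_porch  = t->vbp;

            vm->flags = t->interlace ? DISPLAY_FLAGS_INTERLACED : 0;
            vm->flags |= t->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH ?
                         DISPLAY_FLAGS_HSYNC_HIGH : DISPLAY_FLAGS_HSYNC_LOW;
            vm->flags |= t->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH ?
                         DISPLAY_FLAGS_VSYNC_HIGH : DISPLAY_FLAGS_VSYNC_LOW;
            /* de_level, data_pclk_edge and sync_pclk_edge map to the
             * DISPLAY_FLAGS_DE_*, _PIXDATA_* and _SYNC_* bits likewise. */
    }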
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 684b7aeda411..d6875d9fcefa 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -19,32 +19,30 @@
#include "../dss/omapdss.h"
-static const struct omap_video_timings dvic_default_timings = {
- .x_res = 640,
- .y_res = 480,
+static const struct videomode dvic_default_vm = {
+ .hactive = 640,
+ .vactive = 480,
.pixelclock = 23500000,
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
+ .hfront_porch = 48,
+ .hsync_len = 32,
+ .hback_porch = 80,
- .vfp = 3,
- .vsw = 4,
- .vbp = 7,
+ .vfront_porch = 3,
+ .vsync_len = 4,
+ .vback_porch = 7,
- .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH |
+ DISPLAY_FLAGS_SYNC_NEGEDGE | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings timings;
+ struct videomode vm;
struct i2c_adapter *i2c_adapter;
};
@@ -90,7 +88,7 @@ static int dvic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dvi->set_timings(in, &ddata->timings);
+ in->ops.dvi->set_timings(in, &ddata->vm);
r = in->ops.dvi->enable(in);
if (r)
@@ -115,32 +113,32 @@ static void dvic_disable(struct omap_dss_device *dssdev)
}
static void dvic_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dvi->set_timings(in, timings);
+ in->ops.dvi->set_timings(in, vm);
}
static void dvic_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int dvic_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dvi->check_timings(in, timings);
+ return in->ops.dvi->check_timings(in, vm);
}
static int dvic_ddc_read(struct i2c_adapter *adapter,
@@ -287,14 +285,14 @@ static int dvic_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->timings = dvic_default_timings;
+ ddata->vm = dvic_default_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &dvic_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_DVI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = dvic_default_timings;
+ dssdev->panel.vm = dvic_default_vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 7bdf83af9797..1ef130641bae 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -21,21 +21,18 @@
#include "../dss/omapdss.h"
-static const struct omap_video_timings hdmic_default_timings = {
- .x_res = 640,
- .y_res = 480,
+static const struct videomode hdmic_default_vm = {
+ .hactive = 640,
+ .vactive = 480,
.pixelclock = 25175000,
- .hsw = 96,
- .hfp = 16,
- .hbp = 48,
- .vsw = 2,
- .vfp = 11,
- .vbp = 31,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .interlace = false,
+ .hsync_len = 96,
+ .hfront_porch = 16,
+ .hback_porch = 48,
+ .vsync_len = 2,
+ .vfront_porch = 11,
+ .vback_porch = 31,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
struct panel_drv_data {
@@ -44,7 +41,7 @@ struct panel_drv_data {
struct device *dev;
- struct omap_video_timings timings;
+ struct videomode vm;
int hpd_gpio;
};
@@ -96,7 +93,7 @@ static int hdmic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.hdmi->set_timings(in, &ddata->timings);
+ in->ops.hdmi->set_timings(in, &ddata->vm);
r = in->ops.hdmi->enable(in);
if (r)
@@ -123,32 +120,32 @@ static void hdmic_disable(struct omap_dss_device *dssdev)
}
static void hdmic_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.hdmi->set_timings(in, timings);
+ in->ops.hdmi->set_timings(in, vm);
}
static void hdmic_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int hdmic_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.hdmi->check_timings(in, timings);
+ return in->ops.hdmi->check_timings(in, vm);
}
static int hdmic_read_edid(struct omap_dss_device *dssdev,
@@ -259,14 +256,14 @@ static int hdmic_probe(struct platform_device *pdev)
goto err_reg;
}
- ddata->timings = hdmic_default_timings;
+ ddata->vm = hdmic_default_vm;
dssdev = &ddata->dssdev;
dssdev->driver = &hdmic_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = hdmic_default_timings;
+ dssdev->panel.vm = hdmic_default_vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index fe4e7ec3bab0..f7a5731492d0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -27,7 +27,7 @@ struct panel_drv_data {
struct gpio_desc *enable_gpio;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -90,7 +90,7 @@ static int opa362_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->timings);
+ in->ops.atv->set_timings(in, &ddata->vm);
r = in->ops.atv->enable(in);
if (r)
@@ -123,38 +123,38 @@ static void opa362_disable(struct omap_dss_device *dssdev)
}
static void opa362_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
dev_dbg(dssdev->dev, "set_timings\n");
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.atv->set_timings(in, timings);
+ in->ops.atv->set_timings(in, vm);
}
static void opa362_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
dev_dbg(dssdev->dev, "get_timings\n");
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int opa362_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
dev_dbg(dssdev->dev, "check_timings\n");
- return in->ops.atv->check_timings(in, timings);
+ return in->ops.atv->check_timings(in, vm);
}
static void opa362_set_type(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index d768217cefe0..13e32d02c884 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -24,7 +24,7 @@ struct panel_drv_data {
int pd_gpio;
int data_lines;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -81,7 +81,7 @@ static int tfp410_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->timings);
+ in->ops.dpi->set_timings(in, &ddata->vm);
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
@@ -113,44 +113,43 @@ static void tfp410_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tfp410_fix_timings(struct omap_video_timings *timings)
+static void tfp410_fix_timings(struct videomode *vm)
{
- timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ vm->flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE;
}
static void tfp410_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- tfp410_fix_timings(timings);
+ tfp410_fix_timings(vm);
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void tfp410_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tfp410_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- tfp410_fix_timings(timings);
+ tfp410_fix_timings(vm);
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static const struct omapdss_dvi_ops tfp410_dvi_ops = {
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 46855c8f5cbf..6d8f79b29af6 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -26,7 +26,7 @@ struct panel_drv_data {
struct gpio_desc *ls_oe_gpio;
struct gpio_desc *hpd_gpio;
- struct omap_video_timings timings;
+ struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -80,7 +80,7 @@ static int tpd_enable(struct omap_dss_device *dssdev)
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
return 0;
- in->ops.hdmi->set_timings(in, &ddata->timings);
+ in->ops.hdmi->set_timings(in, &ddata->vm);
r = in->ops.hdmi->enable(in);
if (r)
@@ -105,33 +105,33 @@ static void tpd_disable(struct omap_dss_device *dssdev)
}
static void tpd_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->timings = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.hdmi->set_timings(in, timings);
+ in->ops.hdmi->set_timings(in, vm);
}
static void tpd_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->timings;
+ *vm = ddata->vm;
}
static int tpd_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
int r;
- r = in->ops.hdmi->check_timings(in, timings);
+ r = in->ops.hdmi->check_timings(in, vm);
return r;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 7f16f985ab22..38003208d9ca 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -28,7 +28,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
/* used for non-DT boot, to be removed */
int backlight_gpio;
@@ -80,7 +80,7 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -122,32 +122,32 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
}
static void panel_dpi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int panel_dpi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver panel_dpi_ops = {
@@ -169,7 +169,6 @@ static int panel_dpi_probe_pdata(struct platform_device *pdev)
const struct panel_dpi_platform_data *pdata;
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev, *in;
- struct videomode vm;
int r;
pdata = dev_get_platdata(&pdev->dev);
@@ -185,8 +184,7 @@ static int panel_dpi_probe_pdata(struct platform_device *pdev)
ddata->data_lines = pdata->data_lines;
- videomode_from_timing(pdata->display_timing, &vm);
- videomode_to_omap_video_timings(&vm, &ddata->videomode);
+ videomode_from_timing(pdata->display_timing, &ddata->vm);
dssdev = &ddata->dssdev;
dssdev->name = pdata->name;
@@ -214,7 +212,6 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
struct omap_dss_device *in;
int r;
struct display_timing timing;
- struct videomode vm;
struct gpio_desc *gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
@@ -245,8 +242,7 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
return r;
}
- videomode_from_timing(&timing, &vm);
- videomode_to_omap_video_timings(&vm, &ddata->videomode);
+ videomode_from_timing(&timing, &ddata->vm);
in = omapdss_of_find_source_for_first_ep(node);
if (IS_ERR(in)) {
@@ -295,7 +291,7 @@ static int panel_dpi_probe(struct platform_device *pdev)
dssdev->driver = &panel_dpi_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index b1f3b818edf4..dc026a843712 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -42,7 +42,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings timings;
+ struct videomode vm;
struct platform_device *pdev;
@@ -382,8 +382,8 @@ static const struct backlight_ops dsicm_bl_ops = {
static void dsicm_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
+ *xres = dssdev->panel.vm.hactive;
+ *yres = dssdev->panel.vm.vactive;
}
static ssize_t dsicm_num_errors_show(struct device *dev,
@@ -589,7 +589,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
struct omap_dss_dsi_config dsi_config = {
.mode = OMAP_DSS_DSI_CMD_MODE,
.pixel_format = OMAP_DSS_DSI_FMT_RGB888,
- .timings = &ddata->timings,
+ .vm = &ddata->vm,
.hs_clk_min = 150000000,
.hs_clk_max = 300000000,
.lp_clk_min = 7000000,
@@ -892,8 +892,8 @@ static int dsicm_update(struct omap_dss_device *dssdev,
/* XXX no need to send this every frame, but dsi break if not done */
r = dsicm_set_update_window(ddata, 0, 0,
- dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res);
+ dssdev->panel.vm.hactive,
+ dssdev->panel.vm.vactive);
if (r)
goto err;
@@ -1023,9 +1023,8 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
goto err1;
}
- size = min(w * h * 3,
- dssdev->panel.timings.x_res *
- dssdev->panel.timings.y_res * 3);
+ size = min((u32)w * h * 3,
+ dssdev->panel.vm.hactive * dssdev->panel.vm.vactive * 3);
in->ops.dsi->bus_lock(in);
@@ -1186,14 +1185,14 @@ static int dsicm_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->timings.x_res = 864;
- ddata->timings.y_res = 480;
- ddata->timings.pixelclock = 864 * 480 * 60;
+ ddata->vm.hactive = 864;
+ ddata->vm.vactive = 480;
+ ddata->vm.pixelclock = 864 * 480 * 60;
dssdev = &ddata->dssdev;
dssdev->dev = dev;
dssdev->driver = &dsicm_ops;
- dssdev->panel.timings = ddata->timings;
+ dssdev->panel.vm = ddata->vm;
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
dssdev->owner = THIS_MODULE;
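
In the dsicm_memory_read() hunk above, the (u32) cast is what keeps the kernel's type-checking min() macro happy: w and h are u16, so w * h * 3 undergoes integer promotion to signed int, while vm.hactive * vm.vactive * 3 is u32, and min() warns unless both operands share a type. A tiny userspace illustration of the promotion at play:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t w = 864, h = 480;
            uint32_t hactive = 864, vactive = 480;

            /* Casting the first operand up makes both sides uint32_t,
             * matching what the patched kernel code does. */
            uint32_t lhs = (uint32_t)w * h * 3;
            uint32_t rhs = hactive * vactive * 3;
            printf("%u\n", lhs < rhs ? lhs : rhs);
            return 0;
    }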
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 6dfb96cea293..43d21edb51f5 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -19,25 +19,28 @@
#include "../dss/omapdss.h"
-static struct omap_video_timings lb035q02_timings = {
- .x_res = 320,
- .y_res = 240,
+static struct videomode lb035q02_vm = {
+ .hactive = 320,
+ .vactive = 240,
.pixelclock = 6500000,
- .hsw = 2,
- .hfp = 20,
- .hbp = 68,
-
- .vsw = 2,
- .vfp = 4,
- .vbp = 18,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 2,
+ .hfront_porch = 20,
+ .hback_porch = 68,
+
+ .vsync_len = 2,
+ .vfront_porch = 4,
+ .vback_porch = 18,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * DE is active LOW
+ * DATA needs to be driven on the FALLING edge
+ */
};
struct panel_drv_data {
@@ -48,7 +51,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct gpio_desc *enable_gpio;
};
@@ -158,7 +161,7 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -189,32 +192,32 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
}
static void lb035q02_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void lb035q02_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int lb035q02_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver lb035q02_ops = {
@@ -278,14 +281,14 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
if (r)
return r;
- ddata->videomode = lb035q02_timings;
+ ddata->vm = lb035q02_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &lb035q02_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 9f3d6f48f3e1..2de27ba01552 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -23,7 +23,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings videomode;
+ struct videomode vm;
int data_lines;
@@ -65,22 +65,20 @@ static const struct {
{ 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
};
-static const struct omap_video_timings nec_8048_panel_timings = {
- .x_res = LCD_XRES,
- .y_res = LCD_YRES,
+static const struct videomode nec_8048_panel_vm = {
+ .hactive = LCD_XRES,
+ .vactive = LCD_YRES,
.pixelclock = LCD_PIXEL_CLOCK,
- .hfp = 6,
- .hsw = 1,
- .hbp = 4,
- .vfp = 3,
- .vsw = 1,
- .vbp = 4,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .hfront_porch = 6,
+ .hsync_len = 1,
+ .hback_porch = 4,
+ .vfront_porch = 3,
+ .vsync_len = 1,
+ .vback_porch = 4,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -157,7 +155,7 @@ static int nec_8048_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -188,32 +186,32 @@ static void nec_8048_disable(struct omap_dss_device *dssdev)
}
static void nec_8048_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void nec_8048_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int nec_8048_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver nec_8048_ops = {
@@ -306,14 +304,14 @@ static int nec_8048_probe(struct spi_device *spi)
goto err_gpio;
}
- ddata->videomode = nec_8048_panel_timings;
+ ddata->vm = nec_8048_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &nec_8048_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 3d3efc561ea9..04fe235b7cac 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -26,7 +26,7 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct gpio_desc *resb_gpio; /* low = reset active min 20 us */
struct gpio_desc *ini_gpio; /* high = power on */
@@ -35,25 +35,27 @@ struct panel_drv_data {
struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */
};
-static const struct omap_video_timings sharp_ls_timings = {
- .x_res = 480,
- .y_res = 640,
+static const struct videomode sharp_ls_vm = {
+ .hactive = 480,
+ .vactive = 640,
.pixelclock = 19200000,
- .hsw = 2,
- .hfp = 1,
- .hbp = 28,
-
- .vsw = 1,
- .vfp = 1,
- .vbp = 1,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 2,
+ .hfront_porch = 1,
+ .hback_porch = 28,
+
+ .vsync_len = 1,
+ .vfront_porch = 1,
+ .vback_porch = 1,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * DATA needs to be driven on the FALLING edge
+ */
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -99,7 +101,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
if (ddata->vcc) {
r = regulator_enable(ddata->vcc);
@@ -154,32 +156,32 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev)
}
static void sharp_ls_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int sharp_ls_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver sharp_ls_ops = {
@@ -279,14 +281,14 @@ static int sharp_ls_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->videomode = sharp_ls_timings;
+ ddata->vm = sharp_ls_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &pdev->dev;
dssdev->driver = &sharp_ls_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 3557a4c7dd7b..746cb8d9cba1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -71,7 +71,7 @@ struct panel_drv_data {
int reset_gpio;
int datapairs;
- struct omap_video_timings videomode;
+ struct videomode vm;
char *name;
int enabled;
@@ -92,23 +92,20 @@ struct panel_drv_data {
struct backlight_device *bl_dev;
};
-static const struct omap_video_timings acx565akm_panel_timings = {
- .x_res = 800,
- .y_res = 480,
+static const struct videomode acx565akm_panel_vm = {
+ .hactive = 800,
+ .vactive = 480,
.pixelclock = 24000000,
- .hfp = 28,
- .hsw = 4,
- .hbp = 24,
- .vfp = 3,
- .vsw = 3,
- .vbp = 4,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hfront_porch = 28,
+ .hsync_len = 4,
+ .hback_porch = 24,
+ .vfront_porch = 3,
+ .vsync_len = 3,
+ .vback_porch = 4,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -548,7 +545,7 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- in->ops.sdi->set_timings(in, &ddata->videomode);
+ in->ops.sdi->set_timings(in, &ddata->vm);
if (ddata->datapairs > 0)
in->ops.sdi->set_datapairs(in, ddata->datapairs);
@@ -662,32 +659,32 @@ static void acx565akm_disable(struct omap_dss_device *dssdev)
}
static void acx565akm_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.sdi->set_timings(in, timings);
+ in->ops.sdi->set_timings(in, vm);
}
static void acx565akm_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int acx565akm_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.sdi->check_timings(in, timings);
+ return in->ops.sdi->check_timings(in, vm);
}
static struct omap_dss_driver acx565akm_ops = {
@@ -845,14 +842,14 @@ static int acx565akm_probe(struct spi_device *spi)
acx565akm_bl_update_status(bldev);
- ddata->videomode = acx565akm_panel_timings;
+ ddata->vm = acx565akm_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &acx565akm_ops;
dssdev->type = OMAP_DISPLAY_TYPE_SDI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index e859b3f893f7..f313dbfcbacb 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -37,28 +37,29 @@ struct panel_drv_data {
int data_lines;
- struct omap_video_timings videomode;
+ struct videomode vm;
struct spi_device *spi_dev;
};
-static struct omap_video_timings td028ttec1_panel_timings = {
- .x_res = 480,
- .y_res = 640,
+static struct videomode td028ttec1_panel_vm = {
+ .hactive = 480,
+ .vactive = 640,
.pixelclock = 22153000,
- .hfp = 24,
- .hsw = 8,
- .hbp = 8,
- .vfp = 4,
- .vsw = 2,
- .vbp = 2,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .hfront_porch = 24,
+ .hsync_len = 8,
+ .hback_porch = 8,
+ .vfront_porch = 4,
+ .vsync_len = 2,
+ .vback_porch = 2,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
};
#define JBT_COMMAND 0x000
@@ -208,7 +209,7 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -325,32 +326,32 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
}
static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver td028ttec1_ops = {
@@ -414,14 +415,14 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
if (r)
return r;
- ddata->videomode = td028ttec1_panel_timings;
+ ddata->vm = td028ttec1_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &td028ttec1_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 66c6bbe6472b..0787dba44faa 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -56,7 +56,7 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- struct omap_video_timings videomode;
+ struct videomode vm;
int data_lines;
@@ -72,25 +72,27 @@ struct panel_drv_data {
u32 power_on_resume:1;
};
-static const struct omap_video_timings tpo_td043_timings = {
- .x_res = 800,
- .y_res = 480,
+static const struct videomode tpo_td043_vm = {
+ .hactive = 800,
+ .vactive = 480,
.pixelclock = 36000000,
- .hsw = 1,
- .hfp = 68,
- .hbp = 214,
+ .hsync_len = 1,
+ .hfront_porch = 68,
+ .hback_porch = 214,
- .vsw = 1,
- .vfp = 39,
- .vbp = 34,
+ .vsync_len = 1,
+ .vfront_porch = 39,
+ .vback_porch = 34,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE,
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -378,7 +380,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (ddata->data_lines)
in->ops.dpi->set_data_lines(in, ddata->data_lines);
- in->ops.dpi->set_timings(in, &ddata->videomode);
+ in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
if (r)
@@ -418,32 +420,32 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
}
static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- ddata->videomode = *timings;
- dssdev->panel.timings = *timings;
+ ddata->vm = *vm;
+ dssdev->panel.vm = *vm;
- in->ops.dpi->set_timings(in, timings);
+ in->ops.dpi->set_timings(in, vm);
}
static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *timings = ddata->videomode;
+ *vm = ddata->vm;
}
static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- return in->ops.dpi->check_timings(in, timings);
+ return in->ops.dpi->check_timings(in, vm);
}
static struct omap_dss_driver tpo_td043_ops = {
@@ -546,14 +548,14 @@ static int tpo_td043_probe(struct spi_device *spi)
goto err_sysfs;
}
- ddata->videomode = tpo_td043_timings;
+ ddata->vm = tpo_td043_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
dssdev->driver = &tpo_td043_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = ddata->videomode;
+ dssdev->panel.vm = ddata->vm;
r = omapdss_register_display(dssdev);
if (r) {
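Across these panel drivers the conversion follows one pattern: the omap_video_timings fields (x_res/hfp/hsw/hbp and their vertical counterparts) become the generic videomode names (hactive/hfront_porch/hsync_len/hback_porch), and the five discrete level/edge enums collapse into a single flags bitmask. A minimal user-space sketch of the resulting arithmetic, using the TD043 mode above; the local struct mirrors include/video/videomode.h and is only an illustrative stand-in:

#include <stdio.h>

/* Illustrative stand-in for the kernel's struct videomode */
struct vm_sketch {
	unsigned long pixelclock;	/* Hz */
	unsigned int hactive, hfront_porch, hback_porch, hsync_len;
	unsigned int vactive, vfront_porch, vback_porch, vsync_len;
};

int main(void)
{
	/* tpo_td043_vm from the hunk above */
	struct vm_sketch vm = {
		.pixelclock = 36000000,
		.hactive = 800, .hfront_porch = 68,
		.hback_porch = 214, .hsync_len = 1,
		.vactive = 480, .vfront_porch = 39,
		.vback_porch = 34, .vsync_len = 1,
	};
	unsigned int xtot = vm.hactive + vm.hfront_porch +
			    vm.hsync_len + vm.hback_porch;	/* 1083 */
	unsigned int ytot = vm.vactive + vm.vfront_porch +
			    vm.vsync_len + vm.vback_porch;	/* 554 */

	/* the same totals dispc_mgr_set_timings() computes below */
	printf("refresh ~ %.2f Hz\n",
	       (double)vm.pixelclock / ((double)xtot * ytot));	/* ~60.00 */
	return 0;
}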
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 535240fba671..c839f6456db2 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -75,7 +75,7 @@ struct dispc_features {
unsigned long max_lcd_pclk;
unsigned long max_tv_pclk;
int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -1679,7 +1679,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
{
int scale_x = out_width != orig_width;
int scale_y = out_height != orig_height;
- bool chroma_upscale = plane != OMAP_DSS_WB ? true : false;
+ bool chroma_upscale = plane != OMAP_DSS_WB;
if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
return;
@@ -2179,7 +2179,7 @@ static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
* undocumented horizontal position and timing related limitations.
*/
static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *t, u16 pos_x,
+ const struct videomode *vm, u16 pos_x,
u16 width, u16 height, u16 out_width, u16 out_height,
bool five_taps)
{
@@ -2189,14 +2189,16 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
u64 val, blank;
int i;
- nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
+ nonactive = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch - out_width;
i = 0;
if (out_height < height)
i++;
if (out_width < width)
i++;
- blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
+ blank = div_u64((u64)(vm->hback_porch + vm->hsync_len + vm->hfront_porch) *
+ lclk, pclk);
DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
if (blank <= limits[i])
return -EINVAL;
@@ -2231,7 +2233,7 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
}
static unsigned long calc_core_clk_five_taps(unsigned long pclk,
- const struct omap_video_timings *mgr_timings, u16 width,
+ const struct videomode *vm, u16 width,
u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode)
{
@@ -2242,7 +2244,7 @@ static unsigned long calc_core_clk_five_taps(unsigned long pclk,
return (unsigned long) pclk;
if (height > out_height) {
- unsigned int ppl = mgr_timings->x_res;
+ unsigned int ppl = vm->hactive;
tmp = (u64)pclk * height * out_width;
do_div(tmp, 2 * out_height * ppl);
@@ -2324,7 +2326,7 @@ static unsigned long calc_core_clk_44xx(unsigned long pclk, u16 width,
}
static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2370,7 +2372,7 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
}
static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2392,7 +2394,7 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
*five_taps = false;
again:
if (*five_taps)
- *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
+ *core_clk = calc_core_clk_five_taps(pclk, vm,
in_width, in_height, out_width,
out_height, color_mode);
else
@@ -2400,7 +2402,7 @@ again:
in_height, out_width, out_height,
mem_to_mem);
- error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
+ error = check_horiz_timing_omap3(pclk, lclk, vm,
pos_x, in_width, in_height, out_width,
out_height, *five_taps);
if (error && *five_taps) {
@@ -2435,7 +2437,7 @@ again:
return -EINVAL;
}
- if (check_horiz_timing_omap3(pclk, lclk, mgr_timings, pos_x, in_width,
+ if (check_horiz_timing_omap3(pclk, lclk, vm, pos_x, in_width,
in_height, out_width, out_height, *five_taps)) {
DSSERR("horizontal timing too tight\n");
return -EINVAL;
@@ -2455,7 +2457,7 @@ again:
}
static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
@@ -2501,7 +2503,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
enum omap_overlay_caps caps,
- const struct omap_video_timings *mgr_timings,
+ const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, u16 pos_x,
@@ -2515,7 +2517,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (width == out_width && height == out_height)
return 0;
- if (!mem_to_mem && (pclk == 0 || mgr_timings->pixelclock == 0)) {
+ if (!mem_to_mem && (pclk == 0 || vm->pixelclock == 0)) {
DSSERR("cannot calculate scaling settings: pclk is zero\n");
return -EINVAL;
}
@@ -2551,7 +2553,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (decim_y > *y_predecim || out_height > height * 8)
return -EINVAL;
- ret = dispc.feat->calc_scaling(pclk, lclk, mgr_timings, width, height,
+ ret = dispc.feat->calc_scaling(pclk, lclk, vm, width, height,
out_width, out_height, color_mode, five_taps,
x_predecim, y_predecim, &decim_x, &decim_y, pos_x, &core_clk,
mem_to_mem);
@@ -2591,7 +2593,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
u16 out_width, u16 out_height, enum omap_color_mode color_mode,
u8 rotation, bool mirror, u8 zorder, u8 pre_mult_alpha,
u8 global_alpha, enum omap_dss_rotation_type rotation_type,
- bool replication, const struct omap_video_timings *mgr_timings,
+ bool replication, const struct videomode *vm,
bool mem_to_mem)
{
bool five_taps = true;
@@ -2605,7 +2607,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
u16 in_height = height;
u16 in_width = width;
int x_predecim = 1, y_predecim = 1;
- bool ilace = mgr_timings->interlace;
+ bool ilace = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
unsigned long pclk = dispc_plane_pclk_rate(plane);
unsigned long lclk = dispc_plane_lclk_rate(plane);
@@ -2647,7 +2649,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (!dss_feat_color_mode_supported(plane, color_mode))
return -EINVAL;
- r = dispc_ovl_calc_scaling(pclk, lclk, caps, mgr_timings, in_width,
+ r = dispc_ovl_calc_scaling(pclk, lclk, caps, vm, in_width,
in_height, out_width, out_height, color_mode,
&five_taps, &x_predecim, &y_predecim, pos_x,
rotation_type, mem_to_mem);
@@ -2784,7 +2786,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
}
int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
+ bool replication, const struct videomode *vm,
bool mem_to_mem)
{
int r;
@@ -2803,14 +2805,14 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
oi->mirror, oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
- oi->rotation_type, replication, mgr_timings, mem_to_mem);
+ oi->rotation_type, replication, vm, mem_to_mem);
return r;
}
EXPORT_SYMBOL(dispc_ovl_setup);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct omap_video_timings *mgr_timings)
+ bool mem_to_mem, const struct videomode *vm)
{
int r;
u32 l;
@@ -2819,8 +2821,8 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
const u8 zorder = 0, global_alpha = 0;
const bool replication = false;
bool truncation;
- int in_width = mgr_timings->x_res;
- int in_height = mgr_timings->y_res;
+ int in_width = vm->hactive;
+ int in_height = vm->vactive;
enum omap_overlay_caps caps =
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
@@ -2833,7 +2835,7 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->color_mode, wi->rotation, wi->mirror, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
- replication, mgr_timings, mem_to_mem);
+ replication, vm, mem_to_mem);
switch (wi->color_mode) {
case OMAP_DSS_COLOR_RGB16:
@@ -2867,8 +2869,8 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
} else {
int wbdelay;
- wbdelay = min(mgr_timings->vfp + mgr_timings->vsw +
- mgr_timings->vbp, 255);
+ wbdelay = min(vm->vfront_porch +
+ vm->vsync_len + vm->vback_porch, (u32)255);
/* WBDELAYCOUNT */
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
@@ -3093,10 +3095,10 @@ static bool _dispc_mgr_size_ok(u16 width, u16 height)
height <= dispc.feat->mgr_height_max;
}
-static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
+static bool _dispc_lcd_timings_ok(int hsync_len, int hfp, int hbp,
int vsw, int vfp, int vbp)
{
- if (hsw < 1 || hsw > dispc.feat->sw_max ||
+ if (hsync_len < 1 || hsync_len > dispc.feat->sw_max ||
hfp < 1 || hfp > dispc.feat->hp_max ||
hbp < 1 || hbp > dispc.feat->hp_max ||
vsw < 1 || vsw > dispc.feat->sw_max ||
@@ -3110,113 +3112,77 @@ static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
unsigned long pclk)
{
if (dss_mgr_is_lcd(channel))
- return pclk <= dispc.feat->max_lcd_pclk ? true : false;
+ return pclk <= dispc.feat->max_lcd_pclk;
else
- return pclk <= dispc.feat->max_tv_pclk ? true : false;
+ return pclk <= dispc.feat->max_tv_pclk;
}
-bool dispc_mgr_timings_ok(enum omap_channel channel,
- const struct omap_video_timings *timings)
+bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm)
{
- if (!_dispc_mgr_size_ok(timings->x_res, timings->y_res))
+ if (!_dispc_mgr_size_ok(vm->hactive, vm->vactive))
return false;
- if (!_dispc_mgr_pclk_ok(channel, timings->pixelclock))
+ if (!_dispc_mgr_pclk_ok(channel, vm->pixelclock))
return false;
if (dss_mgr_is_lcd(channel)) {
/* TODO: OMAP4+ supports interlace for LCD outputs */
- if (timings->interlace)
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
return false;
- if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
- timings->hbp, timings->vsw, timings->vfp,
- timings->vbp))
+ if (!_dispc_lcd_timings_ok(vm->hsync_len,
+ vm->hfront_porch, vm->hback_porch,
+ vm->vsync_len, vm->vfront_porch,
+ vm->vback_porch))
return false;
}
return true;
}
-static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
- int hfp, int hbp, int vsw, int vfp, int vbp,
- enum omap_dss_signal_level vsync_level,
- enum omap_dss_signal_level hsync_level,
- enum omap_dss_signal_edge data_pclk_edge,
- enum omap_dss_signal_level de_level,
- enum omap_dss_signal_edge sync_pclk_edge)
-
+static void _dispc_mgr_set_lcd_timings(enum omap_channel channel,
+ const struct videomode *vm)
{
u32 timing_h, timing_v, l;
bool onoff, rf, ipc, vs, hs, de;
- timing_h = FLD_VAL(hsw-1, dispc.feat->sw_start, 0) |
- FLD_VAL(hfp-1, dispc.feat->fp_start, 8) |
- FLD_VAL(hbp-1, dispc.feat->bp_start, 20);
- timing_v = FLD_VAL(vsw-1, dispc.feat->sw_start, 0) |
- FLD_VAL(vfp, dispc.feat->fp_start, 8) |
- FLD_VAL(vbp, dispc.feat->bp_start, 20);
+ timing_h = FLD_VAL(vm->hsync_len - 1, dispc.feat->sw_start, 0) |
+ FLD_VAL(vm->hfront_porch - 1, dispc.feat->fp_start, 8) |
+ FLD_VAL(vm->hback_porch - 1, dispc.feat->bp_start, 20);
+ timing_v = FLD_VAL(vm->vsync_len - 1, dispc.feat->sw_start, 0) |
+ FLD_VAL(vm->vfront_porch, dispc.feat->fp_start, 8) |
+ FLD_VAL(vm->vback_porch, dispc.feat->bp_start, 20);
dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
- switch (vsync_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- vs = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
vs = false;
- break;
- default:
- BUG();
- }
+ else
+ vs = true;
- switch (hsync_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- hs = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
hs = false;
- break;
- default:
- BUG();
- }
+ else
+ hs = true;
- switch (de_level) {
- case OMAPDSS_SIG_ACTIVE_LOW:
- de = true;
- break;
- case OMAPDSS_SIG_ACTIVE_HIGH:
+ if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
de = false;
- break;
- default:
- BUG();
- }
+ else
+ de = true;
- switch (data_pclk_edge) {
- case OMAPDSS_DRIVE_SIG_RISING_EDGE:
+ if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
ipc = false;
- break;
- case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
+ else
ipc = true;
- break;
- default:
- BUG();
- }
/* always use the 'rf' setting */
onoff = true;
- switch (sync_pclk_edge) {
- case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
- rf = false;
- break;
- case OMAPDSS_DRIVE_SIG_RISING_EDGE:
+ if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
rf = true;
- break;
- default:
- BUG();
- }
+ else
+ rf = false;
l = FLD_VAL(onoff, 17, 17) |
FLD_VAL(rf, 16, 16) |
@@ -3253,13 +3219,13 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
/* change name to mode? */
void dispc_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+ const struct videomode *vm)
{
unsigned xtot, ytot;
unsigned long ht, vt;
- struct omap_video_timings t = *timings;
+ struct videomode t = *vm;
- DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
+ DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive);
if (!dispc_mgr_timings_ok(channel, &t)) {
BUG();
@@ -3267,34 +3233,37 @@ void dispc_mgr_set_timings(enum omap_channel channel,
}
if (dss_mgr_is_lcd(channel)) {
- _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
- t.vfp, t.vbp, t.vsync_level, t.hsync_level,
- t.data_pclk_edge, t.de_level, t.sync_pclk_edge);
+ _dispc_mgr_set_lcd_timings(channel, &t);
- xtot = t.x_res + t.hfp + t.hsw + t.hbp;
- ytot = t.y_res + t.vfp + t.vsw + t.vbp;
+ xtot = t.hactive + t.hfront_porch + t.hsync_len + t.hback_porch;
+ ytot = t.vactive + t.vfront_porch + t.vsync_len + t.vback_porch;
- ht = timings->pixelclock / xtot;
- vt = timings->pixelclock / xtot / ytot;
+ ht = vm->pixelclock / xtot;
+ vt = vm->pixelclock / xtot / ytot;
- DSSDBG("pck %u\n", timings->pixelclock);
- DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
- t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
+ DSSDBG("pck %lu\n", vm->pixelclock);
+ DSSDBG("hsync_len %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
+ t.hsync_len, t.hfront_porch, t.hback_porch,
+ t.vsync_len, t.vfront_porch, t.vback_porch);
DSSDBG("vsync_level %d hsync_level %d data_pclk_edge %d de_level %d sync_pclk_edge %d\n",
- t.vsync_level, t.hsync_level, t.data_pclk_edge,
- t.de_level, t.sync_pclk_edge);
+ !!(t.flags & DISPLAY_FLAGS_VSYNC_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_HSYNC_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE),
+ !!(t.flags & DISPLAY_FLAGS_DE_HIGH),
+ !!(t.flags & DISPLAY_FLAGS_SYNC_POSEDGE));
DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
} else {
- if (t.interlace)
- t.y_res /= 2;
+ if (t.flags & DISPLAY_FLAGS_INTERLACED)
+ t.vactive /= 2;
if (dispc.feat->supports_double_pixel)
- REG_FLD_MOD(DISPC_CONTROL, t.double_pixel ? 1 : 0,
- 19, 17);
+ REG_FLD_MOD(DISPC_CONTROL,
+ !!(t.flags & DISPLAY_FLAGS_DOUBLECLK),
+ 19, 17);
}
- dispc_mgr_set_size(channel, t.x_res, t.y_res);
+ dispc_mgr_set_size(channel, t.hactive, t.vactive);
}
EXPORT_SYMBOL(dispc_mgr_set_timings);
@@ -4214,23 +4183,20 @@ EXPORT_SYMBOL(dispc_free_irq);
*/
static const struct dispc_errata_i734_data {
- struct omap_video_timings timings;
+ struct videomode vm;
struct omap_overlay_info ovli;
struct omap_overlay_manager_info mgri;
struct dss_lcd_mgr_config lcd_conf;
} i734 = {
- .timings = {
- .x_res = 8, .y_res = 1,
+ .vm = {
+ .hactive = 8, .vactive = 1,
.pixelclock = 16000000,
- .hsw = 8, .hfp = 4, .hbp = 4,
- .vsw = 1, .vfp = 1, .vbp = 1,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .interlace = false,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .double_pixel = false,
+ .hsync_len = 8, .hfront_porch = 4, .hback_porch = 4,
+ .vsync_len = 1, .vfront_porch = 1, .vback_porch = 1,
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE,
},
.ovli = {
.screen_width = 1,
@@ -4320,7 +4286,7 @@ static void dispc_errata_i734_wa(void)
/* Setup and enable GFX plane */
dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD);
- dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false);
+ dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.vm, false);
dispc_ovl_enable(OMAP_DSS_GFX, true);
/* Set up and enable display manager for LCD1 */
@@ -4328,7 +4294,7 @@ static void dispc_errata_i734_wa(void)
dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
&lcd_conf.clock_info);
dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
- dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings);
+ dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.vm);
dispc_clear_irqstatus(framedone_irq);
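The largest logic change in dispc.c is in _dispc_mgr_set_lcd_timings(): each three-way switch over an omapdss-private enum (with a BUG() default) becomes a plain flag test, because a videomode flags word has no out-of-range value to trap. A standalone sketch of the decode; the DISPLAY_FLAGS_* values here are stand-ins for the real definitions in include/video/display_timing.h:

#include <stdbool.h>

#define DISPLAY_FLAGS_HSYNC_HIGH	(1 << 1)	/* stand-in value */
#define DISPLAY_FLAGS_VSYNC_HIGH	(1 << 3)	/* stand-in value */

/* DISPC encodes "sync active low" as 1, hence the inversions */
static bool dispc_vs_bit(unsigned int flags)
{
	return !(flags & DISPLAY_FLAGS_VSYNC_HIGH);
}

static bool dispc_hs_bit(unsigned int flags)
{
	return !(flags & DISPLAY_FLAGS_HSYNC_HIGH);
}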
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 8dcdd7cf9937..425a5a8dff8b 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -35,8 +35,8 @@
void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
+ *xres = dssdev->panel.vm.hactive;
+ *yres = dssdev->panel.vm.vactive;
}
EXPORT_SYMBOL(omapdss_default_get_resolution);
@@ -72,9 +72,9 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = dssdev->panel.timings;
+ *vm = dssdev->panel.vm;
}
EXPORT_SYMBOL(omapdss_default_get_timings);
@@ -217,73 +217,3 @@ struct omap_dss_device *omap_dss_find_device(void *data,
return NULL;
}
EXPORT_SYMBOL(omap_dss_find_device);
-
-void videomode_to_omap_video_timings(const struct videomode *vm,
- struct omap_video_timings *ovt)
-{
- memset(ovt, 0, sizeof(*ovt));
-
- ovt->pixelclock = vm->pixelclock;
- ovt->x_res = vm->hactive;
- ovt->hbp = vm->hback_porch;
- ovt->hfp = vm->hfront_porch;
- ovt->hsw = vm->hsync_len;
- ovt->y_res = vm->vactive;
- ovt->vbp = vm->vback_porch;
- ovt->vfp = vm->vfront_porch;
- ovt->vsw = vm->vsync_len;
-
- ovt->vsync_level = vm->flags & DISPLAY_FLAGS_VSYNC_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->hsync_level = vm->flags & DISPLAY_FLAGS_HSYNC_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->de_level = vm->flags & DISPLAY_FLAGS_DE_HIGH ?
- OMAPDSS_SIG_ACTIVE_HIGH :
- OMAPDSS_SIG_ACTIVE_LOW;
- ovt->data_pclk_edge = vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
- OMAPDSS_DRIVE_SIG_RISING_EDGE :
- OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- ovt->sync_pclk_edge = ovt->data_pclk_edge;
-}
-EXPORT_SYMBOL(videomode_to_omap_video_timings);
-
-void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
- struct videomode *vm)
-{
- memset(vm, 0, sizeof(*vm));
-
- vm->pixelclock = ovt->pixelclock;
-
- vm->hactive = ovt->x_res;
- vm->hback_porch = ovt->hbp;
- vm->hfront_porch = ovt->hfp;
- vm->hsync_len = ovt->hsw;
- vm->vactive = ovt->y_res;
- vm->vback_porch = ovt->vbp;
- vm->vfront_porch = ovt->vfp;
- vm->vsync_len = ovt->vsw;
-
- if (ovt->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_HSYNC_LOW;
-
- if (ovt->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_VSYNC_LOW;
-
- if (ovt->de_level == OMAPDSS_SIG_ACTIVE_HIGH)
- vm->flags |= DISPLAY_FLAGS_DE_HIGH;
- else
- vm->flags |= DISPLAY_FLAGS_DE_LOW;
-
- if (ovt->data_pclk_edge == OMAPDSS_DRIVE_SIG_RISING_EDGE)
- vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
- else
- vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
-}
-EXPORT_SYMBOL(omap_video_timings_to_videomode);
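With struct omap_dss_device now carrying a struct videomode directly, the two conversion helpers deleted above have no remaining callers: consumers test the flags word where they previously compared OMAPDSS_SIG_* and OMAPDSS_DRIVE_SIG_* enums. A sketch of the replacement pattern (hypothetical helper, kernel-style C):

static bool panel_hsync_active_high(const struct videomode *vm)
{
	/* was: ovt->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH */
	return !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
}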
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index b268295b76cf..e75162d26ac0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -47,7 +47,7 @@ struct dpi_data {
struct mutex lock;
- struct omap_video_timings timings;
+ struct videomode vm;
struct dss_lcd_mgr_config mgr_config;
int data_lines;
@@ -333,31 +333,31 @@ static int dpi_set_mode(struct dpi_data *dpi)
{
struct omap_dss_device *out = &dpi->output;
enum omap_channel channel = out->dispc_channel;
- struct omap_video_timings *t = &dpi->timings;
+ struct videomode *vm = &dpi->vm;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
unsigned long pck;
int r = 0;
if (dpi->pll)
- r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck,
+ r = dpi_set_pll_clk(dpi, channel, vm->pixelclock, &fck,
&lck_div, &pck_div);
else
- r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
+ r = dpi_set_dispc_clk(dpi, vm->pixelclock, &fck,
&lck_div, &pck_div);
if (r)
return r;
pck = fck / lck_div / pck_div;
- if (pck != t->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
- t->pixelclock, pck);
+ if (pck != vm->pixelclock) {
+ DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
+ vm->pixelclock, pck);
- t->pixelclock = pck;
+ vm->pixelclock = pck;
}
- dss_mgr_set_timings(channel, t);
+ dss_mgr_set_timings(channel, vm);
return 0;
}
@@ -476,7 +476,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
}
static void dpi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
@@ -484,25 +484,25 @@ static void dpi_set_timings(struct omap_dss_device *dssdev,
mutex_lock(&dpi->lock);
- dpi->timings = *timings;
+ dpi->vm = *vm;
mutex_unlock(&dpi->lock);
}
static void dpi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
mutex_lock(&dpi->lock);
- *timings = dpi->timings;
+ *vm = dpi->vm;
mutex_unlock(&dpi->lock);
}
static int dpi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
enum omap_channel channel = dpi->output.dispc_channel;
@@ -512,23 +512,23 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
struct dpi_clk_calc_ctx ctx;
bool ok;
- if (timings->x_res % 8 != 0)
+ if (vm->hactive % 8 != 0)
return -EINVAL;
- if (!dispc_mgr_timings_ok(channel, timings))
+ if (!dispc_mgr_timings_ok(channel, vm))
return -EINVAL;
- if (timings->pixelclock == 0)
+ if (vm->pixelclock == 0)
return -EINVAL;
if (dpi->pll) {
- ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx);
+ ok = dpi_pll_clk_calc(dpi, vm->pixelclock, &ctx);
if (!ok)
return -EINVAL;
fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
- ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
+ ok = dpi_dss_clk_calc(vm->pixelclock, &ctx);
if (!ok)
return -EINVAL;
@@ -540,7 +540,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
pck = fck / lck_div / pck_div;
- timings->pixelclock = pck;
+ vm->pixelclock = pck;
return 0;
}
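Both dpi_set_mode() and dpi_check_timings() write the achievable rate back into vm->pixelclock, so callers must treat the field as in/out: the requested clock is rounded to whatever fck / lck_div / pck_div can produce. A user-space sketch with illustrative numbers (not taken from any particular OMAP):

#include <stdio.h>

int main(void)
{
	unsigned long fck = 170666667;		/* assumed DSS fclk, Hz */
	int lck_div = 1, pck_div = 6;
	unsigned long requested = 28500000;
	unsigned long pck = fck / lck_div / pck_div;	/* 28444444 */

	if (pck != requested)
		printf("Could not find exact pixel clock. "
		       "Requested %lu Hz, got %lu Hz\n", requested, pck);
	return 0;
}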
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index e1be5e795cd8..f060bda31235 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -289,7 +289,7 @@ struct dsi_clk_calc_ctx {
struct dss_pll_clock_info dsi_cinfo;
struct dispc_clock_info dispc_cinfo;
- struct omap_video_timings dispc_vm;
+ struct videomode vm;
struct omap_dss_dsi_videomode_timings dsi_vm;
};
@@ -383,7 +383,7 @@ struct dsi_data {
unsigned scp_clk_refcount;
struct dss_lcd_mgr_config mgr_config;
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_dsi_pixel_format pix_fmt;
enum omap_dss_dsi_mode mode;
struct omap_dss_dsi_videomode_timings vm_timings;
@@ -3321,12 +3321,12 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
/*
* Don't use line buffers if width is greater than the video
* port's line buffer size
*/
- if (dsi->line_buffer_size <= timings->x_res * bpp / 8)
+ if (dsi->line_buffer_size <= vm->hactive * bpp / 8)
num_line_buffers = 0;
else
num_line_buffers = 2;
@@ -3453,7 +3453,7 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
int tclk_trail, ths_exit, exiths_clk;
bool ddr_alwon;
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
@@ -3494,7 +3494,7 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
exiths_clk = ths_exit + tclk_trail;
- width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+ width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
if (!hsa_blanking_mode) {
@@ -3705,7 +3705,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
int vbp = dsi->vm_timings.vbp;
int window_sync = dsi->vm_timings.window_sync;
bool hsync_end;
- struct omap_video_timings *timings = &dsi->timings;
+ struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int tl, t_he, width_bytes;
@@ -3713,7 +3713,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
t_he = hsync_end ?
((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;
- width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+ width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
/* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
@@ -3722,7 +3722,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
hfp, hsync_end ? hsa : 0, tl);
DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
- vsa, timings->y_res);
+ vsa, vm->vactive);
r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
r = FLD_MOD(r, hbp, 11, 0); /* HBP */
@@ -3738,7 +3738,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
- r = FLD_MOD(r, timings->y_res, 14, 0); /* VACT */
+ r = FLD_MOD(r, vm->vactive, 14, 0); /* VACT */
r = FLD_MOD(r, tl, 31, 16); /* TL */
dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
}
@@ -3856,7 +3856,7 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
/* MODE, 1 = video mode */
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
- word_count = DIV_ROUND_UP(dsi->timings.x_res * bpp, 8);
+ word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
dsi_vc_write_long_header(dsidev, channel, data_type,
word_count, 0);
@@ -3918,8 +3918,8 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
int r;
const unsigned channel = dsi->update_channel;
const unsigned line_buf_size = dsi->line_buffer_size;
- u16 w = dsi->timings.x_res;
- u16 h = dsi->timings.y_res;
+ u16 w = dsi->vm.hactive;
+ u16 h = dsi->vm.vactive;
DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
@@ -3969,7 +3969,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
msecs_to_jiffies(250));
BUG_ON(r == 0);
- dss_mgr_set_timings(dispc_channel, &dsi->timings);
+ dss_mgr_set_timings(dispc_channel, &dsi->vm);
dss_mgr_start_update(dispc_channel);
@@ -4056,8 +4056,8 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
dsi->framedone_callback = callback;
dsi->framedone_data = data;
- dw = dsi->timings.x_res;
- dh = dsi->timings.y_res;
+ dw = dsi->vm.hactive;
+ dh = dsi->vm.vactive;
#ifdef DSI_PERF_MEASURE
dsi->update_bytes = dw * dh *
@@ -4120,16 +4120,21 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
/*
* override interlace, logic level and edge related parameters in
- * omap_video_timings with default values
+ * videomode with default values
*/
- dsi->timings.interlace = false;
- dsi->timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- dss_mgr_set_timings(channel, &dsi->timings);
+ dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+ dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+
+ dss_mgr_set_timings(channel, &dsi->vm);
r = dsi_configure_dispc_clocks(dsidev);
if (r)
@@ -4331,7 +4336,7 @@ static void print_dsi_vm(const char *str,
wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
tot = bl + pps;
#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))
@@ -4340,14 +4345,14 @@ static void print_dsi_vm(const char *str,
"%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
str,
byteclk,
t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
bl, pps, tot,
TO_DSI_T(t->hss),
TO_DSI_T(t->hsa),
TO_DSI_T(t->hse),
TO_DSI_T(t->hbp),
TO_DSI_T(pps),
TO_DSI_T(t->hfp),
TO_DSI_T(bl),
TO_DSI_T(pps),
@@ -4356,13 +4361,13 @@ static void print_dsi_vm(const char *str,
#undef TO_DSI_T
}
-static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
+static void print_dispc_vm(const char *str, const struct videomode *vm)
{
- unsigned long pck = t->pixelclock;
+ unsigned long pck = vm->pixelclock;
int hact, bl, tot;
- hact = t->x_res;
- bl = t->hsw + t->hbp + t->hfp;
+ hact = vm->hactive;
+ bl = vm->hsync_len + vm->hback_porch + vm->hfront_porch;
tot = hact + bl;
#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))
@@ -4371,12 +4376,12 @@ static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
"%u/%u/%u/%u = %u + %u = %u\n",
str,
pck,
- t->hsw, t->hbp, hact, t->hfp,
+ vm->hsync_len, vm->hback_porch, hact, vm->hfront_porch,
bl, hact, tot,
- TO_DISPC_T(t->hsw),
- TO_DISPC_T(t->hbp),
+ TO_DISPC_T(vm->hsync_len),
+ TO_DISPC_T(vm->hback_porch),
TO_DISPC_T(hact),
- TO_DISPC_T(t->hfp),
+ TO_DISPC_T(vm->hfront_porch),
TO_DISPC_T(bl),
TO_DISPC_T(hact),
TO_DISPC_T(tot));
@@ -4387,7 +4392,7 @@ static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
static void print_dsi_dispc_vm(const char *str,
const struct omap_dss_dsi_videomode_timings *t)
{
- struct omap_video_timings vm = { 0 };
+ struct videomode vm = { 0 };
unsigned long byteclk = t->hsclk / 4;
unsigned long pck;
u64 dsi_tput;
@@ -4396,13 +4401,13 @@ static void print_dsi_dispc_vm(const char *str,
dsi_tput = (u64)byteclk * t->ndl * 8;
pck = (u32)div64_u64(dsi_tput, t->bitspp);
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
vm.pixelclock = pck;
- vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
- vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
- vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
- vm.x_res = t->hact;
+ vm.hsync_len = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
+ vm.hback_porch = div64_u64((u64)t->hbp * pck, byteclk);
+ vm.hfront_porch = div64_u64((u64)t->hfp * pck, byteclk);
+ vm.hactive = t->hact;
print_dispc_vm(str, &vm);
}
@@ -4412,19 +4417,19 @@ static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
- struct omap_video_timings *t = &ctx->dispc_vm;
+ struct videomode *vm = &ctx->vm;
ctx->dispc_cinfo.lck_div = lckd;
ctx->dispc_cinfo.pck_div = pckd;
ctx->dispc_cinfo.lck = lck;
ctx->dispc_cinfo.pck = pck;
- *t = *ctx->config->timings;
- t->pixelclock = pck;
- t->x_res = ctx->config->timings->x_res;
- t->y_res = ctx->config->timings->y_res;
- t->hsw = t->hfp = t->hbp = t->vsw = 1;
- t->vfp = t->vbp = 0;
+ *vm = *ctx->config->vm;
+ vm->pixelclock = pck;
+ vm->hactive = ctx->config->vm->hactive;
+ vm->vactive = ctx->config->vm->vactive;
+ vm->hsync_len = vm->hfront_porch = vm->hback_porch = vm->vsync_len = 1;
+ vm->vfront_porch = vm->vback_porch = 0;
return true;
}
@@ -4475,7 +4480,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
* especially as we go to LP between each pixel packet due to HW
* "feature". So let's just estimate very roughly and multiply by 1.5.
*/
- pck = cfg->timings->pixelclock;
+ pck = cfg->vm->pixelclock;
pck = pck * 3 / 2;
txbyteclk = pck * bitspp / 8 / ndl;
@@ -4510,14 +4515,14 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
int dispc_htot, dispc_hbl; /* pixels */
int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
int hfp, hsa, hbp;
- const struct omap_video_timings *req_vm;
- struct omap_video_timings *dispc_vm;
+ const struct videomode *req_vm;
+ struct videomode *dispc_vm;
struct omap_dss_dsi_videomode_timings *dsi_vm;
u64 dsi_tput, dispc_tput;
dsi_tput = (u64)byteclk * ndl * 8;
- req_vm = cfg->timings;
+ req_vm = cfg->vm;
req_pck_min = ctx->req_pck_min;
req_pck_max = ctx->req_pck_max;
req_pck_nom = ctx->req_pck_nom;
@@ -4525,9 +4530,10 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dispc_pck = ctx->dispc_cinfo.pck;
dispc_tput = (u64)dispc_pck * bitspp;
- xres = req_vm->x_res;
+ xres = req_vm->hactive;
- panel_hbl = req_vm->hfp + req_vm->hbp + req_vm->hsw;
+ panel_hbl = req_vm->hfront_porch + req_vm->hback_porch +
+ req_vm->hsync_len;
panel_htot = xres + panel_hbl;
dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);
@@ -4557,7 +4563,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
hss = DIV_ROUND_UP(4, ndl);
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
- if (ndl == 3 && req_vm->hsw == 0)
+ if (ndl == 3 && req_vm->hsync_len == 0)
hse = 1;
else
hse = DIV_ROUND_UP(4, ndl);
@@ -4596,14 +4602,14 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
hsa = 0;
- } else if (ndl == 3 && req_vm->hsw == 0) {
+ } else if (ndl == 3 && req_vm->hsync_len == 0) {
hsa = 0;
} else {
- hsa = div64_u64((u64)req_vm->hsw * byteclk, req_pck_nom);
+ hsa = div64_u64((u64)req_vm->hsync_len * byteclk, req_pck_nom);
hsa = max(hsa - hse, 1);
}
- hbp = div64_u64((u64)req_vm->hbp * byteclk, req_pck_nom);
+ hbp = div64_u64((u64)req_vm->hback_porch * byteclk, req_pck_nom);
hbp = max(hbp, 1);
hfp = dsi_hbl - (hss + hsa + hse + hbp);
@@ -4633,10 +4639,10 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dsi_vm->hact = xres;
dsi_vm->hfp = hfp;
- dsi_vm->vsa = req_vm->vsw;
- dsi_vm->vbp = req_vm->vbp;
- dsi_vm->vact = req_vm->y_res;
- dsi_vm->vfp = req_vm->vfp;
+ dsi_vm->vsa = req_vm->vsync_len;
+ dsi_vm->vbp = req_vm->vback_porch;
+ dsi_vm->vact = req_vm->vactive;
+ dsi_vm->vfp = req_vm->vfront_porch;
dsi_vm->trans_mode = cfg->trans_mode;
@@ -4650,19 +4656,19 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
/* setup DISPC videomode */
- dispc_vm = &ctx->dispc_vm;
+ dispc_vm = &ctx->vm;
*dispc_vm = *req_vm;
dispc_vm->pixelclock = dispc_pck;
if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
- hsa = div64_u64((u64)req_vm->hsw * dispc_pck,
+ hsa = div64_u64((u64)req_vm->hsync_len * dispc_pck,
req_pck_nom);
hsa = max(hsa, 1);
} else {
hsa = 1;
}
- hbp = div64_u64((u64)req_vm->hbp * dispc_pck, req_pck_nom);
+ hbp = div64_u64((u64)req_vm->hback_porch * dispc_pck, req_pck_nom);
hbp = max(hbp, 1);
hfp = dispc_hbl - hsa - hbp;
@@ -4685,9 +4691,9 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
if (hfp < 1)
return false;
- dispc_vm->hfp = hfp;
- dispc_vm->hsw = hsa;
- dispc_vm->hbp = hbp;
+ dispc_vm->hfront_porch = hfp;
+ dispc_vm->hsync_len = hsa;
+ dispc_vm->hback_porch = hbp;
return true;
}
@@ -4707,9 +4713,9 @@ static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
return false;
#ifdef PRINT_VERBOSE_VM_TIMINGS
- print_dispc_vm("dispc", &ctx->dispc_vm);
+ print_dispc_vm("dispc", &ctx->vm);
print_dsi_vm("dsi ", &ctx->dsi_vm);
- print_dispc_vm("req ", ctx->config->timings);
+ print_dispc_vm("req ", ctx->config->vm);
print_dsi_dispc_vm("act ", &ctx->dsi_vm);
#endif
@@ -4758,7 +4764,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
const struct omap_dss_dsi_config *cfg,
struct dsi_clk_calc_ctx *ctx)
{
- const struct omap_video_timings *t = cfg->timings;
+ const struct videomode *vm = cfg->vm;
unsigned long clkin;
unsigned long pll_min;
unsigned long pll_max;
@@ -4774,9 +4780,9 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
ctx->config = cfg;
/* these limits should come from the panel driver */
- ctx->req_pck_min = t->pixelclock - 1000;
- ctx->req_pck_nom = t->pixelclock;
- ctx->req_pck_max = t->pixelclock + 1000;
+ ctx->req_pck_min = vm->pixelclock - 1000;
+ ctx->req_pck_nom = vm->pixelclock;
+ ctx->req_pck_max = vm->pixelclock + 1000;
byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);
@@ -4833,7 +4839,7 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
dsi->user_dsi_cinfo = ctx.dsi_cinfo;
dsi->user_dispc_cinfo = ctx.dispc_cinfo;
- dsi->timings = ctx.dispc_vm;
+ dsi->vm = ctx.vm;
dsi->vm_timings = ctx.dsi_vm;
mutex_unlock(&dsi->lock);
@@ -5342,7 +5348,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->phy_base) {
DSSERR("can't ioremap DSI PHY\n");
return -ENOMEM;
}
@@ -5362,7 +5368,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->pll_base) {
DSSERR("can't ioremap DSI PLL\n");
return -ENOMEM;
}
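dsi_vm_calc_blanking() keeps two clock domains in play: the panel's blanking is specified in pixels at req_pck_nom, while the DSI link counts byte clocks, so each porch is rescaled as pixels * byteclk / req_pck_nom (via div64_u64 in the kernel). A standalone sketch of that scaling with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

static uint64_t px_to_byteclk(uint64_t pixels, uint64_t byteclk,
			      uint64_t pck_nom)
{
	return pixels * byteclk / pck_nom;
}

int main(void)
{
	uint64_t byteclk = 112500000;	/* assumed HS bit clock / 8 */
	uint64_t pck_nom = 36000000;

	/* the 214-pixel hback_porch of the TD043 mode */
	printf("hbp = %llu byteclks\n", (unsigned long long)
	       px_to_byteclk(214, byteclk, pck_nom));	/* 668 */
	return 0;
}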
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 4fd06dc41cb3..56493b290731 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -366,8 +366,7 @@ bool dispc_div_calc(unsigned long dispc,
unsigned long pck_min, unsigned long pck_max,
dispc_div_calc_func func, void *data);
-bool dispc_mgr_timings_ok(enum omap_channel channel,
- const struct omap_video_timings *timings);
+bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm);
int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
@@ -390,7 +389,7 @@ void dispc_wb_enable(bool enable);
bool dispc_wb_is_enabled(void);
void dispc_wb_set_channel_in(enum dss_writeback_channel channel);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct omap_video_timings *timings);
+ bool mem_to_mem, const struct videomode *vm);
/* VENC */
int venc_init_platform_driver(void) __init;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index 63e711545865..fb6cccd02374 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -181,7 +181,7 @@ struct hdmi_video_format {
};
struct hdmi_config {
- struct omap_video_timings timings;
+ struct videomode vm;
struct hdmi_avi_infoframe infoframe;
enum hdmi_core_hdmi_dvi hdmi_dvi_mode;
};
@@ -298,11 +298,11 @@ int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val);
void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
struct hdmi_video_format *video_fmt);
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param);
+ struct videomode *vm, struct hdmi_config *param);
int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp);
phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index cbd28dfdb86a..e7162c16de2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -155,7 +155,7 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
- struct omap_video_timings *p;
+ struct videomode *vm;
enum omap_channel channel = dssdev->dispc_channel;
struct hdmi_wp_data *wp = &hdmi.wp;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
@@ -169,12 +169,13 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi_wp_clear_irqenable(wp, 0xffffffff);
hdmi_wp_set_irqstatus(wp, 0xffffffff);
- p = &hdmi.cfg.timings;
+ vm = &hdmi.cfg.vm;
- DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+ DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
+ vm->vactive);
- pc = p->pixelclock;
- if (p->double_pixel)
+ pc = vm->pixelclock;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
pc *= 2;
/* DSS_HDMI_TCLK is bitclk / 10 */
@@ -209,7 +210,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
/* tv size */
- dss_mgr_set_timings(channel, p);
+ dss_mgr_set_timings(channel, vm);
r = dss_mgr_enable(channel);
if (r)
@@ -255,30 +256,30 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
+ if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
}
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&hdmi.lock);
- hdmi.cfg.timings = *timings;
+ hdmi.cfg.vm = *vm;
- dispc_set_tv_pclk(timings->pixelclock);
+ dispc_set_tv_pclk(vm->pixelclock);
mutex_unlock(&hdmi.lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = hdmi.cfg.timings;
+ *vm = hdmi.cfg.vm;
}
static void hdmi_dump_regs(struct seq_file *s)
@@ -352,7 +353,7 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
if (hdmi.audio_configured) {
r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.timings.pixelclock);
+ hdmi.cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
hdmi.audio_abort_cb(&hdmi.pdev->dev);
@@ -643,7 +644,7 @@ static int hdmi_audio_config(struct device *dev,
}
ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.timings.pixelclock);
+ hd->cfg.vm.pixelclock);
if (!ret) {
hd->audio_configured = true;
hd->audio_config = *dss_audio;
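hdmi_power_on_full() derives the PLL input from the mode: double-clock (pixel-repetition) modes need twice the pixel clock before the TMDS bit clock and DSS_HDMI_TCLK (bit clock / 10) are computed. A sketch; the DISPLAY_FLAGS_DOUBLECLK value is a stand-in for the real definition:

#define DISPLAY_FLAGS_DOUBLECLK	(1 << 10)	/* stand-in value */

static unsigned long hdmi_pll_pixel_clock(unsigned long pixelclock,
					  unsigned int flags)
{
	unsigned long pc = pixelclock;

	if (flags & DISPLAY_FLAGS_DOUBLECLK)
		pc *= 2;
	return pc;
}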
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index ef3afe99e487..e05b7ac4f7dd 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -310,7 +310,7 @@ void hdmi4_configure(struct hdmi_core_data *core,
struct hdmi_wp_data *wp, struct hdmi_config *cfg)
{
/* HDMI */
- struct omap_video_timings video_timing;
+ struct videomode vm;
struct hdmi_video_format video_format;
/* HDMI core */
struct hdmi_core_video_config v_core_cfg;
@@ -318,16 +318,16 @@ void hdmi4_configure(struct hdmi_core_data *core,
hdmi_core_init(&v_core_cfg);
- hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
+ hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
- hdmi_wp_video_config_timing(wp, &video_timing);
+ hdmi_wp_video_config_timing(wp, &vm);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
hdmi_wp_video_config_format(wp, &video_format);
- hdmi_wp_video_config_interface(wp, &video_timing);
+ hdmi_wp_video_config_interface(wp, &vm);
/*
* configure core video part
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 0c0a5139a301..678dfb02764a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -172,7 +172,7 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
- struct omap_video_timings *p;
+ struct videomode *vm;
enum omap_channel channel = dssdev->dispc_channel;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
unsigned pc;
@@ -181,12 +181,13 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (r)
return r;
- p = &hdmi.cfg.timings;
+ vm = &hdmi.cfg.vm;
- DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
+ DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
+ vm->vactive);
- pc = p->pixelclock;
- if (p->double_pixel)
+ pc = vm->pixelclock;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
pc *= 2;
/* DSS_HDMI_TCLK is bitclk / 10 */
@@ -226,7 +227,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
/* tv size */
- dss_mgr_set_timings(channel, p);
+ dss_mgr_set_timings(channel, vm);
r = dss_mgr_enable(channel);
if (r)
@@ -272,30 +273,30 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
+ if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
}
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&hdmi.lock);
- hdmi.cfg.timings = *timings;
+ hdmi.cfg.vm = *vm;
- dispc_set_tv_pclk(timings->pixelclock);
+ dispc_set_tv_pclk(vm->pixelclock);
mutex_unlock(&hdmi.lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = hdmi.cfg.timings;
+ *vm = hdmi.cfg.vm;
}
static void hdmi_dump_regs(struct seq_file *s)
@@ -378,7 +379,7 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
if (hdmi.audio_configured) {
r = hdmi5_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.timings.pixelclock);
+ hdmi.cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
hdmi.audio_abort_cb(&hdmi.pdev->dev);
@@ -669,7 +670,7 @@ static int hdmi_audio_config(struct device *dev,
}
ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.timings.pixelclock);
+ hd->cfg.vm.pixelclock);
if (!ret) {
hd->audio_configured = true;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 8ab2093daa12..8de1d7b2ae55 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -292,35 +292,35 @@ static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
{
DSSDBG("hdmi_core_init\n");
- video_cfg->v_fc_config.timings = cfg->timings;
+ video_cfg->v_fc_config.vm = cfg->vm;
/* video core */
video_cfg->data_enable_pol = 1; /* It is always 1*/
- video_cfg->hblank = cfg->timings.hfp +
- cfg->timings.hbp + cfg->timings.hsw;
+ video_cfg->hblank = cfg->vm.hfront_porch +
+ cfg->vm.hback_porch + cfg->vm.hsync_len;
video_cfg->vblank_osc = 0;
- video_cfg->vblank = cfg->timings.vsw +
- cfg->timings.vfp + cfg->timings.vbp;
+ video_cfg->vblank = cfg->vm.vsync_len + cfg->vm.vfront_porch +
+ cfg->vm.vback_porch;
video_cfg->v_fc_config.hdmi_dvi_mode = cfg->hdmi_dvi_mode;
- if (cfg->timings.interlace) {
+ if (cfg->vm.flags & DISPLAY_FLAGS_INTERLACED) {
/* set vblank_osc if vblank is fractional */
if (video_cfg->vblank % 2 != 0)
video_cfg->vblank_osc = 1;
- video_cfg->v_fc_config.timings.y_res /= 2;
+ video_cfg->v_fc_config.vm.vactive /= 2;
video_cfg->vblank /= 2;
- video_cfg->v_fc_config.timings.vfp /= 2;
- video_cfg->v_fc_config.timings.vsw /= 2;
- video_cfg->v_fc_config.timings.vbp /= 2;
+ video_cfg->v_fc_config.vm.vfront_porch /= 2;
+ video_cfg->v_fc_config.vm.vsync_len /= 2;
+ video_cfg->v_fc_config.vm.vback_porch /= 2;
}
- if (cfg->timings.double_pixel) {
- video_cfg->v_fc_config.timings.x_res *= 2;
+ if (cfg->vm.flags & DISPLAY_FLAGS_DOUBLECLK) {
+ video_cfg->v_fc_config.vm.hactive *= 2;
video_cfg->hblank *= 2;
- video_cfg->v_fc_config.timings.hfp *= 2;
- video_cfg->v_fc_config.timings.hsw *= 2;
- video_cfg->v_fc_config.timings.hbp *= 2;
+ video_cfg->v_fc_config.vm.hfront_porch *= 2;
+ video_cfg->v_fc_config.vm.hsync_len *= 2;
+ video_cfg->v_fc_config.vm.hback_porch *= 2;
}
}
@@ -329,13 +329,12 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
struct hdmi_core_vid_config *cfg)
{
void __iomem *base = core->base;
+ struct videomode *vm = &cfg->v_fc_config.vm;
unsigned char r = 0;
bool vsync_pol, hsync_pol;
- vsync_pol =
- cfg->v_fc_config.timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
- hsync_pol =
- cfg->v_fc_config.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ vsync_pol = !!(vm->flags & DISPLAY_FLAGS_VSYNC_HIGH);
+ hsync_pol = !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
/* Set hsync, vsync and data-enable polarity */
r = hdmi_read_reg(base, HDMI_CORE_FC_INVIDCONF);
@@ -343,20 +342,16 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
r = FLD_MOD(r, hsync_pol, 5, 5);
r = FLD_MOD(r, cfg->data_enable_pol, 4, 4);
r = FLD_MOD(r, cfg->vblank_osc, 1, 1);
- r = FLD_MOD(r, cfg->v_fc_config.timings.interlace, 0, 0);
+ r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 0, 0);
hdmi_write_reg(base, HDMI_CORE_FC_INVIDCONF, r);
/* set x resolution */
- REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV1,
- cfg->v_fc_config.timings.x_res >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV0,
- cfg->v_fc_config.timings.x_res & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV1, vm->hactive >> 8, 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV0, vm->hactive & 0xFF, 7, 0);
/* set y resolution */
- REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV1,
- cfg->v_fc_config.timings.y_res >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV0,
- cfg->v_fc_config.timings.y_res & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV1, vm->vactive >> 8, 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV0, vm->vactive & 0xFF, 7, 0);
/* set horizontal blanking pixels */
REG_FLD_MOD(base, HDMI_CORE_FC_INHBLANK1, cfg->hblank >> 8, 4, 0);
@@ -366,30 +361,28 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
REG_FLD_MOD(base, HDMI_CORE_FC_INVBLANK, cfg->vblank, 7, 0);
/* set horizontal sync offset */
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY1,
- cfg->v_fc_config.timings.hfp >> 8, 4, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY0,
- cfg->v_fc_config.timings.hfp & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY1, vm->hfront_porch >> 8,
+ 4, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY0, vm->hfront_porch & 0xFF,
+ 7, 0);
/* set vertical sync offset */
- REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINDELAY,
- cfg->v_fc_config.timings.vfp, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINDELAY, vm->vfront_porch, 7, 0);
/* set horizontal sync pulse width */
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH1,
- (cfg->v_fc_config.timings.hsw >> 8), 1, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH0,
- cfg->v_fc_config.timings.hsw & 0xFF, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH1, (vm->hsync_len >> 8),
+ 1, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH0, vm->hsync_len & 0xFF,
+ 7, 0);
/* set vertical sync pulse width */
- REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINWIDTH,
- cfg->v_fc_config.timings.vsw, 5, 0);
+ REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINWIDTH, vm->vsync_len, 5, 0);
/* select DVI mode */
REG_FLD_MOD(base, HDMI_CORE_FC_INVIDCONF,
- cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
+ cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
- if (cfg->v_fc_config.timings.double_pixel)
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 2, 7, 4);
else
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 1, 7, 4);
@@ -616,7 +609,7 @@ int hdmi5_core_handle_irqs(struct hdmi_core_data *core)
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg)
{
- struct omap_video_timings video_timing;
+ struct videomode vm;
struct hdmi_video_format video_format;
struct hdmi_core_vid_config v_core_cfg;
@@ -624,16 +617,16 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_core_init(&v_core_cfg, cfg);
- hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
+ hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
- hdmi_wp_video_config_timing(wp, &video_timing);
+ hdmi_wp_video_config_timing(wp, &vm);
/* video config */
video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422;
hdmi_wp_video_config_format(wp, &video_format);
- hdmi_wp_video_config_interface(wp, &video_timing);
+ hdmi_wp_video_config_interface(wp, &vm);
/* support limited range with 24 bit color depth for now */
hdmi_core_configure_range(core);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 203694a52d18..b783d5a0750e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -144,87 +144,84 @@ void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
}
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
u32 r;
bool vsync_pol, hsync_pol;
DSSDBG("Enter hdmi_wp_video_config_interface\n");
- vsync_pol = timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
- hsync_pol = timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ vsync_pol = !!(vm->flags & DISPLAY_FLAGS_VSYNC_HIGH);
+ hsync_pol = !!(vm->flags & DISPLAY_FLAGS_HSYNC_HIGH);
r = hdmi_read_reg(wp->base, HDMI_WP_VIDEO_CFG);
r = FLD_MOD(r, vsync_pol, 7, 7);
r = FLD_MOD(r, hsync_pol, 6, 6);
- r = FLD_MOD(r, timings->interlace, 3, 3);
+ r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 3, 3);
r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_CFG, r);
}
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
u32 timing_h = 0;
u32 timing_v = 0;
- unsigned hsw_offset = 1;
+ unsigned hsync_len_offset = 1;
DSSDBG("Enter hdmi_wp_video_config_timing\n");
/*
* On OMAP4 and OMAP5 ES1 the HSW field is programmed as is. On OMAP5
- * ES2+ (including DRA7/AM5 SoCs) HSW field is programmed to hsw-1.
+ * ES2+ (including DRA7/AM5 SoCs) HSW field is programmed to hsync_len-1.
* However, we don't support OMAP5 ES1 at all, so we can just check for
* OMAP4 here.
*/
if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
omapdss_get_version() == OMAPDSS_VER_OMAP4)
- hsw_offset = 0;
+ hsync_len_offset = 0;
- timing_h |= FLD_VAL(timings->hbp, 31, 20);
- timing_h |= FLD_VAL(timings->hfp, 19, 8);
- timing_h |= FLD_VAL(timings->hsw - hsw_offset, 7, 0);
+ timing_h |= FLD_VAL(vm->hback_porch, 31, 20);
+ timing_h |= FLD_VAL(vm->hfront_porch, 19, 8);
+ timing_h |= FLD_VAL(vm->hsync_len - hsync_len_offset, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_H, timing_h);
- timing_v |= FLD_VAL(timings->vbp, 31, 20);
- timing_v |= FLD_VAL(timings->vfp, 19, 8);
- timing_v |= FLD_VAL(timings->vsw, 7, 0);
+ timing_v |= FLD_VAL(vm->vback_porch, 31, 20);
+ timing_v |= FLD_VAL(vm->vfront_porch, 19, 8);
+ timing_v |= FLD_VAL(vm->vsync_len, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_V, timing_v);
}
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct omap_video_timings *timings, struct hdmi_config *param)
+ struct videomode *vm, struct hdmi_config *param)
{
DSSDBG("Enter hdmi_wp_video_init_format\n");
video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
- video_fmt->y_res = param->timings.y_res;
- video_fmt->x_res = param->timings.x_res;
-
- timings->hbp = param->timings.hbp;
- timings->hfp = param->timings.hfp;
- timings->hsw = param->timings.hsw;
- timings->vbp = param->timings.vbp;
- timings->vfp = param->timings.vfp;
- timings->vsw = param->timings.vsw;
-
- timings->vsync_level = param->timings.vsync_level;
- timings->hsync_level = param->timings.hsync_level;
- timings->interlace = param->timings.interlace;
- timings->double_pixel = param->timings.double_pixel;
-
- if (param->timings.interlace) {
+ video_fmt->y_res = param->vm.vactive;
+ video_fmt->x_res = param->vm.hactive;
+
+ vm->hback_porch = param->vm.hback_porch;
+ vm->hfront_porch = param->vm.hfront_porch;
+ vm->hsync_len = param->vm.hsync_len;
+ vm->vback_porch = param->vm.vback_porch;
+ vm->vfront_porch = param->vm.vfront_porch;
+ vm->vsync_len = param->vm.vsync_len;
+
+ vm->flags = param->vm.flags;
+
+ if (param->vm.flags & DISPLAY_FLAGS_INTERLACED) {
video_fmt->y_res /= 2;
- timings->vbp /= 2;
- timings->vfp /= 2;
- timings->vsw /= 2;
+ vm->vback_porch /= 2;
+ vm->vfront_porch /= 2;
+ vm->vsync_len /= 2;
}
- if (param->timings.double_pixel) {
+ if (param->vm.flags & DISPLAY_FLAGS_DOUBLECLK) {
video_fmt->x_res *= 2;
- timings->hfp *= 2;
- timings->hsw *= 2;
- timings->hbp *= 2;
+ vm->hfront_porch *= 2;
+ vm->hsync_len *= 2;
+ vm->hback_porch *= 2;
}
}
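
The interlace and double-clock handling above is a fixed field-level transform: per-field vertical timings are the frame values halved, and pixel-repeated modes double every horizontal count. A minimal sketch of that transform as a standalone helper, assuming only <video/videomode.h> (the helper name is hypothetical, not part of this patch):

    #include <video/videomode.h>

    /* Illustration only: the same adjustments the patch performs inline. */
    static void example_adjust_vm(struct videomode *vm)
    {
            if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
                    /* program per-field, not per-frame, vertical timings */
                    vm->vactive /= 2;
                    vm->vfront_porch /= 2;
                    vm->vsync_len /= 2;
                    vm->vback_porch /= 2;
            }
            if (vm->flags & DISPLAY_FLAGS_DOUBLECLK) {
                    /* pixel repetition doubles all horizontal counts */
                    vm->hactive *= 2;
                    vm->hfront_porch *= 2;
                    vm->hsync_len *= 2;
                    vm->hback_porch *= 2;
            }
    }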
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 6eaf1adbd606..b420dde8c0fb 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -290,7 +290,7 @@ struct omap_dss_dsi_videomode_timings {
struct omap_dss_dsi_config {
enum omap_dss_dsi_mode mode;
enum omap_dss_dsi_pixel_format pixel_format;
- const struct omap_video_timings *timings;
+ const struct videomode *vm;
unsigned long hs_clk_min, hs_clk_max;
unsigned long lp_clk_min, lp_clk_max;
@@ -299,48 +299,12 @@ struct omap_dss_dsi_config {
enum omap_dss_dsi_trans_mode trans_mode;
};
-struct omap_video_timings {
- /* Unit: pixels */
- u16 x_res;
- /* Unit: pixels */
- u16 y_res;
- /* Unit: Hz */
- u32 pixelclock;
- /* Unit: pixel clocks */
- u16 hsw; /* Horizontal synchronization pulse width */
- /* Unit: pixel clocks */
- u16 hfp; /* Horizontal front porch */
- /* Unit: pixel clocks */
- u16 hbp; /* Horizontal back porch */
- /* Unit: line clocks */
- u16 vsw; /* Vertical synchronization pulse width */
- /* Unit: line clocks */
- u16 vfp; /* Vertical front porch */
- /* Unit: line clocks */
- u16 vbp; /* Vertical back porch */
-
- /* Vsync logic level */
- enum omap_dss_signal_level vsync_level;
- /* Hsync logic level */
- enum omap_dss_signal_level hsync_level;
- /* Interlaced or Progressive timings */
- bool interlace;
- /* Pixel clock edge to drive LCD data */
- enum omap_dss_signal_edge data_pclk_edge;
- /* Data enable logic level */
- enum omap_dss_signal_level de_level;
- /* Pixel clock edges to drive HSYNC and VSYNC signals */
- enum omap_dss_signal_edge sync_pclk_edge;
-
- bool double_pixel;
-};
-
-/* Hardcoded timings for tv modes. Venc only uses these to
+/* Hardcoded videomodes for tv. Venc only uses these to
* identify the mode, and does not actually use the configs
* itself. However, the configs should be something that
* a normal monitor can also show */
-extern const struct omap_video_timings omap_dss_pal_timings;
-extern const struct omap_video_timings omap_dss_ntsc_timings;
+extern const struct videomode omap_dss_pal_vm;
+extern const struct videomode omap_dss_ntsc_vm;
struct omap_dss_cpr_coefs {
s16 rr, rg, rb;
@@ -502,11 +466,11 @@ struct omapdss_dpi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
};
@@ -521,11 +485,11 @@ struct omapdss_sdi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
};
@@ -540,11 +504,11 @@ struct omapdss_dvi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
};
struct omapdss_atv_ops {
@@ -557,11 +521,11 @@ struct omapdss_atv_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_type)(struct omap_dss_device *dssdev,
enum omap_dss_venc_type type);
@@ -582,11 +546,11 @@ struct omapdss_hdmi_ops {
void (*disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
bool (*detect)(struct omap_dss_device *dssdev);
@@ -692,7 +656,7 @@ struct omap_dss_device {
} phy;
struct {
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_dsi_pixel_format dsi_pix_fmt;
enum omap_dss_dsi_mode dsi_mode;
@@ -785,11 +749,11 @@ struct omap_dss_driver {
int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
u32 (*get_wss)(struct omap_dss_device *dssdev);
@@ -819,11 +783,6 @@ struct omap_dss_device *omap_dss_find_device(void *data,
int (*match)(struct omap_dss_device *dssdev, void *data));
const char *omapdss_get_default_display_name(void);
-void videomode_to_omap_video_timings(const struct videomode *vm,
- struct omap_video_timings *ovt);
-void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
- struct videomode *vm);
-
int dss_feat_get_num_mgrs(void);
int dss_feat_get_num_ovls(void);
enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
@@ -852,7 +811,7 @@ void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres);
int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
+ struct videomode *vm);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -906,7 +865,7 @@ void dispc_mgr_go(enum omap_channel channel);
void dispc_mgr_set_lcd_config(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
void dispc_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void dispc_mgr_setup(enum omap_channel channel,
const struct omap_overlay_manager_info *info);
u32 dispc_mgr_gamma_size(enum omap_channel channel);
@@ -919,8 +878,7 @@ bool dispc_ovl_enabled(enum omap_plane plane);
void dispc_ovl_set_channel_out(enum omap_plane plane,
enum omap_channel channel);
int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
- bool mem_to_mem);
+ bool replication, const struct videomode *vm, bool mem_to_mem);
enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel);
@@ -934,7 +892,7 @@ struct dss_mgr_ops {
int (*enable)(enum omap_channel channel);
void (*disable)(enum omap_channel channel);
void (*set_timings)(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void (*set_lcd_config)(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
int (*register_framedone_handler)(enum omap_channel channel,
@@ -951,7 +909,7 @@ int dss_mgr_connect(enum omap_channel channel,
void dss_mgr_disconnect(enum omap_channel channel,
struct omap_dss_device *dst);
void dss_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings);
+ const struct videomode *vm);
void dss_mgr_set_lcd_config(enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
int dss_mgr_enable(enum omap_channel channel);
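
Every output-ops callback in omapdss.h now takes the generic struct videomode in place of omap_video_timings. A sketch of an implementation against the new signature (the bounds are hypothetical; real drivers check dispc and panel limits, as sdi_check_timings() below does):

    static int example_check_timings(struct omap_dss_device *dssdev,
                                     struct videomode *vm)
    {
            if (vm->pixelclock == 0)        /* the same test sdi performs */
                    return -EINVAL;
            if (vm->hactive == 0 || vm->vactive == 0)
                    return -EINVAL;         /* hypothetical sanity check */
            return 0;
    }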
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 24f859488201..a901af5a9bc3 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -201,10 +201,9 @@ void dss_mgr_disconnect(enum omap_channel channel,
}
EXPORT_SYMBOL(dss_mgr_disconnect);
-void dss_mgr_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+void dss_mgr_set_timings(enum omap_channel channel, const struct videomode *vm)
{
- dss_mgr_ops->set_timings(channel, timings);
+ dss_mgr_ops->set_timings(channel, vm);
}
EXPORT_SYMBOL(dss_mgr_set_timings);
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
index cd53566d75eb..09724757366a 100644
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c
@@ -113,7 +113,7 @@ static struct {
struct semaphore bus_lock;
- struct omap_video_timings timings;
+ struct videomode vm;
int pixel_size;
int data_lines;
struct rfbi_timings intf_timings;
@@ -308,15 +308,15 @@ static int rfbi_transfer_area(struct omap_dss_device *dssdev,
u32 l;
int r;
struct omap_overlay_manager *mgr = rfbi.output.manager;
- u16 width = rfbi.timings.x_res;
- u16 height = rfbi.timings.y_res;
+ u16 width = rfbi.vm.hactive;
+ u16 height = rfbi.vm.vactive;
/*BUG_ON(callback == 0);*/
BUG_ON(rfbi.framedone_callback != NULL);
DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
- dss_mgr_set_timings(mgr, &rfbi.timings);
+ dss_mgr_set_timings(mgr, &rfbi.vm);
r = dss_mgr_enable(mgr);
if (r)
@@ -777,8 +777,8 @@ static int rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *),
static void rfbi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
{
- rfbi.timings.x_res = w;
- rfbi.timings.y_res = h;
+ rfbi.vm.hactive = w;
+ rfbi.vm.vactive = h;
}
static void rfbi_set_pixel_size(struct omap_dss_device *dssdev, int pixel_size)
@@ -854,25 +854,30 @@ static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
dss_mgr_set_lcd_config(mgr, &mgr_config);
/*
- * Set rfbi.timings with default values, the x_res and y_res fields
+	 * Set rfbi.vm with default values, the hactive and vactive fields
* are expected to be already configured by the panel driver via
* omapdss_rfbi_set_size()
*/
- rfbi.timings.hsw = 1;
- rfbi.timings.hfp = 1;
- rfbi.timings.hbp = 1;
- rfbi.timings.vsw = 1;
- rfbi.timings.vfp = 0;
- rfbi.timings.vbp = 0;
-
- rfbi.timings.interlace = false;
- rfbi.timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- rfbi.timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- dss_mgr_set_timings(mgr, &rfbi.timings);
+ rfbi.vm.hsync_len = 1;
+ rfbi.vm.hfront_porch = 1;
+ rfbi.vm.hback_porch = 1;
+ rfbi.vm.vsync_len = 1;
+ rfbi.vm.vfront_porch = 0;
+ rfbi.vm.vback_porch = 0;
+
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ rfbi.vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+ rfbi.vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ rfbi.vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+ rfbi.vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+
+ dss_mgr_set_timings(mgr, &rfbi.vm);
}
static int rfbi_display_enable(struct omap_dss_device *dssdev)
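
struct videomode encodes each signal polarity as a LOW/HIGH (or POSEDGE/NEGEDGE) flag pair from <video/display_timing.h> rather than a dedicated enum field, which is why the defaults above clear one flag of every pair before setting its complement. A hypothetical wrapper for that pattern, shown only to make the idiom explicit (not a kernel helper):

    static void example_vm_select(struct videomode *vm,
                                  enum display_flags clear,
                                  enum display_flags set)
    {
            vm->flags &= ~clear;
            vm->flags |= set;
    }

    /* e.g. the sync-edge default above becomes: */
    example_vm_select(&rfbi.vm, DISPLAY_FLAGS_SYNC_POSEDGE,
                      DISPLAY_FLAGS_SYNC_NEGEDGE);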
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 0a96c321ce62..b3bda2d3c08d 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -39,7 +39,7 @@ static struct {
struct regulator *vdds_sdi_reg;
struct dss_lcd_mgr_config mgr_config;
- struct omap_video_timings timings;
+ struct videomode vm;
int datapairs;
struct omap_dss_device output;
@@ -131,7 +131,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &sdi.output;
enum omap_channel channel = dssdev->dispc_channel;
- struct omap_video_timings *t = &sdi.timings;
+ struct videomode *vm = &sdi.vm;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
unsigned long pck;
@@ -151,10 +151,9 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
goto err_get_dispc;
/* 15.5.9.1.2 */
- t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_SYNC_POSEDGE;
- r = sdi_calc_clock_div(t->pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(vm->pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
@@ -162,15 +161,15 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
- if (pck != t->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %d Hz, got %lu Hz\n",
- t->pixelclock, pck);
+ if (pck != vm->pixelclock) {
+ DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
+ vm->pixelclock, pck);
- t->pixelclock = pck;
+ vm->pixelclock = pck;
}
- dss_mgr_set_timings(channel, t);
+ dss_mgr_set_timings(channel, vm);
r = dss_set_fck_rate(fck);
if (r)
@@ -229,26 +228,26 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
}
static void sdi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- sdi.timings = *timings;
+ sdi.vm = *vm;
}
static void sdi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
- *timings = sdi.timings;
+ *vm = sdi.vm;
}
static int sdi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
enum omap_channel channel = dssdev->dispc_channel;
- if (!dispc_mgr_timings_ok(channel, timings))
+ if (!dispc_mgr_timings_ok(channel, vm))
return -EINVAL;
- if (timings->pixelclock == 0)
+ if (vm->pixelclock == 0)
return -EINVAL;
return 0;
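
sdi_display_enable() rounds the requested pixel clock to what the divider chain can actually produce and then adopts the achievable rate; note the format-string change too, since videomode's pixelclock is an unsigned long (%lu). Illustrative arithmetic with made-up divider values:

    /* hypothetical numbers: fck = 266 MHz, lck_div = 1, pck_div = 4 */
    unsigned long pck = 266000000 / 1 / 4;  /* 66500000 Hz */
    /* a request for 66000000 Hz != 66500000 Hz triggers the DSSWARN,
     * then vm->pixelclock = pck so later users see the real rate */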
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 6eedf2118708..d74f7fcc2e46 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -262,47 +262,41 @@ static const struct venc_config venc_config_pal_bdghi = {
.fid_ext_start_y__fid_ext_offset_y = 0x01380005,
};
-const struct omap_video_timings omap_dss_pal_timings = {
- .x_res = 720,
- .y_res = 574,
+const struct videomode omap_dss_pal_vm = {
+ .hactive = 720,
+ .vactive = 574,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 12,
- .hbp = 68,
- .vsw = 5,
- .vfp = 5,
- .vbp = 41,
-
- .interlace = true,
-
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 64,
+ .hfront_porch = 12,
+ .hback_porch = 68,
+ .vsync_len = 5,
+ .vfront_porch = 5,
+ .vback_porch = 41,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_pal_timings);
+EXPORT_SYMBOL(omap_dss_pal_vm);
-const struct omap_video_timings omap_dss_ntsc_timings = {
- .x_res = 720,
- .y_res = 482,
+const struct videomode omap_dss_ntsc_vm = {
+ .hactive = 720,
+ .vactive = 482,
.pixelclock = 13500000,
- .hsw = 64,
- .hfp = 16,
- .hbp = 58,
- .vsw = 6,
- .vfp = 6,
- .vbp = 31,
-
- .interlace = true,
-
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .hsync_len = 64,
+ .hfront_porch = 16,
+ .hback_porch = 58,
+ .vsync_len = 6,
+ .vfront_porch = 6,
+ .vback_porch = 31,
+
+ .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_ntsc_timings);
+EXPORT_SYMBOL(omap_dss_ntsc_vm);
static struct {
struct platform_device *pdev;
@@ -313,7 +307,7 @@ static struct {
struct clk *tv_dac_clk;
- struct omap_video_timings timings;
+ struct videomode vm;
enum omap_dss_venc_type type;
bool invert_polarity;
@@ -427,13 +421,12 @@ static void venc_runtime_put(void)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static const struct venc_config *venc_timings_to_config(
- struct omap_video_timings *timings)
+static const struct venc_config *venc_timings_to_config(struct videomode *vm)
{
- if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
return &venc_config_pal_trm;
- if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
return &venc_config_ntsc_trm;
BUG();
@@ -451,7 +444,7 @@ static int venc_power_on(struct omap_dss_device *dssdev)
goto err0;
venc_reset();
- venc_write_config(venc_timings_to_config(&venc.timings));
+ venc_write_config(venc_timings_to_config(&venc.vm));
dss_set_venc_output(venc.type);
dss_set_dac_pwrdn_bgz(1);
@@ -468,7 +461,7 @@ static int venc_power_on(struct omap_dss_device *dssdev)
venc_write_reg(VENC_OUTPUT_CONTROL, l);
- dss_mgr_set_timings(channel, &venc.timings);
+ dss_mgr_set_timings(channel, &venc.vm);
r = regulator_enable(venc.vdda_dac_reg);
if (r)
@@ -546,17 +539,17 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
}
static void venc_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
DSSDBG("venc_set_timings\n");
mutex_lock(&venc.venc_lock);
/* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc.timings, timings, sizeof(*timings)))
+ if (memcmp(&venc.vm, vm, sizeof(*vm)))
venc.wss_data = 0;
- venc.timings = *timings;
+ venc.vm = *vm;
dispc_set_tv_pclk(13500000);
@@ -564,25 +557,25 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
}
static int venc_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
DSSDBG("venc_check_timings\n");
- if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
return 0;
- if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+ if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
return 0;
return -EINVAL;
}
static void venc_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
mutex_lock(&venc.venc_lock);
- *timings = venc.timings;
+ *vm = venc.vm;
mutex_unlock(&venc.venc_lock);
}
@@ -602,7 +595,7 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
mutex_lock(&venc.venc_lock);
- config = venc_timings_to_config(&venc.timings);
+ config = venc_timings_to_config(&venc.vm);
/* Invert due to VENC_L21_WC_CTL:INV=1 */
venc.wss_data = (wss ^ 0xfffff) << 8;
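
venc_timings_to_config() and venc_check_timings() match a requested mode by memcmp() against the exported omap_dss_pal_vm/omap_dss_ntsc_vm constants, so callers must hand in one of those structures unmodified, byte for byte. Roughly (an illustrative flow; both functions are static to this file):

    struct videomode vm = omap_dss_pal_vm;      /* exact copy, no edits */

    if (venc_check_timings(dssdev, &vm) == 0)   /* memcmp() matched */
            venc_set_timings(dssdev, &vm);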
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 137fe690a0da..2580e8673908 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -42,73 +42,6 @@ bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
return omap_connector->hdmi_mode;
}
-void copy_timings_omap_to_drm(struct drm_display_mode *mode,
- struct omap_video_timings *timings)
-{
- mode->clock = timings->pixelclock / 1000;
-
- mode->hdisplay = timings->x_res;
- mode->hsync_start = mode->hdisplay + timings->hfp;
- mode->hsync_end = mode->hsync_start + timings->hsw;
- mode->htotal = mode->hsync_end + timings->hbp;
-
- mode->vdisplay = timings->y_res;
- mode->vsync_start = mode->vdisplay + timings->vfp;
- mode->vsync_end = mode->vsync_start + timings->vsw;
- mode->vtotal = mode->vsync_end + timings->vbp;
-
- mode->flags = 0;
-
- if (timings->interlace)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
- if (timings->double_pixel)
- mode->flags |= DRM_MODE_FLAG_DBLCLK;
-
- if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PHSYNC;
- else
- mode->flags |= DRM_MODE_FLAG_NHSYNC;
-
- if (timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PVSYNC;
- else
- mode->flags |= DRM_MODE_FLAG_NVSYNC;
-}
-
-void copy_timings_drm_to_omap(struct omap_video_timings *timings,
- struct drm_display_mode *mode)
-{
- timings->pixelclock = mode->clock * 1000;
-
- timings->x_res = mode->hdisplay;
- timings->hfp = mode->hsync_start - mode->hdisplay;
- timings->hsw = mode->hsync_end - mode->hsync_start;
- timings->hbp = mode->htotal - mode->hsync_end;
-
- timings->y_res = mode->vdisplay;
- timings->vfp = mode->vsync_start - mode->vdisplay;
- timings->vsw = mode->vsync_end - mode->vsync_start;
- timings->vbp = mode->vtotal - mode->vsync_end;
-
- timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
- timings->double_pixel = !!(mode->flags & DRM_MODE_FLAG_DBLCLK);
-
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- else
- timings->hsync_level = OMAPDSS_SIG_ACTIVE_LOW;
-
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- timings->vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- else
- timings->vsync_level = OMAPDSS_SIG_ACTIVE_LOW;
-
- timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-}
-
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
@@ -185,11 +118,11 @@ static int omap_connector_get_modes(struct drm_connector *connector)
kfree(edid);
} else {
struct drm_display_mode *mode = drm_mode_create(dev);
- struct omap_video_timings timings = {0};
+ struct videomode vm = {0};
- dssdrv->get_timings(dssdev, &timings);
+ dssdrv->get_timings(dssdev, &vm);
- copy_timings_omap_to_drm(mode, &timings);
+ drm_display_mode_from_videomode(&vm, mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
@@ -207,12 +140,14 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
struct omap_dss_driver *dssdrv = dssdev->driver;
- struct omap_video_timings timings = {0};
+ struct videomode vm = {0};
struct drm_device *dev = connector->dev;
struct drm_display_mode *new_mode;
int r, ret = MODE_BAD;
- copy_timings_drm_to_omap(&timings, mode);
+ drm_display_mode_to_videomode(mode, &vm);
+ vm.flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
mode->vrefresh = drm_mode_vrefresh(mode);
/*
@@ -221,13 +156,13 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
* panel's timings
*/
if (dssdrv->check_timings) {
- r = dssdrv->check_timings(dssdev, &timings);
+ r = dssdrv->check_timings(dssdev, &vm);
} else {
- struct omap_video_timings t = {0};
+ struct videomode t = {0};
dssdrv->get_timings(dssdev, &t);
- if (memcmp(&timings, &t, sizeof(struct omap_video_timings)))
+ if (memcmp(&vm, &t, sizeof(struct videomode)))
r = -EINVAL;
else
r = 0;
@@ -236,7 +171,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
if (!r) {
/* check if vrefresh is still valid */
new_mode = drm_mode_duplicate(dev, mode);
- new_mode->clock = timings.pixelclock / 1000;
+ new_mode->clock = vm.pixelclock / 1000;
new_mode->vrefresh = 0;
if (mode->vrefresh == drm_mode_vrefresh(new_mode))
ret = MODE_OK;
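
The driver-private copy_timings_omap_to_drm()/copy_timings_drm_to_omap() pair is dropped in favor of the existing drm_display_mode_from_videomode()/drm_display_mode_to_videomode() helpers from drm_modes.c. The one thing the helpers cannot supply, as the hunk above shows, is the bus-level flags: a drm_display_mode carries no data-enable or pixel-clock-edge information, so the driver ORs in its own defaults. A condensed sketch of the round trip:

    struct videomode vm;

    drm_display_mode_to_videomode(mode, &vm);
    /* DRM modes lack DE/pclk-edge flags; supply the driver defaults */
    vm.flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
                DISPLAY_FLAGS_SYNC_NEGEDGE;

    /* and back again, e.g. when reporting fixed panel timings */
    drm_display_mode_from_videomode(&vm, mode);
    drm_mode_set_name(mode);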
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 180f644e861e..8dea89030e66 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -34,7 +34,7 @@ struct omap_crtc {
const char *name;
enum omap_channel channel;
- struct omap_video_timings timings;
+ struct videomode vm;
struct omap_drm_irq vblank_irq;
struct omap_drm_irq error_irq;
@@ -56,10 +56,10 @@ uint32_t pipe2vbl(struct drm_crtc *crtc)
return dispc_mgr_get_vsync_irq(omap_crtc->channel);
}
-struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+struct videomode *omap_crtc_timings(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- return &omap_crtc->timings;
+ return &omap_crtc->vm;
}
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
@@ -201,7 +201,7 @@ static int omap_crtc_dss_enable(enum omap_channel channel)
dispc_mgr_setup(omap_crtc->channel, &info);
dispc_mgr_set_timings(omap_crtc->channel,
- &omap_crtc->timings);
+ &omap_crtc->vm);
omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
@@ -215,11 +215,11 @@ static void omap_crtc_dss_disable(enum omap_channel channel)
}
static void omap_crtc_dss_set_timings(enum omap_channel channel,
- const struct omap_video_timings *timings)
+ const struct videomode *vm)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
DBG("%s", omap_crtc->name);
- omap_crtc->timings = *timings;
+ omap_crtc->vm = *vm;
}
static void omap_crtc_dss_set_lcd_config(enum omap_channel channel,
@@ -369,7 +369,10 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+ drm_display_mode_to_videomode(mode, &omap_crtc->vm);
+ omap_crtc->vm.flags |= DISPLAY_FLAGS_DE_HIGH |
+ DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
}
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
@@ -411,19 +414,6 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
}
- if (crtc->state->color_mgmt_changed) {
- struct drm_color_lut *lut = NULL;
- uint length = 0;
-
- if (crtc->state->gamma_lut) {
- lut = (struct drm_color_lut *)
- crtc->state->gamma_lut->data;
- length = crtc->state->gamma_lut->length /
- sizeof(*lut);
- }
- dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
- }
-
if (dispc_mgr_is_enabled(omap_crtc->channel)) {
DBG("%s: GO", omap_crtc->name);
@@ -438,13 +428,14 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static bool omap_crtc_is_plane_prop(struct drm_device *dev,
+static bool omap_crtc_is_plane_prop(struct drm_crtc *crtc,
struct drm_property *property)
{
+ struct drm_device *dev = crtc->dev;
struct omap_drm_private *priv = dev->dev_private;
return property == priv->zorder_prop ||
- property == dev->mode_config.rotation_property;
+ property == crtc->primary->rotation_property;
}
static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
@@ -452,9 +443,7 @@ static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t val)
{
- struct drm_device *dev = crtc->dev;
-
- if (omap_crtc_is_plane_prop(dev, property)) {
+ if (omap_crtc_is_plane_prop(crtc, property)) {
struct drm_plane_state *plane_state;
struct drm_plane *plane = crtc->primary;
@@ -479,9 +468,7 @@ static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t *val)
{
- struct drm_device *dev = crtc->dev;
-
- if (omap_crtc_is_plane_prop(dev, property)) {
+ if (omap_crtc_is_plane_prop(crtc, property)) {
/*
* Delegate property get to the primary plane. The
* drm_atomic_plane_get_property() function isn't exported, but
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index e1cfba51cff6..39c5312b466c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -105,7 +105,7 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
dispc_runtime_put();
- drm_atomic_state_free(old_state);
+ drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&priv->commit.lock);
@@ -176,6 +176,7 @@ static int omap_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
@@ -292,16 +293,6 @@ static int omap_modeset_init_properties(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- if (priv->has_dmm) {
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- DRM_ROTATE_0 | DRM_ROTATE_90 |
- DRM_ROTATE_180 | DRM_ROTATE_270 |
- DRM_REFLECT_X | DRM_REFLECT_Y);
- if (!dev->mode_config.rotation_property)
- return -ENOMEM;
- }
-
priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
if (!priv->zorder_prop)
return -ENOMEM;
@@ -752,22 +743,32 @@ static void dev_lastclose(struct drm_device *dev)
DBG("lastclose: dev=%p", dev);
- if (dev->mode_config.rotation_property) {
- /* need to restore default rotation state.. not sure
- * if there is a cleaner way to restore properties to
- * default state? Maybe a flag that properties should
- * automatically be restored to default state on
- * lastclose?
- */
- for (i = 0; i < priv->num_crtcs; i++) {
- drm_object_property_set_value(&priv->crtcs[i]->base,
- dev->mode_config.rotation_property, 0);
- }
+ /* need to restore default rotation state.. not sure
+ * if there is a cleaner way to restore properties to
+ * default state? Maybe a flag that properties should
+ * automatically be restored to default state on
+ * lastclose?
+ */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ struct drm_crtc *crtc = priv->crtcs[i];
- for (i = 0; i < priv->num_planes; i++) {
- drm_object_property_set_value(&priv->planes[i]->base,
- dev->mode_config.rotation_property, 0);
- }
+ if (!crtc->primary->rotation_property)
+ continue;
+
+ drm_object_property_set_value(&crtc->base,
+ crtc->primary->rotation_property,
+ DRM_ROTATE_0);
+ }
+
+ for (i = 0; i < priv->num_planes; i++) {
+ struct drm_plane *plane = priv->planes[i];
+
+ if (!plane->rotation_property)
+ continue;
+
+ drm_object_property_set_value(&plane->base,
+ plane->rotation_property,
+ DRM_ROTATE_0);
}
if (priv->fbdev) {
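
Two unrelated core API changes meet in this file: struct drm_atomic_state is now reference counted, so drm_atomic_state_free() becomes a get/put pair, and rotation properties move from mode_config to the individual planes. The new state lifetime, reduced to its skeleton (a sketch of the pattern, not the complete commit path):

    drm_atomic_helper_swap_state(state, true);
    drm_atomic_state_get(state);        /* reference held by the commit work */

    /* ... hardware programming runs, possibly from a workqueue ... */

    drm_atomic_state_put(state);        /* dropped in omap_atomic_complete() */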
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index dcc30a98b9d4..4c51135eb9a6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -148,7 +148,7 @@ static inline void omap_fbdev_free(struct drm_device *dev)
}
#endif
-struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
+struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
void omap_crtc_pre_init(void);
void omap_crtc_pre_uninit(void);
@@ -171,11 +171,6 @@ struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-void copy_timings_omap_to_drm(struct drm_display_mode *mode,
- struct omap_video_timings *timings);
-void copy_timings_drm_to_omap(struct omap_video_timings *timings,
- struct drm_display_mode *mode);
-
uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
uint32_t max_formats, enum omap_color_mode supported_modes);
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 0bbb9c59622e..a20f30039aee 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -102,7 +102,7 @@ static void omap_encoder_disable(struct drm_encoder *encoder)
static int omap_encoder_update(struct drm_encoder *encoder,
enum omap_channel channel,
- struct omap_video_timings *timings)
+ struct videomode *vm)
{
struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
@@ -111,13 +111,13 @@ static int omap_encoder_update(struct drm_encoder *encoder,
int ret;
if (dssdrv->check_timings) {
- ret = dssdrv->check_timings(dssdev, timings);
+ ret = dssdrv->check_timings(dssdev, vm);
} else {
- struct omap_video_timings t = {0};
+ struct videomode t = {0};
dssdrv->get_timings(dssdev, &t);
- if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
+ if (memcmp(vm, &t, sizeof(struct videomode)))
ret = -EINVAL;
else
ret = 0;
@@ -129,7 +129,7 @@ static int omap_encoder_update(struct drm_encoder *encoder,
}
if (dssdrv->set_timings)
- dssdrv->set_timings(dssdev, timings);
+ dssdrv->set_timings(dssdev, vm);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 505dee0db973..d4e1e11466f8 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -336,8 +336,10 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
int i, npages = obj->size >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
- dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (omap_obj->addrs[i])
+ dma_unmap_page(obj->dev->dev,
+ omap_obj->addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
}
}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 66ac8c40db26..9c43cb481e62 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -108,16 +108,12 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
win.src_x = state->src_x >> 16;
win.src_y = state->src_y >> 16;
- switch (state->rotation & DRM_ROTATE_MASK) {
- case DRM_ROTATE_90:
- case DRM_ROTATE_270:
+ if (drm_rotation_90_or_270(state->rotation)) {
win.src_w = state->src_h >> 16;
win.src_h = state->src_w >> 16;
- break;
- default:
+ } else {
win.src_w = state->src_w >> 16;
win.src_h = state->src_h >> 16;
- break;
}
/* update scanout: */
@@ -135,7 +131,9 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
/* and finally, update omapdss: */
ret = dispc_ovl_setup(omap_plane->id, &info, false,
omap_crtc_timings(state->crtc), false);
- if (WARN_ON(ret)) {
+ if (ret) {
+ dev_err(plane->dev->dev, "Failed to setup plane %s\n",
+ omap_plane->name);
dispc_ovl_enable(omap_plane->id, false);
return;
}
@@ -161,12 +159,20 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
{
struct drm_crtc_state *crtc_state;
- if (!state->crtc)
+ if (!state->fb)
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ /* crtc should only be NULL when disabling (i.e., !state->fb) */
+ if (WARN_ON(!state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ /* we should have a crtc state if the plane is attached to a crtc */
+ if (WARN_ON(!crtc_state))
+ return 0;
+
+ if (!crtc_state->enable)
+ return 0;
if (state->crtc_x < 0 || state->crtc_y < 0)
return -EINVAL;
@@ -177,11 +183,9 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
- if (state->fb) {
- if (state->rotation != DRM_ROTATE_0 &&
- !omap_framebuffer_supports_rotation(state->fb))
- return -EINVAL;
- }
+ if (state->rotation != DRM_ROTATE_0 &&
+ !omap_framebuffer_supports_rotation(state->fb))
+ return -EINVAL;
return 0;
}
@@ -215,9 +219,17 @@ void omap_plane_install_properties(struct drm_plane *plane,
struct omap_drm_private *priv = dev->dev_private;
if (priv->has_dmm) {
- struct drm_property *prop = dev->mode_config.rotation_property;
-
- drm_object_attach_property(obj, prop, 0);
+ if (!plane->rotation_property)
+ drm_plane_create_rotation_property(plane,
+ DRM_ROTATE_0,
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270 |
+ DRM_REFLECT_X | DRM_REFLECT_Y);
+
+ /* Attach the rotation property also to the crtc object */
+ if (plane->rotation_property && obj != &plane->base)
+ drm_object_attach_property(obj, plane->rotation_property,
+ DRM_ROTATE_0);
}
drm_object_attach_property(obj, priv->zorder_prop, 0);
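
With the device-global rotation property gone, each plane creates its own through drm_plane_create_rotation_property() and the CRTC merely attaches the primary plane's property, while the open-coded 90/270 switch becomes the drm_rotation_90_or_270() helper from drm_blend.h. A compact sketch of both ends (error checking omitted):

    /* at init: advertise what the hardware can rotate */
    drm_plane_create_rotation_property(plane, DRM_ROTATE_0,
                    DRM_ROTATE_0 | DRM_ROTATE_90 |
                    DRM_ROTATE_180 | DRM_ROTATE_270 |
                    DRM_REFLECT_X | DRM_REFLECT_Y);

    /* at commit: 90/270 scanout swaps the source dimensions */
    if (drm_rotation_90_or_270(state->rotation))
            swap(src_w, src_h);         /* kernel.h swap() macro */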
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 5f3e5ad99de7..84995ebc6ffc 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -31,7 +31,7 @@
* Definitions taken from spice-protocol, plus kernel driver specific bits.
*/
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
@@ -190,7 +190,7 @@ enum {
* spice-protocol/qxl_dev.h */
#define QXL_MAX_RES 96
struct qxl_release {
- struct fence base;
+ struct dma_fence base;
int id;
int type;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index cd83f050cf3e..50b4e522f05f 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -21,7 +21,7 @@
*/
#include "qxl_drv.h"
#include "qxl_object.h"
-#include <trace/events/fence.h>
+#include <trace/events/dma_fence.h>
/*
* drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -40,23 +40,24 @@
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
-static const char *qxl_get_driver_name(struct fence *fence)
+static const char *qxl_get_driver_name(struct dma_fence *fence)
{
return "qxl";
}
-static const char *qxl_get_timeline_name(struct fence *fence)
+static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
return "release";
}
-static bool qxl_nop_signaling(struct fence *fence)
+static bool qxl_nop_signaling(struct dma_fence *fence)
{
/* fences are always automatically signaled, so just pretend we did this.. */
return true;
}
-static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
+static long qxl_fence_wait(struct dma_fence *fence, bool intr,
+ signed long timeout)
{
struct qxl_device *qdev;
struct qxl_release *release;
@@ -71,7 +72,7 @@ static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
retry:
sc++;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
qxl_io_notify_oom(qdev);
@@ -80,11 +81,11 @@ retry:
if (!qxl_queue_garbage_collect(qdev, true))
break;
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
}
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
goto signaled;
if (have_drawable_releases || sc < 4) {
@@ -96,9 +97,9 @@ retry:
return 0;
if (have_drawable_releases && sc > 300) {
- FENCE_WARN(fence, "failed to wait on release %llu "
- "after spincount %d\n",
- fence->context & ~0xf0000000, sc);
+ DMA_FENCE_WARN(fence, "failed to wait on release %llu "
+ "after spincount %d\n",
+ fence->context & ~0xf0000000, sc);
goto signaled;
}
goto retry;
@@ -115,7 +116,7 @@ signaled:
return end - cur;
}
-static const struct fence_ops qxl_fence_ops = {
+static const struct dma_fence_ops qxl_fence_ops = {
.get_driver_name = qxl_get_driver_name,
.get_timeline_name = qxl_get_timeline_name,
.enable_signaling = qxl_nop_signaling,
@@ -192,8 +193,8 @@ qxl_release_free(struct qxl_device *qdev,
WARN_ON(list_empty(&release->bos));
qxl_release_free_list(release);
- fence_signal(&release->base);
- fence_put(&release->base);
+ dma_fence_signal(&release->base);
+ dma_fence_put(&release->base);
} else {
qxl_release_free_list(release);
kfree(release);
@@ -453,9 +454,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
* Since we never really allocated a context and we don't want to conflict,
* set the highest bits. This will break if we really allow exporting of dma-bufs.
*/
- fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
- release->id | 0xf0000000, release->base.seqno);
- trace_fence_emit(&release->base);
+ dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
+ release->id | 0xf0000000, release->base.seqno);
+ trace_dma_fence_emit(&release->base);
driver = bdev->driver;
glob = bo->glob;
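
The qxl hunks are part of the tree-wide struct fence -> struct dma_fence rename; every symbol gains a dma_ prefix but the semantics are untouched. For reference, the minimal shape of a fence provider under the renamed API (a sketch with placeholder names, not qxl's actual ops):

    #include <linux/dma-fence.h>

    static const char *ex_driver_name(struct dma_fence *f)
    {
            return "example";
    }

    static const char *ex_timeline_name(struct dma_fence *f)
    {
            return "example-timeline";
    }

    static bool ex_enable_signaling(struct dma_fence *f)
    {
            return true;    /* signaled elsewhere, e.g. from an IRQ handler */
    }

    static const struct dma_fence_ops example_fence_ops = {
            .get_driver_name   = ex_driver_name,
            .get_timeline_name = ex_timeline_name,
            .enable_signaling  = ex_enable_signaling,
            .wait              = dma_fence_default_wait,
    };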
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index e26c82db948b..11761330a6b8 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -387,6 +387,7 @@ static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
.invalidate_caches = &qxl_invalidate_caches,
.init_mem_type = &qxl_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
.verify_access = &qxl_verify_access,
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 56bb758f4e33..fa4f8f008e4d 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -28,6 +28,7 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_audio.h"
+#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index d960d3915408..f8b05090232a 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -27,6 +27,7 @@
*/
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b69c8de35bd3..595a19736458 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "r600d.h"
#include "r600_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1b0dcad916b0..44e0c5ed6418 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -66,7 +66,7 @@
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -367,7 +367,7 @@ struct radeon_fence_driver {
};
struct radeon_fence {
- struct fence base;
+ struct dma_fence base;
struct radeon_device *rdev;
uint64_t seq;
@@ -746,7 +746,7 @@ struct radeon_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
- struct fence *fence;
+ struct dma_fence *fence;
bool async;
};
@@ -2514,9 +2514,9 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
/*
* Cast helper
*/
-extern const struct fence_ops radeon_fence_ops;
+extern const struct dma_fence_ops radeon_fence_ops;
-static inline struct radeon_fence *to_radeon_fence(struct fence *f)
+static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f)
{
struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5df3ec73021b..4134759a6823 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -29,6 +29,7 @@
#include "atom.h"
#include "atom-bits.h"
+#include "radeon_asic.h"
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 38e396dae0a9..c1135feb93c1 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -29,6 +29,7 @@
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
/* 10 khz */
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index eb92aef46e3c..0be8d5cd7826 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1320,7 +1320,7 @@ int radeon_device_init(struct radeon_device *rdev,
for (i = 0; i < RADEON_NUM_RINGS; i++) {
rdev->ring[i].idx = i;
}
- rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
+ rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
@@ -1651,7 +1651,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
- /* evict remaining vram memory */
+	/*
+	 * Evict remaining VRAM memory. This second call evicts the GART
+	 * page table using the CPU.
+	 */
radeon_bo_evict_vram(rdev);
radeon_agp_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index cdb8cb568c15..e7409e8a9f87 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -437,7 +437,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
down_read(&rdev->exclusive_lock);
}
} else
- r = fence_wait(work->fence, false);
+ r = dma_fence_wait(work->fence, false);
if (r)
DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
@@ -447,7 +447,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
* confused about which BO the CRTC is scanning out
*/
- fence_put(work->fence);
+ dma_fence_put(work->fence);
work->fence = NULL;
}
@@ -542,7 +542,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+ work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
@@ -617,7 +617,7 @@ pflip_cleanup:
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- fence_put(work->fence);
+ dma_fence_put(work->fence);
kfree(work);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index de504ea29c06..6d1237d6e1b8 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -223,7 +223,8 @@ radeon_dp_mst_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
+static struct drm_encoder *
+radeon_mst_best_encoder(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -341,7 +342,8 @@ const struct drm_dp_mst_topology_cbs mst_cbs = {
.hotplug = radeon_dp_mst_hotplug,
};
-struct radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
+static struct radeon_connector *
+radeon_mst_find_connector(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
@@ -597,7 +599,7 @@ static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = {
.commit = radeon_mst_encoder_commit,
};
-void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+static void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0daad446d2c7..f65f29911dca 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -89,13 +89,13 @@ static struct fb_ops radeonfb_ops = {
};
-int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
+int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
{
int aligned = width;
int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
int pitch_mask = 0;
- switch (bpp / 8) {
+ switch (cpp) {
case 1:
pitch_mask = align_large ? 255 : 127;
break;
@@ -110,7 +110,7 @@ int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tile
aligned += pitch_mask;
aligned &= ~pitch_mask;
- return aligned;
+ return aligned * cpp;
}
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
@@ -139,13 +139,13 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
- u32 bpp, depth;
+ u32 cpp;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
/* need to align pitch with crtc limits */
- mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
- fb_tiled) * ((bpp + 1) / 8);
+ mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, cpp,
+ fb_tiled);
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
@@ -165,11 +165,11 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (bpp) {
- case 32:
+ switch (cpp) {
+ case 4:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
- case 16:
+ case 2:
tiling_flags |= RADEON_TILING_SWAP_16BIT;
default:
break;
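
radeon_align_pitch() now takes bytes per pixel (cpp) rather than bits per pixel and returns the pitch in bytes, which lets callers drop their own '* ((bpp + 1) / 8)' scaling. Worked through for the cpp == 1 case visible above on an align_large part (pitch_mask = 255):

    /* width = 1366, cpp = 1:
     *   aligned = (1366 + 255) & ~255 = 1536 pixels
     *   return aligned * cpp          = 1536 bytes
     * the old interface returned 1536 pixels and left the byte
     * conversion to each caller */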
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ef075acde9c..ef09f0a63754 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -141,8 +141,10 @@ int radeon_fence_emit(struct radeon_device *rdev,
(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
(*fence)->is_vm_update = false;
- fence_init(&(*fence)->base, &radeon_fence_ops,
- &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
+ dma_fence_init(&(*fence)->base, &radeon_fence_ops,
+ &rdev->fence_queue.lock,
+ rdev->fence_context + ring,
+ seq);
radeon_fence_ring_emit(rdev, ring, *fence);
trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
radeon_fence_schedule_check(rdev, ring);
@@ -169,18 +171,18 @@ static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
*/
seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
if (seq >= fence->seq) {
- int ret = fence_signal_locked(&fence->base);
+ int ret = dma_fence_signal_locked(&fence->base);
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
else
- FENCE_TRACE(&fence->base, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
} else
- FENCE_TRACE(&fence->base, "pending\n");
+ DMA_FENCE_TRACE(&fence->base, "pending\n");
return 0;
}
@@ -351,7 +353,7 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
return false;
}
-static bool radeon_fence_is_signaled(struct fence *f)
+static bool radeon_fence_is_signaled(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
@@ -381,7 +383,7 @@ static bool radeon_fence_is_signaled(struct fence *f)
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
-static bool radeon_fence_enable_signaling(struct fence *f)
+static bool radeon_fence_enable_signaling(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
@@ -414,9 +416,9 @@ static bool radeon_fence_enable_signaling(struct fence *f)
fence->fence_wake.private = NULL;
fence->fence_wake.func = radeon_fence_check_signaled;
__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
- fence_get(f);
+ dma_fence_get(f);
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
+ DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
return true;
}
@@ -436,9 +438,9 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
int ret;
- ret = fence_signal(&fence->base);
+ ret = dma_fence_signal(&fence->base);
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
return true;
}
return false;
@@ -552,7 +554,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
* exclusive_lock is not held in that case.
*/
if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
- return fence_wait(&fence->base, intr);
+ return dma_fence_wait(&fence->base, intr);
seq[fence->ring] = fence->seq;
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
@@ -560,9 +562,9 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
return r;
}
- r_sig = fence_signal(&fence->base);
+ r_sig = dma_fence_signal(&fence->base);
if (!r_sig)
- FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+ DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
return r;
}
@@ -697,7 +699,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
*/
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
return fence;
}
@@ -714,7 +716,7 @@ void radeon_fence_unref(struct radeon_fence **fence)
*fence = NULL;
if (tmp) {
- fence_put(&tmp->base);
+ dma_fence_put(&tmp->base);
}
}
@@ -1028,12 +1030,12 @@ int radeon_debugfs_fence_init(struct radeon_device *rdev)
#endif
}
-static const char *radeon_fence_get_driver_name(struct fence *fence)
+static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
return "radeon";
}
-static const char *radeon_fence_get_timeline_name(struct fence *f)
+static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
switch (fence->ring) {
@@ -1051,16 +1053,16 @@ static const char *radeon_fence_get_timeline_name(struct fence *f)
static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
- return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
+ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
struct radeon_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct radeon_wait_cb *wait =
container_of(cb, struct radeon_wait_cb, base);
@@ -1068,7 +1070,7 @@ radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
wake_up_process(wait->task);
}
-static signed long radeon_fence_default_wait(struct fence *f, bool intr,
+static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
signed long t)
{
struct radeon_fence *fence = to_radeon_fence(f);
@@ -1077,7 +1079,7 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
cb.task = current;
- if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+ if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
return t;
while (t > 0) {
@@ -1105,12 +1107,12 @@ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
}
__set_current_state(TASK_RUNNING);
- fence_remove_callback(f, &cb.base);
+ dma_fence_remove_callback(f, &cb.base);
return t;
}
-const struct fence_ops radeon_fence_ops = {
+const struct dma_fence_ops radeon_fence_ops = {
.get_driver_name = radeon_fence_get_driver_name,
.get_timeline_name = radeon_fence_get_timeline_name,
.enable_signaling = radeon_fence_enable_signaling,
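
The radeon_fence.c changes above are part of the tree-wide rename of the fence infrastructure to dma_fence: struct fence becomes struct dma_fence, fence_init() becomes dma_fence_init(), FENCE_TRACE becomes DMA_FENCE_TRACE, and the ops table becomes struct dma_fence_ops, with no behavioural change. For orientation, a condensed sketch of a driver fence under the renamed API, using hypothetical my_* names and assuming the kernel version this series targets (where .enable_signaling is still a required hook):

        #include <linux/dma-fence.h>

        static const char *my_get_driver_name(struct dma_fence *f)
        {
                return "mydrv";         /* reported in traces and debugfs */
        }

        static const char *my_get_timeline_name(struct dma_fence *f)
        {
                return "mydrv-ring0";
        }

        static bool my_enable_signaling(struct dma_fence *f)
        {
                return true;            /* toy fence: signaling always armed */
        }

        static const struct dma_fence_ops my_fence_ops = {
                .get_driver_name = my_get_driver_name,
                .get_timeline_name = my_get_timeline_name,
                .enable_signaling = my_enable_signaling,
                .wait = dma_fence_default_wait,
        };

        /* dma_fence_init(&f->base, &my_fence_ops, &f->lock,
         *                dma_fence_context_alloc(1), seqno); */
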
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index deb9511725c9..0bcffd8a7bd3 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -745,7 +745,8 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
uint32_t handle;
int r;
- args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->pitch = radeon_align_pitch(rdev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
args->size = args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
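
radeon_mode_dumb_create() still receives bits per pixel from userspace, so it converts to bytes per pixel with DIV_ROUND_UP() before calling the now byte-based radeon_align_pitch(). DIV_ROUND_UP rounds the division up, which keeps packed formats whose bpp is not a byte multiple from under-allocating:

        /* Sketch: DIV_ROUND_UP(n, d) == (n + d - 1) / d */
        int cpp32 = DIV_ROUND_UP(32, 8);        /* 4 bytes/pixel, XRGB8888 */
        int cpp16 = DIV_ROUND_UP(16, 8);        /* 2 bytes/pixel, RGB565 */
        int cpp12 = DIV_ROUND_UP(12, 8);        /* 2, not truncated down to 1 */
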
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 868c3ba2efaa..222a1fa41d7c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -27,6 +27,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>
#ifdef CONFIG_PMAC_BACKLIGHT
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4b6542538ff9..326ad068c15a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -47,6 +47,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
+static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
@@ -79,6 +80,8 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
}
mutex_unlock(&rdev->pm.mutex);
+ /* allow new DPM state to be picked */
+ radeon_pm_compute_clocks_dpm(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
@@ -882,7 +885,8 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
/* balanced states don't exist at the moment */
if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ dpm_state = rdev->pm.dpm.ac_power ?
+ POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
restart_search:
/* Pick the best power state based on current conditions */
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 02ac8a1de4ff..be5d7a38d3aa 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -92,7 +92,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
bool shared)
{
struct reservation_object_list *flist;
- struct fence *f;
+ struct dma_fence *f;
struct radeon_fence *fence;
unsigned i;
int r = 0;
@@ -103,7 +103,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else if (f)
- r = fence_wait(f, true);
+ r = dma_fence_wait(f, true);
flist = reservation_object_get_list(resv);
if (shared || !flist || r)
@@ -116,7 +116,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else
- r = fence_wait(f, true);
+ r = dma_fence_wait(f, true);
if (r)
break;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3de5e6e21662..0cf03ccbf0a7 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -863,6 +863,7 @@ static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
.verify_access = &radeon_verify_access,
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 0cd0e7bdee55..d34d1cf33895 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -467,7 +467,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
{
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
- struct fence *f;
+ struct dma_fence *f;
void *ptr;
int i, r;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e402be8821c4..143280dc0851 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7858,7 +7858,7 @@ static void si_program_aspm(struct radeon_device *rdev)
}
}
-int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
+static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
{
unsigned i;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 392c7e6de042..4220d621f55f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -272,7 +272,7 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
drm_atomic_helper_cleanup_planes(dev, old_state);
- drm_atomic_state_free(old_state);
+ drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&rcdu->commit.wait.lock);
@@ -338,6 +338,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
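
The rcar-du hunks above (and the matching sti, tegra and vc4 hunks below) switch struct drm_atomic_state from single-owner free to reference counting: the commit path takes an extra drm_atomic_state_get() before handing the state to possibly asynchronous completion work, and the completion side drops it with drm_atomic_state_put() instead of calling drm_atomic_state_free(). A sketch of the resulting pattern, with a hypothetical per-commit object my_commit:

        /* Sketch, assuming the drm_atomic_state_get()/_put() API added here. */
        static void my_atomic_complete(struct drm_atomic_state *state)
        {
                /* ... program hardware, wait for vblanks, clean up planes ... */
                drm_atomic_state_put(state);    /* pairs with the get below */
        }

        static int my_atomic_commit(struct drm_device *dev,
                                    struct drm_atomic_state *state, bool nonblock)
        {
                drm_atomic_helper_swap_state(state, true);
                drm_atomic_state_get(state);    /* keep state alive for the work */
                if (nonblock)
                        schedule_work(&my_commit->work); /* runs my_atomic_complete() */
                else
                        my_atomic_complete(state);
                return 0;
        }
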
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 3c58669a06ce..6f7f9c59f05b 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -1,7 +1,6 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
- depends on RESET_CONTROLLER
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 8c8cbe837e61..6fe161192bb4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
@@ -388,7 +389,7 @@ static void rockchip_add_endpoints(struct device *dev,
continue;
}
- component_match_add(dev, match, compare_of, remote);
+ drm_of_component_match_add(dev, match, compare_of, remote);
of_node_put(remote);
}
}
@@ -437,7 +438,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
}
of_node_put(iommu);
- component_match_add(dev, &match, compare_of, port->parent);
+ drm_of_component_match_add(dev, &match, compare_of,
+ port->parent);
of_node_put(port);
}
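
drm_of_component_match_add(), used here and in the sti, sun4i and tilcdc hunks below, is a wrapper around component_match_add() that takes and holds its own device-node reference for the lifetime of the match entry, so callers keep their usual of_node_put() discipline without the match table ending up with a dangling node. Usage sketch, assuming <drm/drm_of.h>:

        #include <drm/drm_of.h>

        /* Sketch: register a DT node as a component match; compare_of()
         * matches by of_node, as in the hunks above. */
        drm_of_component_match_add(dev, &match, compare_of, remote);
        of_node_put(remote);    /* drop our reference; the match holds its own */
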
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 3dc0d8ff95ec..2db89bed52e8 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -1004,6 +1004,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
if (IS_ERR(kvb_addr)) {
ret = PTR_ERR(kvb_addr);
+ kvb_addr = NULL;
goto done;
}
cmdbuf->vb_addr = kvb_addr;
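
The one-line savage fix above resets kvb_addr after memdup_user() fails: the shared done: label frees whatever the pointer variables hold, and without the reset it would pass an ERR_PTR value to kfree(). The hazard, sketched:

        /* Sketch of the error-pointer-vs-kfree hazard fixed above. */
        void *p = memdup_user(uptr, size);
        if (IS_ERR(p)) {
                ret = PTR_ERR(p);
                p = NULL;       /* shared cleanup does kfree(p); NULL is safe */
                goto done;
        }
        /* ... use p ... */
done:
        kfree(p);       /* kfree(NULL) is a no-op; kfree(ERR_PTR(...)) is a bug */
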
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 9df308565f6c..d614701bdcc7 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_of.h>
#include "sti_crtc.h"
#include "sti_drv.h"
@@ -184,7 +185,7 @@ static void sti_atomic_complete(struct sti_private *private,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
static void sti_atomic_work(struct work_struct *work)
@@ -237,6 +238,7 @@ static int sti_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
sti_atomic_schedule(private, state);
else
@@ -443,8 +445,8 @@ static int sti_platform_probe(struct platform_device *pdev)
child_np = of_get_next_available_child(node, NULL);
while (child_np) {
- component_match_add(dev, &match, compare_of, child_np);
- of_node_put(child_np);
+ drm_of_component_match_add(dev, &match, compare_of,
+ child_np);
child_np = of_get_next_available_child(node, child_np);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 0da9862ad8ed..b3c4ad605e81 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_of.h>
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
@@ -239,7 +240,7 @@ static int sun4i_drv_add_endpoints(struct device *dev,
/* Add current component */
DRM_DEBUG_DRIVER("Adding component %s\n",
of_node_full_name(node));
- component_match_add(dev, match, compare_of, node);
+ drm_of_component_match_add(dev, match, compare_of, node);
count++;
}
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 63ebb154b9b5..bbf5a4b7e0b6 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -3,7 +3,6 @@ config DRM_TEGRA
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
depends on COMMON_CLK
depends on DRM
- depends on RESET_CONTROLLER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 8ab47b502d83..a9630c2d6cb3 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -63,7 +63,7 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
}
static void tegra_atomic_work(struct work_struct *work)
@@ -96,6 +96,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
+ drm_atomic_state_get(state);
if (nonblock)
tegra_atomic_schedule(tegra, state);
else
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 52ebe8fc1784..822531ebd4b0 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -72,16 +72,14 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_gem_cma_object *gem;
- unsigned int depth, bpp;
dma_addr_t start, end;
u64 dma_base_and_ceiling;
- drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
gem = drm_fb_cma_get_gem_obj(fb, 0);
start = gem->paddr + fb->offsets[0] +
crtc->y * fb->pitches[0] +
- crtc->x * bpp / 8;
+ crtc->x * drm_format_plane_cpp(fb->pixel_format, 0);
end = start + (crtc->mode.vdisplay * fb->pitches[0]);
@@ -461,16 +459,16 @@ static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (info->tft_alt_mode)
reg |= LCDC_TFT_ALT_ENABLE;
if (priv->rev == 2) {
- unsigned int depth, bpp;
-
- drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
- switch (bpp) {
- case 16:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB565:
break;
- case 32:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
reg |= LCDC_V2_TFT_24BPP_UNPACK;
/* fallthrough */
- case 24:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_RGB888:
reg |= LCDC_V2_TFT_24BPP_MODE;
break;
default:
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index a694977c32f4..147fb28287ae 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -143,8 +143,6 @@ static int tilcdc_commit(struct drm_device *dev,
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
-
return 0;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 68e895021005..06a4c584f3cb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -10,6 +10,7 @@
#include <linux/component.h>
#include <linux/of_graph.h>
+#include <drm/drm_of.h>
#include "tilcdc_drv.h"
#include "tilcdc_external.h"
@@ -160,7 +161,8 @@ int tilcdc_get_external_components(struct device *dev,
dev_dbg(dev, "Subdevice node '%s' found\n", node->name);
if (match)
- component_match_add(dev, match, dev_match_of, node);
+ drm_of_component_match_add(dev, match, dev_match_of,
+ node);
of_node_put(node);
count++;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 74c65fa859b2..8a6a50d74aff 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -39,7 +39,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
{
struct drm_crtc_state *crtc_state;
struct drm_plane_state *old_state = plane->state;
- unsigned int depth, bpp;
+ unsigned int pitch;
if (!state->crtc)
return 0;
@@ -68,8 +68,9 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- drm_fb_get_bpp_depth(state->fb->pixel_format, &depth, &bpp);
- if (state->fb->pitches[0] != crtc_state->mode.hdisplay * bpp / 8) {
+ pitch = crtc_state->mode.hdisplay *
+ drm_format_plane_cpp(state->fb->pixel_format, 0);
+ if (state->fb->pitches[0] != pitch) {
dev_err(plane->dev->dev,
"Invalid pitch: fb and crtc widths must be the same");
return -EINVAL;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index fc6217dfe401..f6ff579e8918 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -148,7 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
@@ -426,20 +426,20 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
struct reservation_object_list *fobj;
- struct fence *fence;
+ struct dma_fence *fence;
int i;
fobj = reservation_object_get_list(bo->resv);
fence = reservation_object_get_excl(bo->resv);
if (fence && !fence->ops->signaled)
- fence_enable_sw_signaling(fence);
+ dma_fence_enable_sw_signaling(fence);
for (i = 0; fobj && i < fobj->shared_count; ++i) {
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(bo->resv));
if (!fence->ops->signaled)
- fence_enable_sw_signaling(fence);
+ dma_fence_enable_sw_signaling(fence);
}
}
@@ -717,6 +717,20 @@ out:
return ret;
}
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place)
+{
+ /* Don't evict this BO if it's outside of the
+ * requested placement range
+ */
+ if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
+ (place->lpfn && place->lpfn <= bo->mem.start))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
const struct ttm_place *place,
@@ -731,21 +745,16 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
ret = __ttm_bo_reserve(bo, false, true, NULL);
- if (!ret) {
- if (place && (place->fpfn || place->lpfn)) {
- /* Don't evict this BO if it's outside of the
- * requested placement range
- */
- if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
- (place->lpfn && place->lpfn <= bo->mem.start)) {
- __ttm_bo_unreserve(bo);
- ret = -EBUSY;
- continue;
- }
- }
+ if (ret)
+ continue;
- break;
+ if (place && !bdev->driver->eviction_valuable(bo, place)) {
+ __ttm_bo_unreserve(bo);
+ ret = -EBUSY;
+ continue;
}
+
+ break;
}
if (ret) {
@@ -792,11 +801,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
spin_lock(&man->move_lock);
- fence = fence_get(man->move);
+ fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
if (fence) {
@@ -806,7 +815,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
if (unlikely(ret))
return ret;
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
bo->moving = fence;
}
@@ -1286,7 +1295,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
/*
@@ -1309,12 +1318,12 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_unlock(&glob->lru_lock);
spin_lock(&man->move_lock);
- fence = fence_get(man->move);
+ fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
if (fence) {
- ret = fence_wait(fence, false);
- fence_put(fence);
+ ret = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
if (ret) {
if (allow_errors) {
return ret;
@@ -1343,7 +1352,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
mem_type);
return ret;
}
- fence_put(man->move);
+ dma_fence_put(man->move);
man->use_type = false;
man->has_type = false;
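
ttm_mem_evict_first() above now asks the driver whether a buffer is worth evicting for a given placement via the new eviction_valuable hook, with ttm_bo_eviction_valuable() exported as the default range check. Drivers with no special policy wire the hook straight to the default, as the virtio and vmwgfx hunks below do; a driver policy can also wrap it:

        /* Sketch of a driver override, hypothetical names; the exported
         * default is reused for the placement-range check. */
        static bool my_eviction_valuable(struct ttm_buffer_object *bo,
                                         const struct ttm_place *place)
        {
                if (my_bo_is_scanout_pinned(bo))        /* hypothetical predicate */
                        return false;
                return ttm_bo_eviction_valuable(bo, place);
        }

        static struct ttm_bo_driver my_bo_driver = {
                /* ... */
                .eviction_valuable = my_eviction_valuable,
        };
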
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bf6e21655c57..d0459b392e5e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -644,7 +644,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct fence *fence,
+ struct dma_fence *fence,
bool evict,
struct ttm_mem_reg *new_mem)
{
@@ -674,8 +674,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* operation has completed.
*/
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
@@ -706,7 +706,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
- struct fence *fence, bool evict,
+ struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -730,8 +730,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
* operation has completed.
*/
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
@@ -761,16 +761,16 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
*/
spin_lock(&from->move_lock);
- if (!from->move || fence_is_later(fence, from->move)) {
- fence_put(from->move);
- from->move = fence_get(fence);
+ if (!from->move || dma_fence_is_later(fence, from->move)) {
+ dma_fence_put(from->move);
+ from->move = dma_fence_get(fence);
}
spin_unlock(&from->move_lock);
ttm_bo_free_old_node(bo);
- fence_put(bo->moving);
- bo->moving = fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
} else {
/**
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a6ed9d5e5167..4748aedc933a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
/*
* Quick non-stalling check for idle.
*/
- if (fence_is_signaled(bo->moving))
+ if (dma_fence_is_signaled(bo->moving))
goto out_clear;
/*
@@ -67,14 +67,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
up_read(&vma->vm_mm->mmap_sem);
- (void) fence_wait(bo->moving, true);
+ (void) dma_fence_wait(bo->moving, true);
goto out_unlock;
}
/*
* Ordinary wait.
*/
- ret = fence_wait(bo->moving, true);
+ ret = dma_fence_wait(bo->moving, true);
if (unlikely(ret != 0)) {
ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
@@ -82,7 +82,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
}
out_clear:
- fence_put(bo->moving);
+ dma_fence_put(bo->moving);
bo->moving = NULL;
out_unlock:
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index a80717b35dc6..d35bc491e8de 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -179,7 +179,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
- struct list_head *list, struct fence *fence)
+ struct list_head *list,
+ struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index c1f65c6c8e60..f31f72af8551 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -61,7 +61,7 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_state_free(state);
+ drm_atomic_state_put(state);
up(&vc4->async_modeset);
@@ -173,6 +173,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
* current layout.
*/
+ drm_atomic_state_get(state);
if (nonblock) {
vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
vc4_atomic_complete_commit_seqno_cb);
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 5c57c1ffa1f9..488909a21ed8 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -28,56 +28,57 @@
#define VGEM_FENCE_TIMEOUT (10*HZ)
struct vgem_fence {
- struct fence base;
+ struct dma_fence base;
struct spinlock lock;
struct timer_list timer;
};
-static const char *vgem_fence_get_driver_name(struct fence *fence)
+static const char *vgem_fence_get_driver_name(struct dma_fence *fence)
{
return "vgem";
}
-static const char *vgem_fence_get_timeline_name(struct fence *fence)
+static const char *vgem_fence_get_timeline_name(struct dma_fence *fence)
{
return "unbound";
}
-static bool vgem_fence_signaled(struct fence *fence)
+static bool vgem_fence_signaled(struct dma_fence *fence)
{
return false;
}
-static bool vgem_fence_enable_signaling(struct fence *fence)
+static bool vgem_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
-static void vgem_fence_release(struct fence *base)
+static void vgem_fence_release(struct dma_fence *base)
{
struct vgem_fence *fence = container_of(base, typeof(*fence), base);
del_timer_sync(&fence->timer);
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
-static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
{
snprintf(str, size, "%u", fence->seqno);
}
-static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
int size)
{
- snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+ snprintf(str, size, "%u",
+ dma_fence_is_signaled(fence) ? fence->seqno : 0);
}
-static const struct fence_ops vgem_fence_ops = {
+static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
.enable_signaling = vgem_fence_enable_signaling,
.signaled = vgem_fence_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = vgem_fence_release,
.fence_value_str = vgem_fence_value_str,
@@ -88,11 +89,11 @@ static void vgem_fence_timeout(unsigned long data)
{
struct vgem_fence *fence = (struct vgem_fence *)data;
- fence_signal(&fence->base);
+ dma_fence_signal(&fence->base);
}
-static struct fence *vgem_fence_create(struct vgem_file *vfile,
- unsigned int flags)
+static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
+ unsigned int flags)
{
struct vgem_fence *fence;
@@ -101,8 +102,8 @@ static struct fence *vgem_fence_create(struct vgem_file *vfile,
return NULL;
spin_lock_init(&fence->lock);
- fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
- fence_context_alloc(1), 1);
+ dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+ dma_fence_context_alloc(1), 1);
setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
@@ -157,7 +158,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
struct vgem_file *vfile = file->driver_priv;
struct reservation_object *resv;
struct drm_gem_object *obj;
- struct fence *fence;
+ struct dma_fence *fence;
int ret;
if (arg->flags & ~VGEM_FENCE_WRITE)
@@ -209,8 +210,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
}
err_fence:
if (ret) {
- fence_signal(fence);
- fence_put(fence);
+ dma_fence_signal(fence);
+ dma_fence_put(fence);
}
err:
drm_gem_object_unreference_unlocked(obj);
@@ -239,7 +240,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
{
struct vgem_file *vfile = file->driver_priv;
struct drm_vgem_fence_signal *arg = data;
- struct fence *fence;
+ struct dma_fence *fence;
int ret = 0;
if (arg->flags)
@@ -253,11 +254,11 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
if (IS_ERR(fence))
return PTR_ERR(fence);
- if (fence_is_signaled(fence))
+ if (dma_fence_is_signaled(fence))
ret = -ETIMEDOUT;
- fence_signal(fence);
- fence_put(fence);
+ dma_fence_signal(fence);
+ dma_fence_put(fence);
return ret;
}
@@ -271,8 +272,8 @@ int vgem_fence_open(struct vgem_file *vfile)
static int __vgem_fence_idr_fini(int id, void *p, void *data)
{
- fence_signal(p);
- fence_put(p);
+ dma_fence_signal(p);
+ dma_fence_put(p);
return 0;
}
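
vgem is about the smallest possible fence provider, so the converted file doubles as a reference for the renamed API: allocate a context with dma_fence_context_alloc(), initialise with dma_fence_init(), then signal and drop the reference. The lifecycle, condensed from the code above:

        /* Sketch of the vgem-style dma_fence lifecycle. */
        spin_lock_init(&fence->lock);
        dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
                       dma_fence_context_alloc(1), 1); /* context, seqno */

        /* ... later, from the timeout timer or the signal ioctl ... */
        dma_fence_signal(&fence->base); /* wakes waiters, runs callbacks */
        dma_fence_put(&fence->base);    /* drop the creator's reference */
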
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index e1afc3d3f8d9..81d1807ac228 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,10 +1,10 @@
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO
- select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_KMS_HELPER
+ select DRM_TTM
help
This is the virtual GPU driver for virtio. It can be used with
- QEMU based VMMs (like KVM or Xen).
+ QEMU based VMMs (like KVM or Xen).
If unsure say M.
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index ae59080d63d1..ec1ebdcfe80b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -82,7 +82,7 @@ struct virtio_gpu_fence_driver {
};
struct virtio_gpu_fence {
- struct fence f;
+ struct dma_fence f;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
uint64_t seq;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f3f70fa8a4c7..23353521f903 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -26,22 +26,22 @@
#include <drm/drmP.h>
#include "virtgpu_drv.h"
-static const char *virtio_get_driver_name(struct fence *f)
+static const char *virtio_get_driver_name(struct dma_fence *f)
{
return "virtio_gpu";
}
-static const char *virtio_get_timeline_name(struct fence *f)
+static const char *virtio_get_timeline_name(struct dma_fence *f)
{
return "controlq";
}
-static bool virtio_enable_signaling(struct fence *f)
+static bool virtio_enable_signaling(struct dma_fence *f)
{
return true;
}
-static bool virtio_signaled(struct fence *f)
+static bool virtio_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
@@ -50,26 +50,26 @@ static bool virtio_signaled(struct fence *f)
return false;
}
-static void virtio_fence_value_str(struct fence *f, char *str, int size)
+static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
snprintf(str, size, "%llu", fence->seq);
}
-static void virtio_timeline_value_str(struct fence *f, char *str, int size)
+static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
}
-static const struct fence_ops virtio_fence_ops = {
+static const struct dma_fence_ops virtio_fence_ops = {
.get_driver_name = virtio_get_driver_name,
.get_timeline_name = virtio_get_timeline_name,
.enable_signaling = virtio_enable_signaling,
.signaled = virtio_signaled,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.fence_value_str = virtio_fence_value_str,
.timeline_value_str = virtio_timeline_value_str,
};
@@ -88,9 +88,9 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
spin_lock_irqsave(&drv->lock, irq_flags);
(*fence)->drv = drv;
(*fence)->seq = ++drv->sync_seq;
- fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
- drv->context, (*fence)->seq);
- fence_get(&(*fence)->f);
+ dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
+ drv->context, (*fence)->seq);
+ dma_fence_get(&(*fence)->f);
list_add_tail(&(*fence)->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
@@ -111,9 +111,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
if (last_seq < fence->seq)
continue;
- fence_signal_locked(&fence->f);
+ dma_fence_signal_locked(&fence->f);
list_del(&fence->node);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
spin_unlock_irqrestore(&drv->lock, irq_flags);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 818478b4c4f0..61f3a963af95 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -172,7 +172,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
/* fence the command bo */
virtio_gpu_unref_list(&validate_list);
drm_free_large(buflist);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
return 0;
out_unresv:
@@ -298,7 +298,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_release(obj);
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
return ret;
}
@@ -309,13 +309,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
return 0;
fail_unref:
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
//fail_obj:
// drm_gem_object_handle_unreference_unlocked(obj);
@@ -383,7 +383,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
out_unres:
virtio_gpu_object_unreserve(qobj);
out:
@@ -431,7 +431,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->level, &box, &fence);
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
}
out_unres:
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 036b0fbae0fb..1235519853f4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -159,7 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
- vgdev->fence_drv.context = fence_context_alloc(1);
+ vgdev->fence_drv.context = dma_fence_context_alloc(1);
spin_lock_init(&vgdev->fence_drv.lock);
INIT_LIST_HEAD(&vgdev->fence_drv.fences);
INIT_LIST_HEAD(&vgdev->cap_cache);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index ba28c0f6f28a..cb75f0663ba0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -152,7 +152,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (!ret) {
reservation_object_add_excl_fence(bo->tbo.resv,
&fence->f);
- fence_put(&fence->f);
+ dma_fence_put(&fence->f);
fence = NULL;
virtio_gpu_object_unreserve(bo);
virtio_gpu_object_wait(bo, false);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 80482ac5f95d..4a1de9f81193 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -425,6 +425,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
.invalidate_caches = &virtio_gpu_invalidate_caches,
.init_mem_type = &virtio_gpu_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &virtio_gpu_evict_flags,
.move = &virtio_gpu_bo_move,
.verify_access = &virtio_gpu_verify_access,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 78b75ee3c931..c894a48a74a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -849,6 +849,7 @@ struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
.move = NULL,
.verify_access = vmw_verify_access,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 26ac8e80a478..6541dd8b82dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -108,7 +108,7 @@ fman_from_fence(struct vmw_fence_obj *fence)
* objects with actions attached to them.
*/
-static void vmw_fence_obj_destroy(struct fence *f)
+static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -123,17 +123,17 @@ static void vmw_fence_obj_destroy(struct fence *f)
fence->destroy(fence);
}
-static const char *vmw_fence_get_driver_name(struct fence *f)
+static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
return "vmwgfx";
}
-static const char *vmw_fence_get_timeline_name(struct fence *f)
+static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
return "svga";
}
-static bool vmw_fence_enable_signaling(struct fence *f)
+static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -152,12 +152,12 @@ static bool vmw_fence_enable_signaling(struct fence *f)
}
struct vmwgfx_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
+vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct vmwgfx_wait_cb *wait =
container_of(cb, struct vmwgfx_wait_cb, base);
@@ -167,7 +167,7 @@ vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
static void __vmw_fences_update(struct vmw_fence_manager *fman);
-static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
+static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -197,7 +197,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
while (ret > 0) {
__vmw_fences_update(fman);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
break;
if (intr)
@@ -225,7 +225,7 @@ out:
return ret;
}
-static struct fence_ops vmw_fence_ops = {
+static struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
@@ -298,7 +298,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex);
- fman->ctx = fence_context_alloc(1);
+ fman->ctx = dma_fence_context_alloc(1);
return fman;
}
@@ -326,8 +326,8 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
unsigned long irq_flags;
int ret = 0;
- fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
- fman->ctx, seqno);
+ dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
+ fman->ctx, seqno);
INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
@@ -431,7 +431,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
u32 goal_seqno;
u32 *fifo_mem;
- if (fence_is_signaled_locked(&fence->base))
+ if (dma_fence_is_signaled_locked(&fence->base))
return false;
fifo_mem = fman->dev_priv->mmio_virt;
@@ -459,7 +459,7 @@ rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
- fence_signal_locked(&fence->base);
+ dma_fence_signal_locked(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
@@ -500,18 +500,18 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return 1;
vmw_fences_update(fman);
- return fence_is_signaled(&fence->base);
+ return dma_fence_is_signaled(&fence->base);
}
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
bool interruptible, unsigned long timeout)
{
- long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
+ long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
if (likely(ret > 0))
return 0;
@@ -530,7 +530,7 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
int vmw_fence_create(struct vmw_fence_manager *fman,
@@ -669,7 +669,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
spin_unlock_irq(&fman->lock);
ret = vmw_fence_obj_wait(fence, false, false,
@@ -677,7 +677,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
- fence_signal(&fence->base);
+ dma_fence_signal(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
@@ -685,7 +685,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
}
BUG_ON(!list_empty(&fence->head));
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
spin_lock_irq(&fman->lock);
}
spin_unlock_irq(&fman->lock);
@@ -884,7 +884,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
spin_lock_irqsave(&fman->lock, irq_flags);
fman->pending_actions[action->type]++;
- if (fence_is_signaled_locked(&fence->base)) {
+ if (dma_fence_is_signaled_locked(&fence->base)) {
struct list_head action_list;
INIT_LIST_HEAD(&action_list);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 83ae301ee141..d9d85aa6ed20 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -27,7 +27,7 @@
#ifndef _VMWGFX_FENCE_H_
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
@@ -52,7 +52,7 @@ struct vmw_fence_action {
};
struct vmw_fence_obj {
- struct fence base;
+ struct dma_fence base;
struct list_head head;
struct list_head seq_passed_actions;
@@ -71,14 +71,14 @@ vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
*fence_p = NULL;
if (fence)
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
}
static inline struct vmw_fence_obj *
vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
if (fence)
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
return fence;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bf28ccc150df..c965514b82be 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -980,14 +980,22 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
struct drm_mode_fb_cmd mode_cmd;
+ const struct drm_format_info *info;
int ret;
+ info = drm_format_info(mode_cmd2->pixel_format);
+ if (!info || !info->depth) {
+ DRM_ERROR("Unsupported framebuffer format %s\n",
+ drm_get_format_name(mode_cmd2->pixel_format));
+ return ERR_PTR(-EINVAL);
+ }
+
mode_cmd.width = mode_cmd2->width;
mode_cmd.height = mode_cmd2->height;
mode_cmd.pitch = mode_cmd2->pitches[0];
mode_cmd.handle = mode_cmd2->handles[0];
- drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
- &mode_cmd.bpp);
+ mode_cmd.depth = info->depth;
+ mode_cmd.bpp = info->cpp[0] * 8;
/**
* This code should be conditioned on Screen Objects not being used.
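
The vmw_kms_fb_create() hunk above replaces drm_fb_get_bpp_depth() with a drm_format_info() lookup and rejects formats that have no legacy depth before filling in the old-style mode_cmd. The lookup, sketched for one format:

        /* Sketch: deriving legacy bpp/depth from the format-info table. */
        const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
        unsigned int depth, bpp;

        if (!info || !info->depth)
                return ERR_PTR(-EINVAL);        /* no legacy depth: reject */
        depth = info->depth;                    /* 24 for XRGB8888 */
        bpp = info->cpp[0] * 8;                 /* 32 for XRGB8888 */
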
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 52ca1c9d070e..8e86d6d4141b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -575,7 +575,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
long lret;
lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
- nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
@@ -1454,7 +1454,7 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
reservation_object_add_excl_fence(bo->resv, &fence->base);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
} else
reservation_object_add_excl_fence(bo->resv, &fence->base);
}
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index aefdff95356d..08766c6e7856 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,7 +1,6 @@
config IMX_IPUV3_CORE
tristate "IPUv3 core support"
depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
- depends on RESET_CONTROLLER
select GENERIC_IRQ_CHIP
help
Choose this if you have a i.MX5/6 system and want to use the Image
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1887f199ccb7..77657a8c0cd4 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1022,21 +1022,16 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
unsigned int io_state;
- char *kbuf, *curr_pos;
+ char kbuf[64], *curr_pos;
size_t remaining = count;
int ret_val;
int i;
-
- kbuf = kmalloc(count + 1, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
-
- if (copy_from_user(kbuf, buf, count)) {
- kfree(kbuf);
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+ if (copy_from_user(kbuf, buf, count))
return -EFAULT;
- }
curr_pos = kbuf;
kbuf[count] = '\0'; /* Just to make sure... */
@@ -1259,11 +1254,9 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
goto done;
}
/* If we got here, the message written is not part of the protocol! */
- kfree(kbuf);
return -EPROTO;
done:
- kfree(kbuf);
return ret_val;
}
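
vga_arb_write() above trades a count-sized kmalloc() for a fixed 64-byte stack buffer: oversized writes now fail with -EINVAL up front, every early-return kfree() disappears, and all arbiter commands still fit with room to spare. The bounded-copy pattern:

        /* Sketch of the bounded stack-buffer pattern used above. */
        char kbuf[64];

        if (count >= sizeof(kbuf))      /* reserve one byte for the NUL */
                return -EINVAL;
        if (copy_from_user(kbuf, buf, count))
                return -EFAULT;
        kbuf[count] = '\0';             /* always terminated, nothing to kfree */
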
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 162689227a23..1cf907ecded4 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -533,6 +533,10 @@ hdmi_picture_aspect_get_name(enum hdmi_picture_aspect picture_aspect)
return "4:3";
case HDMI_PICTURE_ASPECT_16_9:
return "16:9";
+ case HDMI_PICTURE_ASPECT_64_27:
+ return "64:27";
+ case HDMI_PICTURE_ASPECT_256_135:
+ return "256:135";
case HDMI_PICTURE_ASPECT_RESERVED:
return "Reserved";
}
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 8a1076beecd3..db992c684f09 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -88,6 +88,15 @@ static int of_parse_display_timing(const struct device_node *np,
dt->flags |= val ? DISPLAY_FLAGS_PIXDATA_POSEDGE :
DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ if (!of_property_read_u32(np, "syncclk-active", &val))
+ dt->flags |= val ? DISPLAY_FLAGS_SYNC_POSEDGE :
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
+ else if (dt->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE))
+ dt->flags |= dt->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ?
+ DISPLAY_FLAGS_SYNC_POSEDGE :
+ DISPLAY_FLAGS_SYNC_NEGEDGE;
+
if (of_property_read_bool(np, "interlaced"))
dt->flags |= DISPLAY_FLAGS_INTERLACED;
if (of_property_read_bool(np, "doublescan"))
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h
new file mode 100644
index 000000000000..3629b2734db6
--- /dev/null
+++ b/include/drm/bridge/mhl.h
@@ -0,0 +1,291 @@
+/*
+ * Defines for Mobile High-Definition Link (MHL) interface
+ *
+ * Copyright (C) 2015, Samsung Electronics, Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * Based on MHL driver for Android devices.
+ * Copyright (C) 2013-2014 Silicon Image, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MHL_H__
+#define __MHL_H__
+
+/* Device Capabilities Registers */
+enum {
+ MHL_DCAP_DEV_STATE,
+ MHL_DCAP_MHL_VERSION,
+ MHL_DCAP_CAT,
+ MHL_DCAP_ADOPTER_ID_H,
+ MHL_DCAP_ADOPTER_ID_L,
+ MHL_DCAP_VID_LINK_MODE,
+ MHL_DCAP_AUD_LINK_MODE,
+ MHL_DCAP_VIDEO_TYPE,
+ MHL_DCAP_LOG_DEV_MAP,
+ MHL_DCAP_BANDWIDTH,
+ MHL_DCAP_FEATURE_FLAG,
+ MHL_DCAP_DEVICE_ID_H,
+ MHL_DCAP_DEVICE_ID_L,
+ MHL_DCAP_SCRATCHPAD_SIZE,
+ MHL_DCAP_INT_STAT_SIZE,
+ MHL_DCAP_RESERVED,
+ MHL_DCAP_SIZE
+};
+
+#define MHL_DCAP_CAT_SINK 0x01
+#define MHL_DCAP_CAT_SOURCE 0x02
+#define MHL_DCAP_CAT_POWER 0x10
+#define MHL_DCAP_CAT_PLIM(x) ((x) << 5)
+
+#define MHL_DCAP_VID_LINK_RGB444 0x01
+#define MHL_DCAP_VID_LINK_YCBCR444 0x02
+#define MHL_DCAP_VID_LINK_YCBCR422 0x04
+#define MHL_DCAP_VID_LINK_PPIXEL 0x08
+#define MHL_DCAP_VID_LINK_ISLANDS 0x10
+#define MHL_DCAP_VID_LINK_VGA 0x20
+#define MHL_DCAP_VID_LINK_16BPP 0x40
+
+#define MHL_DCAP_AUD_LINK_2CH 0x01
+#define MHL_DCAP_AUD_LINK_8CH 0x02
+
+#define MHL_DCAP_VT_GRAPHICS 0x00
+#define MHL_DCAP_VT_PHOTO 0x02
+#define MHL_DCAP_VT_CINEMA 0x04
+#define MHL_DCAP_VT_GAMES 0x08
+#define MHL_DCAP_SUPP_VT 0x80
+
+#define MHL_DCAP_LD_DISPLAY 0x01
+#define MHL_DCAP_LD_VIDEO 0x02
+#define MHL_DCAP_LD_AUDIO 0x04
+#define MHL_DCAP_LD_MEDIA 0x08
+#define MHL_DCAP_LD_TUNER 0x10
+#define MHL_DCAP_LD_RECORD 0x20
+#define MHL_DCAP_LD_SPEAKER 0x40
+#define MHL_DCAP_LD_GUI 0x80
+#define MHL_DCAP_LD_ALL 0xFF
+
+#define MHL_DCAP_FEATURE_RCP_SUPPORT 0x01
+#define MHL_DCAP_FEATURE_RAP_SUPPORT 0x02
+#define MHL_DCAP_FEATURE_SP_SUPPORT 0x04
+#define MHL_DCAP_FEATURE_UCP_SEND_SUPPORT 0x08
+#define MHL_DCAP_FEATURE_UCP_RECV_SUPPORT 0x10
+#define MHL_DCAP_FEATURE_RBP_SUPPORT 0x40
+
+/* Extended Device Capabilities Registers */
+enum {
+ MHL_XDC_ECBUS_SPEEDS,
+ MHL_XDC_TMDS_SPEEDS,
+ MHL_XDC_ECBUS_ROLES,
+ MHL_XDC_LOG_DEV_MAPX,
+ MHL_XDC_SIZE
+};
+
+#define MHL_XDC_ECBUS_S_075 0x01
+#define MHL_XDC_ECBUS_S_8BIT 0x02
+#define MHL_XDC_ECBUS_S_12BIT 0x04
+#define MHL_XDC_ECBUS_D_150 0x10
+#define MHL_XDC_ECBUS_D_8BIT 0x20
+
+#define MHL_XDC_TMDS_000 0x00
+#define MHL_XDC_TMDS_150 0x01
+#define MHL_XDC_TMDS_300 0x02
+#define MHL_XDC_TMDS_600 0x04
+
+/* MHL_XDC_ECBUS_ROLES flags */
+#define MHL_XDC_DEV_HOST 0x01
+#define MHL_XDC_DEV_DEVICE 0x02
+#define MHL_XDC_DEV_CHARGER 0x04
+#define MHL_XDC_HID_HOST 0x08
+#define MHL_XDC_HID_DEVICE 0x10
+
+/* MHL_XDC_LOG_DEV_MAPX flags */
+#define MHL_XDC_LD_PHONE 0x01
+
+/* Device Status Registers */
+enum {
+ MHL_DST_CONNECTED_RDY,
+ MHL_DST_LINK_MODE,
+ MHL_DST_VERSION,
+ MHL_DST_SIZE
+};
+
+/* Offset of DEVSTAT registers */
+#define MHL_DST_OFFSET 0x30
+#define MHL_DST_REG(name) (MHL_DST_OFFSET + MHL_DST_##name)
+
+#define MHL_DST_CONN_DCAP_RDY 0x01
+#define MHL_DST_CONN_XDEVCAPP_SUPP 0x02
+#define MHL_DST_CONN_POW_STAT 0x04
+#define MHL_DST_CONN_PLIM_STAT_MASK 0x38
+
+#define MHL_DST_LM_CLK_MODE_MASK 0x07
+#define MHL_DST_LM_CLK_MODE_PACKED_PIXEL 0x02
+#define MHL_DST_LM_CLK_MODE_NORMAL 0x03
+#define MHL_DST_LM_PATH_EN_MASK 0x08
+#define MHL_DST_LM_PATH_ENABLED 0x08
+#define MHL_DST_LM_PATH_DISABLED 0x00
+#define MHL_DST_LM_MUTED_MASK 0x10
+
+/* Extended Device Status Registers */
+enum {
+ MHL_XDS_CURR_ECBUS_MODE,
+ MHL_XDS_AVLINK_MODE_STATUS,
+ MHL_XDS_AVLINK_MODE_CONTROL,
+ MHL_XDS_MULTI_SINK_STATUS,
+ MHL_XDS_SIZE
+};
+
+/* Offset of XDEVSTAT registers */
+#define MHL_XDS_OFFSET 0x90
+#define MHL_XDS_REG(name) (MHL_XDS_OFFSET + MHL_XDS_##name)
+
+/* MHL_XDS_REG_CURR_ECBUS_MODE flags */
+#define MHL_XDS_SLOT_MODE_8BIT 0x00
+#define MHL_XDS_SLOT_MODE_6BIT 0x01
+#define MHL_XDS_ECBUS_S 0x04
+#define MHL_XDS_ECBUS_D 0x08
+
+#define MHL_XDS_LINK_CLOCK_75MHZ 0x00
+#define MHL_XDS_LINK_CLOCK_150MHZ 0x10
+#define MHL_XDS_LINK_CLOCK_300MHZ 0x20
+#define MHL_XDS_LINK_CLOCK_600MHZ 0x30
+
+#define MHL_XDS_LINK_STATUS_NO_SIGNAL 0x00
+#define MHL_XDS_LINK_STATUS_CRU_LOCKED 0x01
+#define MHL_XDS_LINK_STATUS_TMDS_NORMAL 0x02
+#define MHL_XDS_LINK_STATUS_TMDS_RESERVED 0x03
+
+#define MHL_XDS_LINK_RATE_1_5_GBPS 0x00
+#define MHL_XDS_LINK_RATE_3_0_GBPS 0x01
+#define MHL_XDS_LINK_RATE_6_0_GBPS 0x02
+#define MHL_XDS_ATT_CAPABLE 0x08
+
+#define MHL_XDS_SINK_STATUS_1_HPD_LOW 0x00
+#define MHL_XDS_SINK_STATUS_1_HPD_HIGH 0x01
+#define MHL_XDS_SINK_STATUS_2_HPD_LOW 0x00
+#define MHL_XDS_SINK_STATUS_2_HPD_HIGH 0x04
+#define MHL_XDS_SINK_STATUS_3_HPD_LOW 0x00
+#define MHL_XDS_SINK_STATUS_3_HPD_HIGH 0x10
+#define MHL_XDS_SINK_STATUS_4_HPD_LOW 0x00
+#define MHL_XDS_SINK_STATUS_4_HPD_HIGH 0x40
+
+/* Interrupt Registers */
+enum {
+ MHL_INT_RCHANGE,
+ MHL_INT_DCHANGE,
+ MHL_INT_SIZE
+};
+
+/* Offset of DEVSTAT registers */
+#define MHL_INT_OFFSET 0x20
+#define MHL_INT_REG(name) (MHL_INT_OFFSET + MHL_INT_##name)
+
+#define MHL_INT_RC_DCAP_CHG 0x01
+#define MHL_INT_RC_DSCR_CHG 0x02
+#define MHL_INT_RC_REQ_WRT 0x04
+#define MHL_INT_RC_GRT_WRT 0x08
+#define MHL_INT_RC_3D_REQ 0x10
+#define MHL_INT_RC_FEAT_REQ 0x20
+#define MHL_INT_RC_FEAT_COMPLETE 0x40
+
+#define MHL_INT_DC_EDID_CHG 0x02
+
+enum {
+ MHL_ACK = 0x33, /* Command or Data byte acknowledge */
+ MHL_NACK = 0x34, /* Command or Data byte not acknowledge */
+ MHL_ABORT = 0x35, /* Transaction abort */
+ MHL_WRITE_STAT = 0xe0, /* Write one status register */
+ MHL_SET_INT = 0x60, /* Write one interrupt register */
+ MHL_READ_DEVCAP_REG = 0x61, /* Read one register */
+ MHL_GET_STATE = 0x62, /* Read CBUS revision level from follower */
+ MHL_GET_VENDOR_ID = 0x63, /* Read vendor ID value from follower */
+ MHL_SET_HPD = 0x64, /* Set Hot Plug Detect in follower */
+ MHL_CLR_HPD = 0x65, /* Clear Hot Plug Detect in follower */
+ MHL_SET_CAP_ID = 0x66, /* Set Capture ID for downstream device */
+ MHL_GET_CAP_ID = 0x67, /* Get Capture ID from downstream device */
+ MHL_MSC_MSG = 0x68, /* VS command to send RCP sub-commands */
+ MHL_GET_SC1_ERRORCODE = 0x69, /* Get Vendor-Specific error code */
+ MHL_GET_DDC_ERRORCODE = 0x6A, /* Get DDC channel command error code */
+ MHL_GET_MSC_ERRORCODE = 0x6B, /* Get MSC command error code */
+ MHL_WRITE_BURST = 0x6C, /* Write 1-16 bytes to responder's scratchpad */
+ MHL_GET_SC3_ERRORCODE = 0x6D, /* Get channel 3 command error code */
+ MHL_WRITE_XSTAT = 0x70, /* Write one extended status register */
+ MHL_READ_XDEVCAP_REG = 0x71, /* Read one extended devcap register */
+ /* let the rest of these float, they are software specific */
+ MHL_READ_EDID_BLOCK,
+ MHL_SEND_3D_REQ_OR_FEAT_REQ,
+ MHL_READ_DEVCAP,
+ MHL_READ_XDEVCAP
+};
+
+/* MSC message types */
+enum {
+ MHL_MSC_MSG_RCP = 0x10, /* RCP sub-command */
+ MHL_MSC_MSG_RCPK = 0x11, /* RCP Acknowledge sub-command */
+ MHL_MSC_MSG_RCPE = 0x12, /* RCP Error sub-command */
+ MHL_MSC_MSG_RAP = 0x20, /* Mode Change Warning sub-command */
+ MHL_MSC_MSG_RAPK = 0x21, /* MCW Acknowledge sub-command */
+ MHL_MSC_MSG_RBP = 0x22, /* Remote Button Protocol sub-command */
+ MHL_MSC_MSG_RBPK = 0x23, /* RBP Acknowledge sub-command */
+ MHL_MSC_MSG_RBPE = 0x24, /* RBP Error sub-command */
+ MHL_MSC_MSG_UCP = 0x30, /* UCP sub-command */
+ MHL_MSC_MSG_UCPK = 0x31, /* UCP Acknowledge sub-command */
+ MHL_MSC_MSG_UCPE = 0x32, /* UCP Error sub-command */
+ MHL_MSC_MSG_RUSB = 0x40, /* Request USB host role */
+ MHL_MSC_MSG_RUSBK = 0x41, /* Acknowledge request for USB host role */
+ MHL_MSC_MSG_RHID = 0x42, /* Request HID host role */
+ MHL_MSC_MSG_RHIDK = 0x43, /* Acknowledge request for HID host role */
+ MHL_MSC_MSG_ATT = 0x50, /* Request attention sub-command */
+ MHL_MSC_MSG_ATTK = 0x51, /* ATT Acknowledge sub-command */
+ MHL_MSC_MSG_BIST_TRIGGER = 0x60,
+ MHL_MSC_MSG_BIST_REQUEST_STAT = 0x61,
+ MHL_MSC_MSG_BIST_READY = 0x62,
+ MHL_MSC_MSG_BIST_STOP = 0x63,
+};
+
+/* RAP action codes */
+#define MHL_RAP_POLL 0x00 /* Just do an ack */
+#define MHL_RAP_CONTENT_ON 0x10 /* Turn content stream ON */
+#define MHL_RAP_CONTENT_OFF 0x11 /* Turn content stream OFF */
+#define MHL_RAP_CBUS_MODE_DOWN 0x20
+#define MHL_RAP_CBUS_MODE_UP 0x21
+
+/* RAPK status codes */
+#define MHL_RAPK_NO_ERR 0x00 /* RAP action recognized & supported */
+#define MHL_RAPK_UNRECOGNIZED 0x01 /* Unknown RAP action code received */
+#define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */
+#define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */
+
+/*
+ * Error status codes for RCPE messages
+ */
+/* No error. (Not allowed in RCPE messages) */
+#define MHL_RCPE_STATUS_NO_ERROR 0x00
+/* Unsupported/unrecognized key code */
+#define MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01
+/* Responder busy. Initiator may retry message */
+#define MHL_RCPE_STATUS_BUSY 0x02
+
+/*
+ * Error status codes for RBPE messages
+ */
+/* No error. (Not allowed in RBPE messages) */
+#define MHL_RBPE_STATUS_NO_ERROR 0x00
+/* Unsupported/unrecognized button code */
+#define MHL_RBPE_STATUS_INEFFECTIVE_BUTTON_CODE 0x01
+/* Responder busy. Initiator may retry message */
+#define MHL_RBPE_STATUS_BUSY 0x02
+
+/*
+ * Error status codes for UCPE messages
+ */
+/* No error. (Not allowed in UCPE messages) */
+#define MHL_UCPE_STATUS_NO_ERROR 0x00
+/* Unsupported/unrecognized key code */
+#define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01
+
+#endif /* __MHL_H__ */
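For orientation, a minimal sketch of how the CBUS register helper macros above compose: MHL_INT_REG(RCHANGE) expands to MHL_INT_OFFSET + MHL_INT_RCHANGE (0x20), the first interrupt register. The mhl_dev context and mhl_msc_cmd() accessor are hypothetical stand-ins for a real driver's MSC transport:

/* Hypothetical helper: tell the peer our device capabilities changed
 * by setting DCAP_CHG in the RCHANGE interrupt register via SET_INT. */
static int mhl_notify_dcap_change(struct mhl_dev *mhl)
{
        return mhl_msc_cmd(mhl, MHL_SET_INT, MHL_INT_REG(RCHANGE),
                           MHL_INT_RC_DCAP_CHG);
}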
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 672644031bd5..e336e3901876 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -57,7 +57,7 @@
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
@@ -362,7 +362,7 @@ struct drm_ioctl_desc {
struct drm_pending_event {
struct completion *completion;
struct drm_event *event;
- struct fence *fence;
+ struct dma_fence *fence;
struct list_head link;
struct list_head pending_link;
struct drm_file *file_priv;
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 9701f2dfb784..fc8af53b18aa 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -153,6 +153,7 @@ struct __drm_connnectors_state {
/**
* struct drm_atomic_state - the global state object for atomic updates
+ * @ref: count of all references to this state (will not be freed until zero)
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
@@ -164,6 +165,8 @@ struct __drm_connnectors_state {
* @acquire_ctx: acquire context for this atomic modeset state update
*/
struct drm_atomic_state {
+ struct kref ref;
+
struct drm_device *dev;
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
@@ -193,7 +196,33 @@ static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
-void drm_atomic_state_free(struct drm_atomic_state *state);
+
+/**
+ * drm_atomic_state_get - acquire a reference to the atomic state
+ * @state: The atomic state
+ *
+ * Returns a new reference to the @state
+ */
+static inline struct drm_atomic_state *
+drm_atomic_state_get(struct drm_atomic_state *state)
+{
+ kref_get(&state->ref);
+ return state;
+}
+
+void __drm_atomic_state_free(struct kref *ref);
+
+/**
+ * drm_atomic_state_put - release a reference to the atomic state
+ * @state: The atomic state
+ *
+ * This releases a reference to @state which is freed after removing the
+ * final reference. No locking required and callable from any context.
+ */
+static inline void drm_atomic_state_put(struct drm_atomic_state *state)
+{
+ kref_put(&state->ref, __drm_atomic_state_free);
+}
int __must_check
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state);
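A minimal sketch of the reference-counting pattern these helpers enable in a nonblocking commit; struct foo_commit and the foo_-prefixed functions are hypothetical:

struct foo_commit {
        struct work_struct work;
        struct drm_atomic_state *state;
};

static void foo_commit_work(struct work_struct *work)
{
        struct foo_commit *commit = container_of(work, struct foo_commit, work);

        foo_commit_tail(commit->state);         /* hypothetical hw programming */
        drm_atomic_state_put(commit->state);    /* drop the worker's reference */
        kfree(commit);
}

static int foo_atomic_commit(struct drm_device *dev,
                             struct drm_atomic_state *state, bool nonblock)
{
        struct foo_commit *commit = kzalloc(sizeof(*commit), GFP_KERNEL);

        if (!commit)
                return -ENOMEM;

        /* Keep the state alive until the async worker has consumed it. */
        commit->state = drm_atomic_state_get(state);
        INIT_WORK(&commit->work, foo_commit_work);
        queue_work(system_unbound_wq, &commit->work);
        return 0;
}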
@@ -365,8 +394,17 @@ int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
*
* To give drivers flexibility struct &drm_crtc_state has 3 booleans to track
* whether the state CRTC changed enough to need a full modeset cycle:
- * connectors_changed, mode_changed and active_change. This helper simply
+ * connectors_changed, mode_changed and active_changed. This helper simply
* combines these three to compute the overall need for a modeset for @state.
+ *
+ * The atomic helper code sets these booleans, but drivers can and should
+ * change them appropriately to accurately represent whether a modeset is
+ * really needed. In general, drivers should avoid full modesets whenever
+ * possible.
+ *
+ * For example if the CRTC mode has changed, and the hardware is able to enact
+ * the requested mode change without going through a full modeset, the driver
+ * should clear mode_changed during its ->atomic_check.
*/
static inline bool
drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
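A sketch of the downgrade described above; foo_can_adjust_without_modeset() is a hypothetical hardware predicate:

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
                                 struct drm_crtc_state *state)
{
        /* The helpers set mode_changed pessimistically; clear it if the
         * hardware can chase the new timings without a full modeset. */
        if (state->mode_changed && foo_can_adjust_without_modeset(crtc, state))
                state->mode_changed = false;

        if (!drm_atomic_crtc_needs_modeset(state))
                return 0;

        return foo_validate_full_modeset(crtc, state);  /* hypothetical */
}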
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
index 36baa175de99..13221cf9b3eb 100644
--- a/include/drm/drm_blend.h
+++ b/include/drm/drm_blend.h
@@ -47,8 +47,14 @@ struct drm_atomic_state;
#define DRM_REFLECT_Y BIT(5)
#define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y)
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
- unsigned int supported_rotations);
+static inline bool drm_rotation_90_or_270(unsigned int rotation)
+{
+ return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
+}
+
+int drm_plane_create_rotation_property(struct drm_plane *plane,
+ unsigned int rotation,
+ unsigned int supported_rotations);
unsigned int drm_rotation_simplify(unsigned int rotation,
unsigned int supported_rotations);
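A typical use of the new predicate in a plane-check path, as a sketch (the 16.16 fixed-point src rectangle comes from struct drm_plane_state):

static void foo_src_size(const struct drm_plane_state *state,
                         unsigned int *w, unsigned int *h)
{
        *w = drm_rect_width(&state->src) >> 16;
        *h = drm_rect_height(&state->src) >> 16;

        /* A 90/270 degree rotation swaps the scan-out width and height. */
        if (drm_rotation_90_or_270(state->rotation))
                swap(*w, *h);
}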
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 0aa292526567..fa1aa214c8ea 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -47,13 +47,14 @@
#include <drm/drm_plane.h>
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
+#include <drm/drm_debugfs_crc.h>
struct drm_device;
struct drm_mode_set;
struct drm_file;
struct drm_clip_rect;
struct device_node;
-struct fence;
+struct dma_fence;
struct edid;
static inline int64_t U642I64(uint64_t val)
@@ -116,6 +117,11 @@ struct drm_plane_helper_funcs;
* never return in a failure from the ->atomic_check callback. Userspace assumes
* that a DPMS On will always succeed. In other words: @enable controls resource
* assignment, @active controls the actual hardware state.
+ *
+ * The three booleans active_changed, connectors_changed and mode_changed are
+ * intended to indicate whether a full modeset is needed, rather than strictly
+ * describing what has changed in a commit.
+ * See also: drm_atomic_crtc_needs_modeset()
*/
struct drm_crtc_state {
struct drm_crtc *crtc;
@@ -564,6 +570,30 @@ struct drm_crtc_funcs {
 * before data structures are torn down.
*/
void (*early_unregister)(struct drm_crtc *crtc);
+
+ /**
+ * @set_crc_source:
+ *
+ * Changes the source of CRC checksums of frames at the request of
+ * userspace, typically for testing purposes. The sources available are
+ * specific of each driver and a %NULL value indicates that CRC
+ * generation is to be switched off.
+ *
+ * When CRC generation is enabled, the driver should call
+ * drm_crtc_add_crc_entry() at each frame, providing any information
+ * that characterizes the frame contents in the crcN arguments, as
+ * provided from the configured source. Drivers must accept an "auto"
+ * source name that will select a default source for this CRTC.
+ *
+ * This callback is optional; it only needs to be implemented by
+ * drivers that support CRC generation.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*set_crc_source)(struct drm_crtc *crtc, const char *source,
+ size_t *values_cnt);
};
/**
@@ -680,6 +710,22 @@ struct drm_crtc {
* context.
*/
struct drm_modeset_acquire_ctx *acquire_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+ /**
+ * @debugfs_entry:
+ *
+ * Debugfs directory for this CRTC.
+ */
+ struct dentry *debugfs_entry;
+
+ /**
+ * @crc:
+ *
+ * Configuration settings of CRC capture.
+ */
+ struct drm_crtc_crc crc;
+#endif
};
/**
@@ -1110,11 +1156,6 @@ struct drm_mode_config {
*/
struct drm_property *plane_type_property;
/**
- * @rotation_property: Optional property for planes or CRTCs to specifiy
- * rotation.
- */
- struct drm_property *rotation_property;
- /**
* @prop_src_x: Default atomic plane property for the plane source
* position in the connected &drm_framebuffer.
*/
@@ -1354,7 +1395,7 @@ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc)
* Given a registered CRTC, return the mask bit of that CRTC for an
* encoder's possible_crtcs field.
*/
-static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
+static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
{
return 1 << drm_crtc_index(crtc);
}
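With the const-qualified parameter, mask computations no longer force callers to cast away constness, e.g. when building an encoder's possible_crtcs (sketch; names illustrative):

static void foo_encoder_bind_crtcs(struct drm_encoder *encoder,
                                   const struct drm_crtc *a,
                                   const struct drm_crtc *b)
{
        encoder->possible_crtcs = drm_crtc_mask(a) | drm_crtc_mask(b);
}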
diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h
new file mode 100644
index 000000000000..7d63b1d4adb9
--- /dev/null
+++ b/include/drm/drm_debugfs_crc.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright © 2016 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_DEBUGFS_CRC_H__
+#define __DRM_DEBUGFS_CRC_H__
+
+#define DRM_MAX_CRC_NR 10
+
+/**
+ * struct drm_crtc_crc_entry - entry describing a frame's content
+ * @has_frame_counter: whether the source was able to provide a frame number
+ * @frame: number of the frame this CRC is about, if @has_frame_counter is true
+ * @crcs: array of values that characterize the frame
+ */
+struct drm_crtc_crc_entry {
+ bool has_frame_counter;
+ uint32_t frame;
+ uint32_t crcs[DRM_MAX_CRC_NR];
+};
+
+#define DRM_CRC_ENTRIES_NR 128
+
+/**
+ * struct drm_crtc_crc - data supporting CRC capture on a given CRTC
+ * @lock: protects the fields in this struct
+ * @source: name of the currently configured source of CRCs
+ * @opened: whether userspace has opened the data file for reading
+ * @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR
+ * @head: head of circular queue
+ * @tail: tail of circular queue
+ * @values_cnt: number of CRC values per entry, up to %DRM_MAX_CRC_NR
+ * @wq: workqueue used to synchronize reading and writing
+ */
+struct drm_crtc_crc {
+ spinlock_t lock;
+ const char *source;
+ bool opened;
+ struct drm_crtc_crc_entry *entries;
+ int head, tail;
+ size_t values_cnt;
+ wait_queue_head_t wq;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ uint32_t frame, uint32_t *crcs);
+#else
+static inline int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ uint32_t frame, uint32_t *crcs)
+{
+ return -EINVAL;
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* __DRM_DEBUGFS_CRC_H__ */
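A minimal sketch of a driver wiring this up; the foo_ hardware hooks and the "plane0" source name are hypothetical:

static int foo_crtc_set_crc_source(struct drm_crtc *crtc, const char *source,
                                   size_t *values_cnt)
{
        if (!source) {
                foo_hw_disable_crc(crtc);       /* hypothetical */
                return 0;
        }

        /* "auto" must be accepted and map to a sensible default. */
        if (strcmp(source, "auto") != 0 && strcmp(source, "plane0") != 0)
                return -EINVAL;

        *values_cnt = 1;                        /* one CRC value per frame */
        foo_hw_enable_crc(crtc);                /* hypothetical */
        return 0;
}

/* Then, from the frame-done/vblank handler: */
static void foo_crtc_frame_done(struct drm_crtc *crtc, u32 frame, u32 crc)
{
        drm_crtc_add_crc_entry(crtc, true, frame, &crc);
}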
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h
index e8a9dfd0e055..4c42db81fcb4 100644
--- a/include/drm/drm_dp_dual_mode_helper.h
+++ b/include/drm/drm_dp_dual_mode_helper.h
@@ -40,6 +40,8 @@
#define DP_DUAL_MODE_REV_TYPE2 0x00
#define DP_DUAL_MODE_TYPE_MASK 0xf0
#define DP_DUAL_MODE_TYPE_TYPE2 0xa0
+/* This field is marked reserved in the dual mode spec, but is used by LSPCON */
+#define DP_DUAL_MODE_TYPE_HAS_DPCD 0x08
#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/
#define DP_DUAL_IEEE_OUI_LEN 3
#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */
@@ -55,6 +57,11 @@
#define DP_DUAL_MODE_CEC_ENABLE 0x01
#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22
+/* LSPCON specific registers, defined by MCA */
+#define DP_DUAL_MODE_LSPCON_MODE_CHANGE 0x40
+#define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41
+#define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1
+
struct i2c_adapter;
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
@@ -63,6 +70,20 @@ ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
u8 offset, const void *buffer, size_t size);
/**
+ * enum drm_lspcon_mode
+ * @DRM_LSPCON_MODE_INVALID: No LSPCON.
+ * @DRM_LSPCON_MODE_LS: Level shifter mode of LSPCON
+ * which drives DP++ to HDMI 1.4 conversion.
+ * @DRM_LSPCON_MODE_PCON: Protocol converter mode of LSPCON
+ * which drives DP++ to HDMI 2.0 active conversion.
+ */
+enum drm_lspcon_mode {
+ DRM_LSPCON_MODE_INVALID,
+ DRM_LSPCON_MODE_LS,
+ DRM_LSPCON_MODE_PCON,
+};
+
+/**
* enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor
* @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor
* @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor
@@ -70,6 +91,7 @@ ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
* @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor
* @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor
* @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor
+ * @DRM_DP_DUAL_MODE_LSPCON: Level shifter / protocol converter
*/
enum drm_dp_dual_mode_type {
DRM_DP_DUAL_MODE_NONE,
@@ -78,6 +100,7 @@ enum drm_dp_dual_mode_type {
DRM_DP_DUAL_MODE_TYPE1_HDMI,
DRM_DP_DUAL_MODE_TYPE2_DVI,
DRM_DP_DUAL_MODE_TYPE2_HDMI,
+ DRM_DP_DUAL_MODE_LSPCON,
};
enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
@@ -89,4 +112,8 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable);
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
+int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode *current_mode);
+int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+ enum drm_lspcon_mode reqd_mode);
#endif
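As a sketch, a caller might use the two helpers to make sure the adaptor is in PCON mode before attempting HDMI 2.0 rates (the function name is illustrative):

static int foo_lspcon_enter_pcon(struct i2c_adapter *ddc)
{
        enum drm_lspcon_mode mode;
        int ret;

        ret = drm_lspcon_get_mode(ddc, &mode);
        if (ret)
                return ret;

        if (mode == DRM_LSPCON_MODE_PCON)
                return 0;       /* already converting DP++ to HDMI 2.0 */

        return drm_lspcon_set_mode(ddc, DRM_LSPCON_MODE_PCON);
}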
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2a79882cb68e..55bbeb0ff594 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -690,6 +690,12 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
}
+static inline bool
+drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
+}
+
/*
* DisplayPort AUX channel
*/
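A sketch of how the new predicate gates branch-only DPCD reads; struct foo_dp is a hypothetical driver context:

struct foo_dp {
        struct drm_dp_aux aux;
        u8 dpcd[DP_RECEIVER_CAP_SIZE];
        u8 downstream_ports[4];
};

static int foo_dp_read_downstream_info(struct foo_dp *dp)
{
        ssize_t ret;

        /* Only branch devices carry downstream-port capabilities. */
        if (!drm_dp_is_branch(dp->dpcd))
                return 0;

        ret = drm_dp_dpcd_read(&dp->aux, DP_DOWNSTREAM_PORT_0,
                               dp->downstream_ports, 4);
        return ret == 4 ? 0 : -EIO;
}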
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 387e33a4d6ee..c7438ff0d609 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -189,7 +189,7 @@ static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
}
/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */
-static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc);
+static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc);
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 30c30fa87ee8..dc0aafab9ffd 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -25,8 +25,29 @@
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
+/**
+ * struct drm_format_info - information about a DRM format
+ * @format: 4CC format identifier (DRM_FORMAT_*)
+ * @depth: Color depth (number of bits per pixel excluding padding bits),
+ * valid for a subset of RGB formats only. This is a legacy field, do not
+ * use in new code and set to 0 for new formats.
+ * @num_planes: Number of color planes (1 to 3)
+ * @cpp: Number of bytes per pixel (per plane)
+ * @hsub: Horizontal chroma subsampling factor
+ * @vsub: Vertical chroma subsampling factor
+ */
+struct drm_format_info {
+ u32 format;
+ u8 depth;
+ u8 num_planes;
+ u8 cpp[3];
+ u8 hsub;
+ u8 vsub;
+};
+
+const struct drm_format_info *__drm_format_info(u32 format);
+const struct drm_format_info *drm_format_info(u32 format);
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
-void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
int drm_format_num_planes(uint32_t format);
int drm_format_plane_cpp(uint32_t format, int plane);
int drm_format_horz_chroma_subsampling(uint32_t format);
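A sketch of the lookup replacing open-coded bpp math, computing the byte pitch of one plane (helper name is illustrative):

static unsigned int foo_fb_plane_pitch(u32 format, unsigned int width,
                                       int plane)
{
        const struct drm_format_info *info = drm_format_info(format);

        if (!info || plane >= info->num_planes)
                return 0;

        /* Chroma planes are horizontally subsampled by info->hsub. */
        if (plane > 0)
                width /= info->hsub;

        return info->cpp[plane] * width;
}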
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 3fd87b386ed7..26a64805cc15 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -4,6 +4,7 @@
#include <linux/of_graph.h>
struct component_master_ops;
+struct component_match;
struct device;
struct drm_device;
struct drm_encoder;
@@ -12,6 +13,10 @@ struct device_node;
#ifdef CONFIG_OF
extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port);
+extern void drm_of_component_match_add(struct device *master,
+ struct component_match **matchptr,
+ int (*compare)(struct device *, void *),
+ struct device_node *node);
extern int drm_of_component_probe(struct device *dev,
int (*compare_of)(struct device *, void *),
const struct component_master_ops *m_ops);
@@ -25,6 +30,14 @@ static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
return 0;
}
+static inline void
+drm_of_component_match_add(struct device *master,
+ struct component_match **matchptr,
+ int (*compare)(struct device *, void *),
+ struct device_node *node)
+{
+}
+
static inline int
drm_of_component_probe(struct device *dev,
int (*compare_of)(struct device *, void *),
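A sketch of a component-master probe collecting match entries with the new wrapper; the "ports" property, compare_of() and foo_master_ops are hypothetical:

static int foo_probe(struct platform_device *pdev)
{
        struct component_match *match = NULL;
        struct device_node *port;
        int i;

        for (i = 0; ; i++) {
                port = of_parse_phandle(pdev->dev.of_node, "ports", i);
                if (!port)
                        break;

                drm_of_component_match_add(&pdev->dev, &match,
                                           compare_of, port);
                of_node_put(port);
        }

        return component_master_add_with_match(&pdev->dev, &foo_master_ops,
                                               match);
}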
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 8b4dc62470ff..0bed92c5dbd8 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -65,7 +65,7 @@ struct drm_plane_state {
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
- struct fence *fence;
+ struct dma_fence *fence;
/* Signed dest location allows it to be partially off screen */
int32_t crtc_x, crtc_y;
@@ -94,7 +94,6 @@ struct drm_plane_state {
struct drm_atomic_state *state;
};
-
/**
* struct drm_plane_funcs - driver plane control functions
*/
@@ -392,6 +391,7 @@ enum drm_plane_type {
* @type: type of plane (overlay, primary, cursor)
* @state: current atomic state for this plane
* @zpos_property: zpos property for this plane
+ * @rotation_property: rotation property for this plane
* @helper_private: mid-layer private data
*/
struct drm_plane {
@@ -438,6 +438,7 @@ struct drm_plane {
struct drm_plane_state *state;
struct drm_property *zpos_property;
+ struct drm_property *rotation_property;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index b46fa0ef3005..545c6e0fea7d 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -64,7 +64,7 @@ struct i915_audio_component_ops {
* Called from audio driver. After audio driver sets the
* sample rate, it will call this function to set n/cts
*/
- int (*sync_audio_rate)(struct device *, int port, int rate);
+ int (*sync_audio_rate)(struct device *, int port, int pipe, int rate);
/**
* @get_eld: fill the audio state and ELD bytes for the given port
*
@@ -77,7 +77,7 @@ struct i915_audio_component_ops {
* Note that the returned size may be over @max_bytes. Then it
* implies that only a part of ELD has been copied to the buffer.
*/
- int (*get_eld)(struct device *, int port, bool *enabled,
+ int (*get_eld)(struct device *, int port, int pipe, bool *enabled,
unsigned char *buf, int max_bytes);
};
@@ -97,7 +97,7 @@ struct i915_audio_component_audio_ops {
* status accordingly (even when the HDA controller is in power save
* mode).
*/
- void (*pin_eld_notify)(void *audio_ptr, int port);
+ void (*pin_eld_notify)(void *audio_ptr, int port, int pipe);
};
/**
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 9eb940d6755f..652e45be97c8 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -47,6 +47,8 @@ struct drm_mm_node;
struct ttm_placement;
+struct ttm_place;
+
/**
* struct ttm_bus_placement
*
@@ -209,7 +211,7 @@ struct ttm_buffer_object {
* Members protected by a bo reservation.
*/
- struct fence *moving;
+ struct dma_fence *moving;
struct drm_vma_offset_node vma_node;
@@ -396,6 +398,17 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
int resched);
/**
+ * ttm_bo_eviction_valuable
+ *
+ * @bo: The buffer object to evict
+ * @place: the placement we need to make room for
+ *
+ * Check if it is valuable to evict the BO to make room for the given placement.
+ */
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+
+/**
* ttm_bo_synccpu_write_grab
*
* @bo: The buffer object:
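A sketch of a driver overriding the corresponding ttm_bo_driver hook while falling back to the default; foo_bo_satisfies_place() is hypothetical:

static bool foo_eviction_valuable(struct ttm_buffer_object *bo,
                                  const struct ttm_place *place)
{
        /* Evicting a BO that already satisfies the requested placement
         * would not free up any useful room. */
        if (foo_bo_satisfies_place(bo, place))
                return false;

        return ttm_bo_eviction_valuable(bo, place);
}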
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4f0a92185995..cdbdb40eb5bd 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -303,7 +303,7 @@ struct ttm_mem_type_manager {
/*
* Protected by @move_lock.
*/
- struct fence *move;
+ struct dma_fence *move;
};
/**
@@ -371,9 +371,21 @@ struct ttm_bo_driver {
* submission as a consequence.
*/
- int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
- int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man);
+ int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
+ int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man);
+
+ /**
+ * struct ttm_bo_driver member eviction_valuable
+ *
+ * @bo: the buffer object to be evicted
+ * @place: placement we need room for
+ *
+ * Check with the driver if it is valuable to evict a BO to make room
+ * for a certain placement.
+ */
+ bool (*eviction_valuable)(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
/**
* struct ttm_bo_driver member evict_flags:
*
@@ -384,8 +396,9 @@ struct ttm_bo_driver {
* finished, they'll end up in bo->mem.flags
*/
- void(*evict_flags) (struct ttm_buffer_object *bo,
- struct ttm_placement *placement);
+ void (*evict_flags)(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+
/**
* struct ttm_bo_driver member move:
*
@@ -399,10 +412,9 @@ struct ttm_bo_driver {
*
* Move a buffer between two memory regions.
*/
- int (*move) (struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem);
+ int (*move)(struct ttm_buffer_object *bo, bool evict,
+ bool interruptible, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
/**
* struct ttm_bo_driver_member verify_access
@@ -416,8 +428,8 @@ struct ttm_bo_driver {
* access for all buffer objects.
* This function should return 0 if access is granted, -EPERM otherwise.
*/
- int (*verify_access) (struct ttm_buffer_object *bo,
- struct file *filp);
+ int (*verify_access)(struct ttm_buffer_object *bo,
+ struct file *filp);
/* hook to notify driver about a driver move so it
* can do tiling things */
@@ -430,7 +442,7 @@ struct ttm_bo_driver {
/**
* notify the driver that we're about to swap out this bo
*/
- void (*swap_notify) (struct ttm_buffer_object *bo);
+ void (*swap_notify)(struct ttm_buffer_object *bo);
/**
* Driver callback on when mapping io memory (for bo_move_memcpy
@@ -438,8 +450,10 @@ struct ttm_bo_driver {
 * the mapping is not used anymore. io_mem_reserve & io_mem_free
* are balanced.
*/
- int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
- void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+ int (*io_mem_reserve)(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
+ void (*io_mem_free)(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
/**
* Optional driver callback for when BO is removed from the LRU.
@@ -1025,7 +1039,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
*/
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct fence *fence, bool evict,
+ struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem);
/**
@@ -1040,7 +1054,7 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* immediately or hang it on a temporary buffer object.
*/
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
- struct fence *fence, bool evict,
+ struct dma_fence *fence, bool evict,
struct ttm_mem_reg *new_mem);
/**
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index b620c317c772..47f35b8e6d09 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -114,6 +114,6 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct list_head *list,
- struct fence *fence);
+ struct dma_fence *fence);
#endif
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index e0b0741ae671..8daeb3ce0016 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -30,7 +30,7 @@
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/wait.h>
struct device;
@@ -143,7 +143,7 @@ struct dma_buf {
wait_queue_head_t poll;
struct dma_buf_poll_cb_t {
- struct fence_cb cb;
+ struct dma_fence_cb cb;
wait_queue_head_t *poll;
unsigned long active;
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
new file mode 100644
index 000000000000..5900945f962d
--- /dev/null
+++ b/include/linux/dma-fence-array.h
@@ -0,0 +1,86 @@
+/*
+ * fence-array: aggregates fences to be waited on together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ * Gustavo Padovan <gustavo@padovan.org>
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_ARRAY_H
+#define __LINUX_DMA_FENCE_ARRAY_H
+
+#include <linux/dma-fence.h>
+
+/**
+ * struct dma_fence_array_cb - callback helper for fence array
+ * @cb: fence callback structure for signaling
+ * @array: reference to the parent fence array object
+ */
+struct dma_fence_array_cb {
+ struct dma_fence_cb cb;
+ struct dma_fence_array *array;
+};
+
+/**
+ * struct dma_fence_array - fence to represent an array of fences
+ * @base: fence base class
+ * @lock: spinlock for fence handling
+ * @num_fences: number of fences in the array
+ * @num_pending: fences in the array still pending
+ * @fences: array of the fences
+ */
+struct dma_fence_array {
+ struct dma_fence base;
+
+ spinlock_t lock;
+ unsigned num_fences;
+ atomic_t num_pending;
+ struct dma_fence **fences;
+};
+
+extern const struct dma_fence_ops dma_fence_array_ops;
+
+/**
+ * dma_fence_is_array - check if a fence is from the array subclass
+ * @fence: fence to test
+ *
+ * Return true if it is a dma_fence_array and false otherwise.
+ */
+static inline bool dma_fence_is_array(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_array_ops;
+}
+
+/**
+ * to_dma_fence_array - cast a fence to a dma_fence_array
+ * @fence: fence to cast to a dma_fence_array
+ *
+ * Returns NULL if the fence is not a dma_fence_array,
+ * or the dma_fence_array otherwise.
+ */
+static inline struct dma_fence_array *
+to_dma_fence_array(struct dma_fence *fence)
+{
+ if (fence->ops != &dma_fence_array_ops)
+ return NULL;
+
+ return container_of(fence, struct dma_fence_array, base);
+}
+
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+ struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
+#endif /* __LINUX_DMA_FENCE_ARRAY_H */
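A sketch of merging two fences into one that signals only when both have signaled (signal_on_any = false); this assumes, as the implementation in drivers/dma-buf/dma-fence-array.c does, that the array takes ownership of the fences[] storage and the references:

static struct dma_fence *foo_merge_fences(struct dma_fence *a,
                                          struct dma_fence *b)
{
        struct dma_fence **fences;
        struct dma_fence_array *array;

        fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
        if (!fences)
                return NULL;

        fences[0] = dma_fence_get(a);
        fences[1] = dma_fence_get(b);

        array = dma_fence_array_create(2, fences,
                                       dma_fence_context_alloc(1), 1, false);
        if (!array) {
                dma_fence_put(a);
                dma_fence_put(b);
                kfree(fences);
                return NULL;
        }

        return &array->base;
}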
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
new file mode 100644
index 000000000000..ba60c043a5d3
--- /dev/null
+++ b/include/linux/dma-fence.h
@@ -0,0 +1,437 @@
+/*
+ * Fence mechanism for dma-buf to allow for asynchronous dma access
+ *
+ * Copyright (C) 2012 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_H
+#define __LINUX_DMA_FENCE_H
+
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+#include <linux/rcupdate.h>
+
+struct dma_fence;
+struct dma_fence_ops;
+struct dma_fence_cb;
+
+/**
+ * struct dma_fence - software synchronization primitive
+ * @refcount: refcount for this fence
+ * @ops: dma_fence_ops associated with this fence
+ * @rcu: used for releasing fence with kfree_rcu
+ * @cb_list: list of all callbacks to call
+ * @lock: spin_lock_irqsave used for locking
+ * @context: execution context this fence belongs to, returned by
+ * dma_fence_context_alloc()
+ * @seqno: the sequence number of this fence inside the execution context,
+ * can be compared to decide which fence would be signaled later.
+ * @flags: A mask of DMA_FENCE_FLAG_* defined below
+ * @timestamp: Timestamp when the fence was signaled.
+ * @status: Optional, only valid if < 0, must be set before calling
+ * dma_fence_signal, indicates that the fence has completed with an error.
+ *
+ * the flags member must be manipulated and read using the appropriate
+ * atomic ops (bit_*), so taking the spinlock will not be needed most
+ * of the time.
+ *
+ * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
+ * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
+ * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
+ * implementer of the fence for its own purposes. Can be used in different
+ * ways by different fence implementers, so do not rely on this.
+ *
+ * Since atomic bitops are used, this is not guaranteed to be the case.
+ * Particularly, if the bit was set, but dma_fence_signal was called right
+ * before this bit was set, it would have been able to set the
+ * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
+ * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting
+ * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
+ * after dma_fence_signal was called, any enable_signaling call will have either
+ * been completed, or never called at all.
+ */
+struct dma_fence {
+ struct kref refcount;
+ const struct dma_fence_ops *ops;
+ struct rcu_head rcu;
+ struct list_head cb_list;
+ spinlock_t *lock;
+ u64 context;
+ unsigned seqno;
+ unsigned long flags;
+ ktime_t timestamp;
+ int status;
+};
+
+enum dma_fence_flag_bits {
+ DMA_FENCE_FLAG_SIGNALED_BIT,
+ DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
+};
+
+typedef void (*dma_fence_func_t)(struct dma_fence *fence,
+ struct dma_fence_cb *cb);
+
+/**
+ * struct dma_fence_cb - callback for dma_fence_add_callback
+ * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
+ * @func: dma_fence_func_t to call
+ *
+ * This struct will be initialized by dma_fence_add_callback; additional
+ * data can be passed along by embedding dma_fence_cb in another struct.
+ */
+struct dma_fence_cb {
+ struct list_head node;
+ dma_fence_func_t func;
+};
+
+/**
+ * struct dma_fence_ops - operations implemented for fence
+ * @get_driver_name: returns the driver name.
+ * @get_timeline_name: return the name of the context this fence belongs to.
+ * @enable_signaling: enable software signaling of fence.
+ * @signaled: [optional] peek whether the fence is signaled, can be null.
+ * @wait: custom wait implementation, or dma_fence_default_wait.
+ * @release: [optional] called on destruction of fence, can be null
+ * @fill_driver_data: [optional] callback to fill in free-form debug info
+ * Returns amount of bytes filled, or -errno.
+ * @fence_value_str: [optional] fills in the value of the fence as a string
+ * @timeline_value_str: [optional] fills in the current value of the timeline
+ * as a string
+ *
+ * Notes on enable_signaling:
+ * For fence implementations that have the capability for hw->hw
+ * signaling, they can implement this op to enable the necessary
+ * irqs, or insert commands into cmdstream, etc. This is called
+ * in the first wait() or add_callback() path to let the fence
+ * implementation know that there is another driver waiting on
+ * the signal (ie. hw->sw case).
+ *
+ * This function can be called from atomic context, but not
+ * from irq context, so normal spinlocks can be used.
+ *
+ * A return value of false indicates the fence already passed,
+ * or some failure occurred that made it impossible to enable
+ * signaling. True indicates successful enabling.
+ *
+ * fence->status may be set in enable_signaling, but only when false is
+ * returned.
+ *
+ * Calling dma_fence_signal before enable_signaling is called allows
+ * for a tiny race window in which enable_signaling is called during,
+ * before, or after dma_fence_signal. To fight this, it is recommended
+ * that before enable_signaling returns true an extra reference is
+ * taken on the fence, to be released when the fence is signaled.
+ * This will mean dma_fence_signal will still be called twice, but
+ * the second time will be a noop since it was already signaled.
+ *
+ * Notes on signaled:
+ * May set fence->status if returning true.
+ *
+ * Notes on wait:
+ * Must not be NULL, set to dma_fence_default_wait for default implementation.
+ * The dma_fence_default_wait implementation should work for any fence, as long
+ * as enable_signaling works correctly.
+ *
+ * Must return -ERESTARTSYS if intr is true and the wait was
+ * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
+ * timed out. Can also return other error values on custom implementations,
+ * which should be treated as if the fence is signaled. For example a hardware
+ * lockup could be reported like that.
+ *
+ * Notes on release:
+ * Can be NULL, this function allows additional commands to run on
+ * destruction of the fence. Can be called from irq context.
+ * If pointer is set to NULL, kfree will get called instead.
+ */
+
+struct dma_fence_ops {
+ const char * (*get_driver_name)(struct dma_fence *fence);
+ const char * (*get_timeline_name)(struct dma_fence *fence);
+ bool (*enable_signaling)(struct dma_fence *fence);
+ bool (*signaled)(struct dma_fence *fence);
+ signed long (*wait)(struct dma_fence *fence,
+ bool intr, signed long timeout);
+ void (*release)(struct dma_fence *fence);
+
+ int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
+ void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
+ void (*timeline_value_str)(struct dma_fence *fence,
+ char *str, int size);
+};
+
+void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, unsigned seqno);
+
+void dma_fence_release(struct kref *kref);
+void dma_fence_free(struct dma_fence *fence);
+
+/**
+ * dma_fence_put - decreases refcount of the fence
+ * @fence: [in] fence to reduce refcount of
+ */
+static inline void dma_fence_put(struct dma_fence *fence)
+{
+ if (fence)
+ kref_put(&fence->refcount, dma_fence_release);
+}
+
+/**
+ * dma_fence_get - increases refcount of the fence
+ * @fence: [in] fence to increase refcount of
+ *
+ * Returns the same fence, with refcount increased by 1.
+ */
+static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
+{
+ if (fence)
+ kref_get(&fence->refcount);
+ return fence;
+}
+
+/**
+ * dma_fence_get_rcu - get a fence from a reservation_object_list with
+ * rcu read lock
+ * @fence: [in] fence to increase refcount of
+ *
+ * Function returns NULL if no refcount could be obtained, or the fence.
+ */
+static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
+{
+ if (kref_get_unless_zero(&fence->refcount))
+ return fence;
+ else
+ return NULL;
+}
+
+/**
+ * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
+ * @fencep: [in] pointer to the fence to increase the refcount of
+ *
+ * Function returns NULL if no refcount could be obtained, or the fence.
+ * This function handles acquiring a reference to a fence that may be
+ * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * so long as the caller is using RCU on the pointer to the fence.
+ *
+ * An alternative mechanism is to employ a seqlock to protect a bunch of
+ * fences, such as used by struct reservation_object. When using a seqlock,
+ * the seqlock must be taken before and checked after a reference to the
+ * fence is acquired (as shown here).
+ *
+ * The caller is required to hold the RCU read lock.
+ */
+static inline struct dma_fence *
+dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
+{
+ do {
+ struct dma_fence *fence;
+
+ fence = rcu_dereference(*fencep);
+ if (!fence || !dma_fence_get_rcu(fence))
+ return NULL;
+
+ /* The atomic_inc_not_zero() inside dma_fence_get_rcu()
+ * provides a full memory barrier upon success (such as now).
+ * This is paired with the write barrier from assigning
+ * to the __rcu protected fence pointer so that if that
+ * pointer still matches the current fence, we know we
+ * have successfully acquired a reference to it. If it no
+ * longer matches, we are holding a reference to some other
+ * reallocated pointer. This is possible if the allocator
+ * is using a freelist like SLAB_DESTROY_BY_RCU where the
+ * fence remains valid for the RCU grace period, but it
+ * may be reallocated. When using such allocators, we are
+ * responsible for ensuring the reference we get is to
+ * the right fence, as below.
+ */
+ if (fence == rcu_access_pointer(*fencep))
+ return rcu_pointer_handoff(fence);
+
+ dma_fence_put(fence);
+ } while (1);
+}
+
+int dma_fence_signal(struct dma_fence *fence);
+int dma_fence_signal_locked(struct dma_fence *fence);
+signed long dma_fence_default_wait(struct dma_fence *fence,
+ bool intr, signed long timeout);
+int dma_fence_add_callback(struct dma_fence *fence,
+ struct dma_fence_cb *cb,
+ dma_fence_func_t func);
+bool dma_fence_remove_callback(struct dma_fence *fence,
+ struct dma_fence_cb *cb);
+void dma_fence_enable_sw_signaling(struct dma_fence *fence);
+
+/**
+ * dma_fence_is_signaled_locked - Return an indication if the fence
+ * is signaled yet.
+ * @fence: [in] the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if dma_fence_add_callback, dma_fence_wait or
+ * dma_fence_enable_sw_signaling haven't been called before.
+ *
+ * This function requires fence->lock to be held.
+ */
+static inline bool
+dma_fence_is_signaled_locked(struct dma_fence *fence)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return true;
+
+ if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ dma_fence_signal_locked(fence);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
+ * @fence: [in] the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if dma_fence_add_callback, dma_fence_wait or
+ * dma_fence_enable_sw_signaling haven't been called before.
+ *
+ * It's recommended for seqno fences to call dma_fence_signal when the
+ * operation is complete, it makes it possible to prevent issues from
+ * wraparound between time of issue and time of use by checking the return
+ * value of this function before calling hardware-specific wait instructions.
+ */
+static inline bool
+dma_fence_is_signaled(struct dma_fence *fence)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return true;
+
+ if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ dma_fence_signal(fence);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * dma_fence_is_later - return if f1 is chronologically later than f2
+ * @f1: [in] the first fence from the same context
+ * @f2: [in] the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not re-used across contexts.
+ */
+static inline bool dma_fence_is_later(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ if (WARN_ON(f1->context != f2->context))
+ return false;
+
+ return (int)(f1->seqno - f2->seqno) > 0;
+}
+
+/**
+ * dma_fence_later - return the chronologically later fence
+ * @f1: [in] the first fence from the same context
+ * @f2: [in] the second fence from the same context
+ *
+ * Returns NULL if both fences are signaled, otherwise the fence that would be
+ * signaled last. Both fences must be from the same context, since a seqno is
+ * not re-used across contexts.
+ */
+static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ if (WARN_ON(f1->context != f2->context))
+ return NULL;
+
+ /*
+ * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
+ * have been set if enable_signaling wasn't called, and enabling that
+ * here is overkill.
+ */
+ if (dma_fence_is_later(f1, f2))
+ return dma_fence_is_signaled(f1) ? NULL : f1;
+ else
+ return dma_fence_is_signaled(f2) ? NULL : f2;
+}
+
+signed long dma_fence_wait_timeout(struct dma_fence *,
+ bool intr, signed long timeout);
+signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
+ uint32_t count,
+ bool intr, signed long timeout);
+
+/**
+ * dma_fence_wait - sleep until the fence gets signaled
+ * @fence: [in] the fence to wait on
+ * @intr: [in] if true, do an interruptible wait
+ *
+ * This function will return -ERESTARTSYS if interrupted by a signal,
+ * or 0 if the fence was signaled. Other error values may be
+ * returned on custom implementations.
+ *
+ * Performs a synchronous wait on this fence. It is assumed the caller
+ * directly or indirectly holds a reference to the fence, otherwise the
+ * fence might be freed before return, resulting in undefined behavior.
+ */
+static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
+{
+ signed long ret;
+
+ /* Since dma_fence_wait_timeout cannot timeout with
+ * MAX_SCHEDULE_TIMEOUT, only valid return values are
+ * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
+ */
+ ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
+
+ return ret < 0 ? ret : 0;
+}
+
+u64 dma_fence_context_alloc(unsigned num);
+
+#define DMA_FENCE_TRACE(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
+ pr_info("f %llu#%u: " fmt, \
+ __ff->context, __ff->seqno, ##args); \
+ } while (0)
+
+#define DMA_FENCE_WARN(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
+ ##args); \
+ } while (0)
+
+#define DMA_FENCE_ERR(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
+ ##args); \
+ } while (0)
+
+#endif /* __LINUX_DMA_FENCE_H */
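To illustrate the renamed API end to end, a minimal software-fence provider with the mandatory ops; everything foo_-prefixed is hypothetical:

static const char *foo_get_driver_name(struct dma_fence *f)
{
        return "foo";
}

static const char *foo_get_timeline_name(struct dma_fence *f)
{
        return "foo-timeline";
}

static bool foo_enable_signaling(struct dma_fence *f)
{
        /* Nothing to arm; foo's irq handler calls dma_fence_signal(). */
        return true;
}

static const struct dma_fence_ops foo_fence_ops = {
        .get_driver_name = foo_get_driver_name,
        .get_timeline_name = foo_get_timeline_name,
        .enable_signaling = foo_enable_signaling,
        .wait = dma_fence_default_wait,
};

static DEFINE_SPINLOCK(foo_fence_lock);

static struct dma_fence *foo_fence_create(u64 context, unsigned seqno)
{
        struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (f)
                dma_fence_init(f, &foo_fence_ops, &foo_fence_lock,
                               context, seqno);
        return f;
}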
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
deleted file mode 100644
index a44794e508df..000000000000
--- a/include/linux/fence-array.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * fence-array: aggregates fence to be waited together
- *
- * Copyright (C) 2016 Collabora Ltd
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
- * Authors:
- * Gustavo Padovan <gustavo@padovan.org>
- * Christian König <christian.koenig@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __LINUX_FENCE_ARRAY_H
-#define __LINUX_FENCE_ARRAY_H
-
-#include <linux/fence.h>
-
-/**
- * struct fence_array_cb - callback helper for fence array
- * @cb: fence callback structure for signaling
- * @array: reference to the parent fence array object
- */
-struct fence_array_cb {
- struct fence_cb cb;
- struct fence_array *array;
-};
-
-/**
- * struct fence_array - fence to represent an array of fences
- * @base: fence base class
- * @lock: spinlock for fence handling
- * @num_fences: number of fences in the array
- * @num_pending: fences in the array still pending
- * @fences: array of the fences
- */
-struct fence_array {
- struct fence base;
-
- spinlock_t lock;
- unsigned num_fences;
- atomic_t num_pending;
- struct fence **fences;
-};
-
-extern const struct fence_ops fence_array_ops;
-
-/**
- * fence_is_array - check if a fence is from the array subsclass
- *
- * Return true if it is a fence_array and false otherwise.
- */
-static inline bool fence_is_array(struct fence *fence)
-{
- return fence->ops == &fence_array_ops;
-}
-
-/**
- * to_fence_array - cast a fence to a fence_array
- * @fence: fence to cast to a fence_array
- *
- * Returns NULL if the fence is not a fence_array,
- * or the fence_array otherwise.
- */
-static inline struct fence_array *to_fence_array(struct fence *fence)
-{
- if (fence->ops != &fence_array_ops)
- return NULL;
-
- return container_of(fence, struct fence_array, base);
-}
-
-struct fence_array *fence_array_create(int num_fences, struct fence **fences,
- u64 context, unsigned seqno,
- bool signal_on_any);
-
-#endif /* __LINUX_FENCE_ARRAY_H */
diff --git a/include/linux/fence.h b/include/linux/fence.h
deleted file mode 100644
index 0d763053f97a..000000000000
--- a/include/linux/fence.h
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Fence mechanism for dma-buf to allow for asynchronous dma access
- *
- * Copyright (C) 2012 Canonical Ltd
- * Copyright (C) 2012 Texas Instruments
- *
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __LINUX_FENCE_H
-#define __LINUX_FENCE_H
-
-#include <linux/err.h>
-#include <linux/wait.h>
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/kref.h>
-#include <linux/sched.h>
-#include <linux/printk.h>
-#include <linux/rcupdate.h>
-
-struct fence;
-struct fence_ops;
-struct fence_cb;
-
-/**
- * struct fence - software synchronization primitive
- * @refcount: refcount for this fence
- * @ops: fence_ops associated with this fence
- * @rcu: used for releasing fence with kfree_rcu
- * @cb_list: list of all callbacks to call
- * @lock: spin_lock_irqsave used for locking
- * @context: execution context this fence belongs to, returned by
- * fence_context_alloc()
- * @seqno: the sequence number of this fence inside the execution context,
- * can be compared to decide which fence would be signaled later.
- * @flags: A mask of FENCE_FLAG_* defined below
- * @timestamp: Timestamp when the fence was signaled.
- * @status: Optional, only valid if < 0, must be set before calling
- * fence_signal, indicates that the fence has completed with an error.
- *
- * the flags member must be manipulated and read using the appropriate
- * atomic ops (bit_*), so taking the spinlock will not be needed most
- * of the time.
- *
- * FENCE_FLAG_SIGNALED_BIT - fence is already signaled
- * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
- * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
- * implementer of the fence for its own purposes. Can be used in different
- * ways by different fence implementers, so do not rely on this.
- *
- * Since atomic bitops are used, this is not guaranteed to be the case.
- * Particularly, if the bit was set, but fence_signal was called right
- * before this bit was set, it would have been able to set the
- * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
- * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting
- * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
- * after fence_signal was called, any enable_signaling call will have either
- * been completed, or never called at all.
- */
-struct fence {
- struct kref refcount;
- const struct fence_ops *ops;
- struct rcu_head rcu;
- struct list_head cb_list;
- spinlock_t *lock;
- u64 context;
- unsigned seqno;
- unsigned long flags;
- ktime_t timestamp;
- int status;
-};
-
-enum fence_flag_bits {
- FENCE_FLAG_SIGNALED_BIT,
- FENCE_FLAG_ENABLE_SIGNAL_BIT,
- FENCE_FLAG_USER_BITS, /* must always be last member */
-};
-
-typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);
-
-/**
- * struct fence_cb - callback for fence_add_callback
- * @node: used by fence_add_callback to append this struct to fence::cb_list
- * @func: fence_func_t to call
- *
- * This struct will be initialized by fence_add_callback, additional
- * data can be passed along by embedding fence_cb in another struct.
- */
-struct fence_cb {
- struct list_head node;
- fence_func_t func;
-};
-
-/**
- * struct fence_ops - operations implemented for fence
- * @get_driver_name: returns the driver name.
- * @get_timeline_name: return the name of the context this fence belongs to.
- * @enable_signaling: enable software signaling of fence.
- * @signaled: [optional] peek whether the fence is signaled, can be null.
- * @wait: custom wait implementation, or fence_default_wait.
- * @release: [optional] called on destruction of fence, can be null
- * @fill_driver_data: [optional] callback to fill in free-form debug info
- * Returns amount of bytes filled, or -errno.
- * @fence_value_str: [optional] fills in the value of the fence as a string
- * @timeline_value_str: [optional] fills in the current value of the timeline
- * as a string
- *
- * Notes on enable_signaling:
- * For fence implementations that have the capability for hw->hw
- * signaling, they can implement this op to enable the necessary
- * irqs, or insert commands into cmdstream, etc. This is called
- * in the first wait() or add_callback() path to let the fence
- * implementation know that there is another driver waiting on
- * the signal (ie. hw->sw case).
- *
- * This function can be called called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
- *
- * A return value of false indicates the fence already passed,
- * or some failure occurred that made it impossible to enable
- * signaling. True indicates successful enabling.
- *
- * fence->status may be set in enable_signaling, but only when false is
- * returned.
- *
- * Calling fence_signal before enable_signaling is called allows
- * for a tiny race window in which enable_signaling is called during,
- * before, or after fence_signal. To fight this, it is recommended
- * that before enable_signaling returns true an extra reference is
- * taken on the fence, to be released when the fence is signaled.
- * This will mean fence_signal will still be called twice, but
- * the second time will be a noop since it was already signaled.
- *
- * Notes on signaled:
- * May set fence->status if returning true.
- *
- * Notes on wait:
- * Must not be NULL, set to fence_default_wait for default implementation.
- * the fence_default_wait implementation should work for any fence, as long
- * as enable_signaling works correctly.
- *
- * Must return -ERESTARTSYS if the wait is intr = true and the wait was
- * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
- * timed out. Can also return other error values on custom implementations,
- * which should be treated as if the fence is signaled. For example a hardware
- * lockup could be reported like that.
- *
- * Notes on release:
- * Can be NULL, this function allows additional commands to run on
- * destruction of the fence. Can be called from irq context.
- * If pointer is set to NULL, kfree will get called instead.
- */
-
-struct fence_ops {
- const char * (*get_driver_name)(struct fence *fence);
- const char * (*get_timeline_name)(struct fence *fence);
- bool (*enable_signaling)(struct fence *fence);
- bool (*signaled)(struct fence *fence);
- signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
- void (*release)(struct fence *fence);
-
- int (*fill_driver_data)(struct fence *fence, void *data, int size);
- void (*fence_value_str)(struct fence *fence, char *str, int size);
- void (*timeline_value_str)(struct fence *fence, char *str, int size);
-};
-
-void fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, u64 context, unsigned seqno);
-
-void fence_release(struct kref *kref);
-void fence_free(struct fence *fence);
-
-/**
- * fence_get - increases refcount of the fence
- * @fence: [in] fence to increase refcount of
- *
- * Returns the same fence, with refcount increased by 1.
- */
-static inline struct fence *fence_get(struct fence *fence)
-{
- if (fence)
- kref_get(&fence->refcount);
- return fence;
-}
-
-/**
- * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock
- * @fence: [in] fence to increase refcount of
- *
- * Function returns NULL if no refcount could be obtained, or the fence.
- */
-static inline struct fence *fence_get_rcu(struct fence *fence)
-{
- if (kref_get_unless_zero(&fence->refcount))
- return fence;
- else
- return NULL;
-}
-
-/**
- * fence_put - decreases refcount of the fence
- * @fence: [in] fence to reduce refcount of
- */
-static inline void fence_put(struct fence *fence)
-{
- if (fence)
- kref_put(&fence->refcount, fence_release);
-}
-
-int fence_signal(struct fence *fence);
-int fence_signal_locked(struct fence *fence);
-signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
-int fence_add_callback(struct fence *fence, struct fence_cb *cb,
- fence_func_t func);
-bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
-void fence_enable_sw_signaling(struct fence *fence);
-
-/**
- * fence_is_signaled_locked - Return an indication of whether the fence has signaled yet.
- * @fence: [in] the fence to check
- *
- * Returns true if the fence was already signaled, false if not. Since this
- * function doesn't enable signaling, it is not guaranteed to ever return
- * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
- * haven't been called before.
- *
- * This function requires fence->lock to be held.
- */
-static inline bool
-fence_is_signaled_locked(struct fence *fence)
-{
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return true;
-
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
- fence_signal_locked(fence);
- return true;
- }
-
- return false;
-}
-
-/**
- * fence_is_signaled - Return an indication of whether the fence has signaled yet.
- * @fence: [in] the fence to check
- *
- * Returns true if the fence was already signaled, false if not. Since this
- * function doesn't enable signaling, it is not guaranteed to ever return
- * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
- * haven't been called before.
- *
- * It's recommended for seqno fences to call fence_signal when the
- * operation is complete; this makes it possible to prevent wraparound
- * issues between time of issue and time of use, by checking the return
- * value of this function before calling hardware-specific wait
- * instructions.
- */
-static inline bool
-fence_is_signaled(struct fence *fence)
-{
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return true;
-
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
- fence_signal(fence);
- return true;
- }
-
- return false;
-}
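As the comment suggests, a typical use is to skip an expensive hardware wait when the fence has already signaled. A hedged sketch, where demo_emit_hw_wait() is a hypothetical driver hook:

#include <linux/fence.h>

/* Hypothetical: make the hardware wait for (context, seqno). */
extern void demo_emit_hw_wait(u64 context, unsigned seqno);

static void demo_sync_to(struct fence *f)
{
	/* Cheap early-out; may also latch the state via ->signaled. */
	if (fence_is_signaled(f))
		return;

	demo_emit_hw_wait(f->context, f->seqno);
}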
-
-/**
- * fence_is_later - return whether f1 is chronologically later than f2
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
- *
- * Returns true if f1 is chronologically later than f2. Both fences must be
- * from the same context, since a seqno is not re-used across contexts.
- */
-static inline bool fence_is_later(struct fence *f1, struct fence *f2)
-{
- if (WARN_ON(f1->context != f2->context))
- return false;
-
- return (int)(f1->seqno - f2->seqno) > 0;
-}
-
-/**
- * fence_later - return the chronologically later fence
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
- *
- * Returns NULL if both fences are signaled, otherwise the fence that would be
- * signaled last. Both fences must be from the same context, since a seqno is
- * not re-used across contexts.
- */
-static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
-{
- if (WARN_ON(f1->context != f2->context))
- return NULL;
-
- /*
- * can't check just FENCE_FLAG_SIGNALED_BIT here; it may never have been
- * set if enable_signaling wasn't called, and enabling that here is
- * overkill.
- */
- if (fence_is_later(f1, f2))
- return fence_is_signaled(f1) ? NULL : f1;
- else
- return fence_is_signaled(f2) ? NULL : f2;
-}
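The signed-difference test in fence_is_later() keeps the comparison correct across seqno wraparound: with 32-bit seqnos, seqno 0x00000002 still compares as later than 0xfffffffe, because (int)(0x00000002 - 0xfffffffe) == 4 > 0 even though the raw value is numerically smaller. A hedged usage sketch with illustrative names:

#include <linux/fence.h>

/*
 * Keep only the newest fence of a context, e.g. when tracking the
 * last submission. Both fences must be from the same context.
 */
static struct fence *demo_keep_newest(struct fence *tracked,
				      struct fence *incoming)
{
	if (!tracked || fence_is_later(incoming, tracked)) {
		fence_put(tracked);	/* fence_put(NULL) is a no-op */
		return fence_get(incoming);
	}
	return tracked;
}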
-
-signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
-signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
- bool intr, signed long timeout);
-
-/**
- * fence_wait - sleep until the fence gets signaled
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
- *
- * This function will return -ERESTARTSYS if interrupted by a signal,
- * or 0 if the fence was signaled. Other error values may be
- * returned on custom implementations.
- *
- * Performs a synchronous wait on this fence. It is assumed the caller
- * directly or indirectly holds a reference to the fence, otherwise the
- * fence might be freed before return, resulting in undefined behavior.
- */
-static inline signed long fence_wait(struct fence *fence, bool intr)
-{
- signed long ret;
-
- /* Since fence_wait_timeout cannot time out with
- * MAX_SCHEDULE_TIMEOUT, the only valid return values are
- * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
- */
- ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
-
- return ret < 0 ? ret : 0;
-}
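A hedged sketch of a bounded wait built on fence_wait_timeout(), mapping its three kinds of return value onto conventional error codes; the 100ms timeout is an arbitrary illustration:

#include <linux/fence.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int demo_wait_bounded(struct fence *f)
{
	signed long ret;

	ret = fence_wait_timeout(f, true, msecs_to_jiffies(100));
	if (ret == 0)
		return -ETIMEDOUT;	/* the wait timed out */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS or a driver error */
	return 0;			/* signaled; ret was jiffies left */
}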
-
-u64 fence_context_alloc(unsigned num);
-
-#define FENCE_TRACE(f, fmt, args...) \
- do { \
- struct fence *__ff = (f); \
- if (IS_ENABLED(CONFIG_FENCE_TRACE)) \
- pr_info("f %llu#%u: " fmt, \
- __ff->context, __ff->seqno, ##args); \
- } while (0)
-
-#define FENCE_WARN(f, fmt, args...) \
- do { \
- struct fence *__ff = (f); \
- pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
-
-#define FENCE_ERR(f, fmt, args...) \
- do { \
- struct fence *__ff = (f); \
- pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
-
-#endif /* __LINUX_FENCE_H */
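Before the header goes away, a hedged sketch of the logging macros above in use; ring_idx and hw_wedged are illustrative. FENCE_TRACE is compiled out unless CONFIG_FENCE_TRACE is enabled, while FENCE_WARN and FENCE_ERR always print:

#include <linux/fence.h>

static void demo_report(struct fence *f, int ring_idx, bool hw_wedged)
{
	FENCE_TRACE(f, "submitted on ring %d\n", ring_idx);

	if (hw_wedged)
		FENCE_ERR(f, "hardware lockup, forcing completion\n");
}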
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index e9744202fa29..edbb4fc674ed 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -78,6 +78,8 @@ enum hdmi_picture_aspect {
HDMI_PICTURE_ASPECT_NONE,
HDMI_PICTURE_ASPECT_4_3,
HDMI_PICTURE_ASPECT_16_9,
+ HDMI_PICTURE_ASPECT_64_27,
+ HDMI_PICTURE_ASPECT_256_135,
HDMI_PICTURE_ASPECT_RESERVED,
};
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index b0f305e77b7f..2e313cca08f0 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -40,7 +40,7 @@
#define _LINUX_RESERVATION_H
#include <linux/ww_mutex.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>
@@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[];
struct reservation_object_list {
struct rcu_head rcu;
u32 shared_count, shared_max;
- struct fence __rcu *shared[];
+ struct dma_fence __rcu *shared[];
};
/**
@@ -74,7 +74,7 @@ struct reservation_object {
struct ww_mutex lock;
seqcount_t seq;
- struct fence __rcu *fence_excl;
+ struct dma_fence __rcu *fence_excl;
struct reservation_object_list __rcu *fence;
struct reservation_object_list *staged;
};
@@ -107,7 +107,7 @@ reservation_object_fini(struct reservation_object *obj)
{
int i;
struct reservation_object_list *fobj;
- struct fence *excl;
+ struct dma_fence *excl;
/*
* This object should be dead and all references must have
@@ -115,12 +115,12 @@ reservation_object_fini(struct reservation_object *obj)
*/
excl = rcu_dereference_protected(obj->fence_excl, 1);
if (excl)
- fence_put(excl);
+ dma_fence_put(excl);
fobj = rcu_dereference_protected(obj->fence, 1);
if (fobj) {
for (i = 0; i < fobj->shared_count; ++i)
- fence_put(rcu_dereference_protected(fobj->shared[i], 1));
+ dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1));
kfree(fobj);
}
@@ -155,7 +155,7 @@ reservation_object_get_list(struct reservation_object *obj)
* RETURNS
* The exclusive fence or NULL
*/
-static inline struct fence *
+static inline struct dma_fence *
reservation_object_get_excl(struct reservation_object *obj)
{
return rcu_dereference_protected(obj->fence_excl,
@@ -173,10 +173,10 @@ reservation_object_get_excl(struct reservation_object *obj)
* RETURNS
* The exclusive fence or NULL if none
*/
-static inline struct fence *
+static inline struct dma_fence *
reservation_object_get_excl_rcu(struct reservation_object *obj)
{
- struct fence *fence;
+ struct dma_fence *fence;
unsigned seq;
retry:
seq = read_seqcount_begin(&obj->seq);
@@ -186,22 +186,22 @@ retry:
rcu_read_unlock();
goto retry;
}
- fence = fence_get(fence);
+ fence = dma_fence_get(fence);
rcu_read_unlock();
return fence;
}
int reservation_object_reserve_shared(struct reservation_object *obj);
void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct fence *fence);
+ struct dma_fence *fence);
void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct fence *fence);
+ struct dma_fence *fence);
int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct fence **pfence_excl,
+ struct dma_fence **pfence_excl,
unsigned *pshared_count,
- struct fence ***pshared);
+ struct dma_fence ***pshared);
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
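With the rename applied, a consumer that snapshots and waits on all fences of a reservation object might look like this hedged sketch (demo_wait_resv is an illustrative name; wait errors are ignored for brevity):

#include <linux/reservation.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>

static int demo_wait_resv(struct reservation_object *resv)
{
	struct dma_fence *excl, **shared;
	unsigned shared_count, i;
	int ret;

	/* Snapshot the exclusive and shared fences under RCU. */
	ret = reservation_object_get_fences_rcu(resv, &excl,
						&shared_count, &shared);
	if (ret)
		return ret;

	if (excl) {
		dma_fence_wait(excl, false);
		dma_fence_put(excl);
	}
	for (i = 0; i < shared_count; i++) {
		dma_fence_wait(shared[i], false);
		dma_fence_put(shared[i]);
	}
	kfree(shared);
	return 0;
}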
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
index a1ba6a5ccdd6..c58c535d12a8 100644
--- a/include/linux/seqno-fence.h
+++ b/include/linux/seqno-fence.h
@@ -20,7 +20,7 @@
#ifndef __LINUX_SEQNO_FENCE_H
#define __LINUX_SEQNO_FENCE_H
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/dma-buf.h>
enum seqno_fence_condition {
@@ -29,15 +29,15 @@ enum seqno_fence_condition {
};
struct seqno_fence {
- struct fence base;
+ struct dma_fence base;
- const struct fence_ops *ops;
+ const struct dma_fence_ops *ops;
struct dma_buf *sync_buf;
uint32_t seqno_ofs;
enum seqno_fence_condition condition;
};
-extern const struct fence_ops seqno_fence_ops;
+extern const struct dma_fence_ops seqno_fence_ops;
/**
* to_seqno_fence - cast a fence to a seqno_fence
@@ -47,7 +47,7 @@ extern const struct fence_ops seqno_fence_ops;
* or the seqno_fence otherwise.
*/
static inline struct seqno_fence *
-to_seqno_fence(struct fence *fence)
+to_seqno_fence(struct dma_fence *fence)
{
if (fence->ops != &seqno_fence_ops)
return NULL;
@@ -83,9 +83,9 @@ to_seqno_fence(struct fence *fence)
* dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
* device's vm can be expensive.
*
- * It is recommended for creators of seqno_fence to call fence_signal
+ * It is recommended for creators of seqno_fence to call dma_fence_signal()
* before destruction. This will prevent possible issues from wraparound at
- * time of issue vs time of check, since users can check fence_is_signaled
+ * time of issue vs time of check, since users can check dma_fence_is_signaled()
* before submitting instructions for the hardware to wait on the fence.
* However, when ops.enable_signaling is not called, it doesn't have to be
* done as soon as possible, just before there's any real danger of seqno
@@ -96,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
struct dma_buf *sync_buf, uint32_t context,
uint32_t seqno_ofs, uint32_t seqno,
enum seqno_fence_condition cond,
- const struct fence_ops *ops)
+ const struct dma_fence_ops *ops)
{
BUG_ON(!fence || !sync_buf || !ops);
BUG_ON(!ops->wait || !ops->enable_signaling ||
!ops->get_driver_name || !ops->get_timeline_name);
/*
- * ops is used in fence_init for get_driver_name, so needs to be
+ * ops is used in dma_fence_init for get_driver_name, so needs to be
* initialized first
*/
fence->ops = ops;
- fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
+ dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
get_dma_buf(sync_buf);
fence->sync_buf = sync_buf;
fence->seqno_ofs = seqno_ofs;
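A hedged sketch of a driver constructing a seqno fence over a device-written seqno buffer; demo_fence_ops, the zero offset, and the GEQUAL wait condition are illustrative assumptions, not requirements of the API:

#include <linux/seqno-fence.h>
#include <linux/slab.h>

extern const struct dma_fence_ops demo_fence_ops;	/* hypothetical */

static struct seqno_fence *demo_seqno_fence_create(struct dma_buf *sync_buf,
						   spinlock_t *lock,
						   u32 context, u32 seqno)
{
	struct seqno_fence *sf = kzalloc(sizeof(*sf), GFP_KERNEL);

	if (!sf)
		return NULL;

	/* Seqno lives at offset 0; signal once the value reaches seqno. */
	seqno_fence_init(sf, lock, sync_buf, context, 0, seqno,
			 SEQNO_FENCE_WAIT_GEQUAL, &demo_fence_ops);
	return sf;
}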
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index aa17ccfc2f57..3e3ab84fc4cd 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -18,8 +18,8 @@
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/fence.h>
-#include <linux/fence-array.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
/**
* struct sync_file - sync file to export to the userspace
@@ -41,13 +41,13 @@ struct sync_file {
wait_queue_head_t wq;
- struct fence *fence;
- struct fence_cb cb;
+ struct dma_fence *fence;
+ struct dma_fence_cb cb;
};
-#define POLL_ENABLED FENCE_FLAG_USER_BITS
+#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
-struct sync_file *sync_file_create(struct fence *fence);
-struct fence *sync_file_get_fence(int fd);
+struct sync_file *sync_file_create(struct dma_fence *fence);
+struct dma_fence *sync_file_get_fence(int fd);
#endif /* _LINUX_SYNC_H */
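On the kernel side these two entry points are mirror images: sync_file_create() exports a fence to userspace, sync_file_get_fence() imports one back. A hedged sketch of the usual export path for an explicit-fencing ioctl (names illustrative):

#include <linux/sync_file.h>
#include <linux/file.h>
#include <linux/fcntl.h>

static int demo_fence_to_fd(struct dma_fence *fence)
{
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	/* Userspace can later hand this fd back; see sync_file_get_fence(). */
	fd_install(fd, sync_file->file);
	return fd;
}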
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index 796cabf6be5e..5ab972e116ec 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -10,8 +10,9 @@
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
-int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate);
-int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
+ int dev_id, int rate);
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
bool *audio_enabled, char *buffer, int max_bytes);
int snd_hdac_i915_init(struct hdac_bus *bus);
int snd_hdac_i915_exit(struct hdac_bus *bus);
@@ -29,13 +30,13 @@ static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
}
static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
- hda_nid_t nid, int rate)
+ hda_nid_t nid, int dev_id, int rate)
{
return 0;
}
static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
- bool *audio_enabled, char *buffer,
- int max_bytes)
+ int dev_id, bool *audio_enabled,
+ char *buffer, int max_bytes)
{
return -ENODEV;
}
diff --git a/include/trace/events/fence.h b/include/trace/events/dma_fence.h
index d6dfa05ba322..1157cb4c3c6f 100644
--- a/include/trace/events/fence.h
+++ b/include/trace/events/dma_fence.h
@@ -1,17 +1,17 @@
#undef TRACE_SYSTEM
-#define TRACE_SYSTEM fence
+#define TRACE_SYSTEM dma_fence
-#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_FENCE_H
+#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DMA_FENCE_H
#include <linux/tracepoint.h>
-struct fence;
+struct dma_fence;
-TRACE_EVENT(fence_annotate_wait_on,
+TRACE_EVENT(dma_fence_annotate_wait_on,
/* fence: the fence waiting on f1, f1: the fence to be waited on. */
- TP_PROTO(struct fence *fence, struct fence *f1),
+ TP_PROTO(struct dma_fence *fence, struct dma_fence *f1),
TP_ARGS(fence, f1),
@@ -48,9 +48,9 @@ TRACE_EVENT(fence_annotate_wait_on,
__entry->waiting_context, __entry->waiting_seqno)
);
-DECLARE_EVENT_CLASS(fence,
+DECLARE_EVENT_CLASS(dma_fence,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence),
@@ -73,56 +73,56 @@ DECLARE_EVENT_CLASS(fence,
__entry->seqno)
);
-DEFINE_EVENT(fence, fence_emit,
+DEFINE_EVENT(dma_fence, dma_fence_emit,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_init,
+DEFINE_EVENT(dma_fence, dma_fence_init,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_destroy,
+DEFINE_EVENT(dma_fence, dma_fence_destroy,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_enable_signal,
+DEFINE_EVENT(dma_fence, dma_fence_enable_signal,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_signaled,
+DEFINE_EVENT(dma_fence, dma_fence_signaled,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_wait_start,
+DEFINE_EVENT(dma_fence, dma_fence_wait_start,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(fence, fence_wait_end,
+DEFINE_EVENT(dma_fence, dma_fence_wait_end,
- TP_PROTO(struct fence *fence),
+ TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-#endif /* _TRACE_FENCE_H */
+#endif /* _TRACE_DMA_FENCE_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index d6b5a21f3d3c..4684f378f046 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -81,6 +81,8 @@ extern "C" {
#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
/* Flag to create a shadow BO (in GTT) while allocating the VRAM BO */
#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
+/* Flag that the BO should be allocated in linear (physically contiguous) VRAM */
+#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -436,6 +438,7 @@ struct drm_amdgpu_cs_chunk_data {
*
*/
#define AMDGPU_IDS_FLAGS_FUSION 0x1
+#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING 0x00
@@ -487,6 +490,10 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
/* number of TTM buffer evictions */
#define AMDGPU_INFO_NUM_EVICTIONS 0x18
+/* Query memory about VRAM and GTT domains */
+#define AMDGPU_INFO_MEMORY 0x19
+/* Query vce clock table */
+#define AMDGPU_INFO_VCE_CLOCK_TABLE 0x1A
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -572,6 +579,34 @@ struct drm_amdgpu_info_vram_gtt {
__u64 gtt_size;
};
+struct drm_amdgpu_heap_info {
+ /** max. physical memory */
+ __u64 total_heap_size;
+
+ /** Theoretical max. available memory in the given heap */
+ __u64 usable_heap_size;
+
+ /**
+ * Number of bytes allocated in the heap. This includes all processes
+ * and private allocations in the kernel. It changes when new buffers
+ * are allocated, freed, and moved. It cannot be larger than
+ * total_heap_size.
+ */
+ __u64 heap_usage;
+
+ /**
+ * Theoretical maximum size of a single buffer that could be
+ * allocated in the given heap
+ */
+ __u64 max_allocation;
+};
+
+struct drm_amdgpu_memory_info {
+ struct drm_amdgpu_heap_info vram;
+ struct drm_amdgpu_heap_info cpu_accessible_vram;
+ struct drm_amdgpu_heap_info gtt;
+};
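Once userspace has fetched this struct via the new AMDGPU_INFO_MEMORY query (the ioctl plumbing is omitted here), interpreting it is straightforward; a hedged sketch:

#include <stdio.h>
#include <stdint.h>
#include <drm/amdgpu_drm.h>

static void demo_print_heaps(const struct drm_amdgpu_memory_info *mi)
{
	const struct drm_amdgpu_heap_info *heaps[] = {
		&mi->vram, &mi->cpu_accessible_vram, &mi->gtt,
	};
	const char *names[] = { "vram", "visible vram", "gtt" };

	for (int i = 0; i < 3; i++) {
		/* A rough estimate of free space in this heap. */
		uint64_t free = heaps[i]->usable_heap_size -
				heaps[i]->heap_usage;
		printf("%-12s usage %llu, est. free %llu, max alloc %llu\n",
		       names[i],
		       (unsigned long long)heaps[i]->heap_usage,
		       (unsigned long long)free,
		       (unsigned long long)heaps[i]->max_allocation);
	}
}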
+
struct drm_amdgpu_info_firmware {
__u32 ver;
__u32 feature;
@@ -645,6 +680,24 @@ struct drm_amdgpu_info_hw_ip {
__u32 _pad;
};
+#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6
+
+struct drm_amdgpu_info_vce_clock_table_entry {
+ /** System clock */
+ __u32 sclk;
+ /** Memory clock */
+ __u32 mclk;
+ /** VCE clock */
+ __u32 eclk;
+ __u32 pad;
+};
+
+struct drm_amdgpu_info_vce_clock_table {
+ struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
+ __u32 num_valid_entries;
+ __u32 pad;
+};
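Only the first num_valid_entries slots of the fixed-size table carry data; a hedged sketch of walking it (units of the clock values are not asserted here):

#include <stdio.h>
#include <drm/amdgpu_drm.h>

static void demo_print_vce_clocks(const struct drm_amdgpu_info_vce_clock_table *t)
{
	__u32 n = t->num_valid_entries;

	if (n > AMDGPU_VCE_CLOCK_TABLE_ENTRIES)	/* defensive clamp */
		n = AMDGPU_VCE_CLOCK_TABLE_ENTRIES;

	for (__u32 i = 0; i < n; i++)
		printf("level %u: sclk %u, mclk %u, eclk %u\n",
		       i, t->entries[i].sclk, t->entries[i].mclk,
		       t->entries[i].eclk);
}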
+
/*
* Supported GPU families
*/
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index df0e3504c349..084b50a02dc5 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -77,6 +77,25 @@ extern "C" {
#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
+/* Picture aspect ratio options */
+#define DRM_MODE_PICTURE_ASPECT_NONE 0
+#define DRM_MODE_PICTURE_ASPECT_4_3 1
+#define DRM_MODE_PICTURE_ASPECT_16_9 2
+#define DRM_MODE_PICTURE_ASPECT_64_27 3
+#define DRM_MODE_PICTURE_ASPECT_256_135 4
+
+/* Aspect ratio flag bitmask (4 bits 22:19) */
+#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
+#define DRM_MODE_FLAG_PIC_AR_NONE \
+ (DRM_MODE_PICTURE_ASPECT_NONE<<19)
+#define DRM_MODE_FLAG_PIC_AR_4_3 \
+ (DRM_MODE_PICTURE_ASPECT_4_3<<19)
+#define DRM_MODE_FLAG_PIC_AR_16_9 \
+ (DRM_MODE_PICTURE_ASPECT_16_9<<19)
+#define DRM_MODE_FLAG_PIC_AR_64_27 \
+ (DRM_MODE_PICTURE_ASPECT_64_27<<19)
+#define DRM_MODE_FLAG_PIC_AR_256_135 \
+ (DRM_MODE_PICTURE_ASPECT_256_135<<19)
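The new flags form a 4-bit field, so extraction is a mask and a shift; a hedged sketch (the shift of 19 matches the mask definition above, and demo_mode_picture_aspect is an illustrative name):

#include <drm/drm_mode.h>

/* Pull the picture aspect ratio back out of a mode's flags word. */
static inline __u32 demo_mode_picture_aspect(__u32 flags)
{
	/* Yields one of the DRM_MODE_PICTURE_ASPECT_* values. */
	return (flags & DRM_MODE_FLAG_PIC_AR_MASK) >> 19;
}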
/* DPMS flags */
/* bit compatible with the xorg definitions. */
@@ -92,11 +111,6 @@ extern "C" {
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
-/* Picture aspect ratio options */
-#define DRM_MODE_PICTURE_ASPECT_NONE 0
-#define DRM_MODE_PICTURE_ASPECT_4_3 1
-#define DRM_MODE_PICTURE_ASPECT_16_9 2
-
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
diff --git a/include/video/display_timing.h b/include/video/display_timing.h
index 28d9d0d566ca..3d289e990aca 100644
--- a/include/video/display_timing.h
+++ b/include/video/display_timing.h
@@ -28,6 +28,10 @@ enum display_flags {
DISPLAY_FLAGS_INTERLACED = BIT(8),
DISPLAY_FLAGS_DOUBLESCAN = BIT(9),
DISPLAY_FLAGS_DOUBLECLK = BIT(10),
+ /* drive sync on pos. edge */
+ DISPLAY_FLAGS_SYNC_POSEDGE = BIT(11),
+ /* drive sync on neg. edge */
+ DISPLAY_FLAGS_SYNC_NEGEDGE = BIT(12),
};
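A hedged sketch of a panel description using the new bits; DISPLAY_FLAGS_PIXDATA_POSEDGE is an existing flag from this enum, and the particular combination shown is purely illustrative:

#include <video/display_timing.h>

static void demo_fill_timing_flags(struct display_timing *dt)
{
	/* Latch pixel data on the rising edge, drive syncs on the falling. */
	dt->flags = DISPLAY_FLAGS_PIXDATA_POSEDGE |
		    DISPLAY_FLAGS_SYNC_NEGEDGE;
}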
/*
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index c9af022676c2..0659bf389489 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -193,6 +193,7 @@ static int pin2port(struct hdac_device *codec, hda_nid_t pin_nid)
* snd_hdac_sync_audio_rate - Set N/CTS based on the sample rate
* @codec: HDA codec
* @nid: the pin widget NID
+ * @dev_id: device identifier
* @rate: the sample rate to set
*
 * This function is supposed to be used only by an HD-audio controller
@@ -201,18 +202,20 @@ static int pin2port(struct hdac_device *codec, hda_nid_t pin_nid)
* This function sets N/CTS value based on the given sample rate.
* Returns zero for success, or a negative error code.
*/
-int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate)
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
+ int dev_id, int rate)
{
struct hdac_bus *bus = codec->bus;
struct i915_audio_component *acomp = bus->audio_component;
- int port;
+ int port, pipe;
if (!acomp || !acomp->ops || !acomp->ops->sync_audio_rate)
return -ENODEV;
port = pin2port(codec, nid);
if (port < 0)
return -EINVAL;
- return acomp->ops->sync_audio_rate(acomp->dev, port, rate);
+ pipe = dev_id;
+ return acomp->ops->sync_audio_rate(acomp->dev, port, pipe, rate);
}
EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate);
@@ -220,6 +223,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate);
* snd_hdac_acomp_get_eld - Get the audio state and ELD via component
* @codec: HDA codec
* @nid: the pin widget NID
+ * @dev_id: device identifier
* @audio_enabled: the pointer to store the current audio state
* @buffer: the buffer pointer to store ELD bytes
* @max_bytes: the max bytes to be stored on @buffer
@@ -236,12 +240,12 @@ EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate);
* thus it may be over @max_bytes. If it's over @max_bytes, it implies
 * that only part of the ELD bytes has been fetched.
*/
-int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
bool *audio_enabled, char *buffer, int max_bytes)
{
struct hdac_bus *bus = codec->bus;
struct i915_audio_component *acomp = bus->audio_component;
- int port;
+ int port, pipe;
if (!acomp || !acomp->ops || !acomp->ops->get_eld)
return -ENODEV;
@@ -249,7 +253,9 @@ int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
port = pin2port(codec, nid);
if (port < 0)
return -EINVAL;
- return acomp->ops->get_eld(acomp->dev, port, audio_enabled,
+
+ pipe = dev_id;
+ return acomp->ops->get_eld(acomp->dev, port, pipe, audio_enabled,
buffer, max_bytes);
}
EXPORT_SYMBOL_GPL(snd_hdac_acomp_get_eld);
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 56e5204ac9c1..cf9bc042fe96 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1485,7 +1485,7 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
mutex_lock(&per_pin->lock);
eld->monitor_present = false;
- size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid,
+ size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid, -1,
&eld->monitor_present, eld->eld_buffer,
ELD_MAX_SIZE);
if (size > 0) {
@@ -1744,7 +1744,8 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
/* Call sync_audio_rate to set the N/CTS/M manually if necessary */
/* Todo: add DP1.2 MST audio support later */
if (codec_has_acomp(codec))
- snd_hdac_sync_audio_rate(&codec->core, pin_nid, runtime->rate);
+ snd_hdac_sync_audio_rate(&codec->core, pin_nid, -1,
+ runtime->rate);
non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
mutex_lock(&per_pin->lock);
@@ -2290,7 +2291,7 @@ static void haswell_set_power_state(struct hda_codec *codec, hda_nid_t fg,
snd_hda_codec_set_power_to_all(codec, fg, power_state);
}
-static void intel_pin_eld_notify(void *audio_ptr, int port)
+static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
{
struct hda_codec *codec = audio_ptr;
int pin_nid;
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index c602c4960924..0c6228a0bf95 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1368,7 +1368,7 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
return hdac_hdmi_init_dai_map(edev);
}
-static void hdac_hdmi_eld_notify_cb(void *aptr, int port)
+static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
{
struct hdac_ext_device *edev = aptr;
struct hdac_hdmi_priv *hdmi = edev->private_data;