author    Linus Torvalds <torvalds@linux-foundation.org> 2022-08-03 19:52:08 -0700
committer Linus Torvalds <torvalds@linux-foundation.org> 2022-08-03 19:52:08 -0700
commit    b44f2fd87919b5ae6e1756d4c7ba2cbba22238e1 (patch)
tree      01ce17e44375c3f7707640bb44d6e012bab878c4 /drivers/gpu/drm/i915
parent    12b68040a5e468068fd7f4af1150eab8f6e96235 (diff)
parent    5493ee1919eae4f49d62276cf5986b7f7c7aa8f6 (diff)
Merge tag 'drm-next-2022-08-03' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "Highlights:

   - New driver for logicvc - which is a display IP core.
   - EDID parser rework to add new extensions
   - fbcon scrolling improvements
   - i915 has some more DG2 work but not enabled by default, but should
     have enough features for userspace to work now.

  Otherwise it's lots of work all over the place.

  Detailed summary:

  New driver:
   - logicvc

  vfio:
   - use aperture API

  core:
   - of: Add data-lane helpers and convert drivers
   - connector: Remove deprecated ida_simple_get()

  media:
   - Add various RGB666 and RGB888 format constants

  panel:
   - Add HannStar HSD101PWW
   - Add ETML0700Y5DHA

  dma-buf:
   - add sync-file API
   - set dma mask for udmabuf devices

  fbcon:
   - Improve scrolling performance
   - Sanitize input

  fbdev:
   - device unregistering fixes
   - vesa: Support COMPILE_TEST
   - Disable firmware-device registration when first native driver loads

  aperture:
   - fix segfault during hot-unplug
   - export for use with other subsystems

  client:
   - use driver validated modes

  dp:
   - aux: make probing more reliable
   - mst: Read extended DPCD capabilities during system resume
   - Support waiting for HDP signal
   - Port-validation fixes

  edid:
   - CEA data-block iterators
   - struct drm_edid introduction
   - implement HF-EEODB extension

  gem:
   - don't use fb format non-existing planes

  probe-helper:
   - use 640x480 as displayport fallback

  scheduler:
   - don't kill jobs in interrupt context

  bridge:
   - Add support for i.MX8qxp and i.MX8qm
   - lots of fixes/cleanups
   - Add TI-DLPC3433
   - fy07024di26a30d: Optional GPIO reset
   - ldb: Add reg and reg-name properties to bindings, Kconfig fixes
   - lt9611: Fix display sensing
   - tc358767: DSI/DPI refactoring and DSI-to-eDP support, DSI lane handling
   - tc358775: Fix clock settings
   - ti-sn65dsi83: Allow GPIO to sleep
   - adv7511: I2C fixes
   - anx7625: Fix error handling; DPI fixes; Implement HDP timeout via callback
   - fsl-ldb: Drop DE flip
   - ti-sn65dsi86: Convert to atomic modesetting

  amdgpu:
   - use atomic fence helpers in DM
   - fix VRAM address calculations
   - export CRTC bpc via debugfs
   - Initial devcoredump support
   - Enable high priority gfx queue on asics which support it
   - Adjust GART size on newer APUs for S/G display
   - Soft reset for GFX 11 / SDMA 6
   - Add gfxoff status query for vangogh
   - Fix timestamps for cursor only commits
   - fix buddy memory corruption

  amdkfd:
   - MMU notifier fixes
   - P2P DMA support using dma-buf
   - Add available memory IOCTL
   - HMM profiler support
   - Simplify GPUVM validation
   - Unified memory for CWSR save/restore area

  i915:
   - General driver clean-up
   - DG2 enabling (still under force probe)
   - DG2 small BAR memory support
   - HuC loading support
   - DG2 workarounds
   - DG2/ATS-M device IDs added
   - Ponte Vecchio prep work and new blitter engines
   - add Meteorlake support
   - Fix sparse warnings
   - DMC MMIO range checks
   - Audio related fixes
   - Runtime PM fixes
   - PSR fixes
   - Media freq factor and per-gt enhancements
   - DSI fixes for ICL+
   - Disable DMC flip queue handlers
   - ADL_P voltage swing updates
   - Use more the VBT for panel information
   - Fix on Type-C ports with TBT mode
   - Improve fastset and allow seamless M/N changes
   - Accept more fixed modes with VRR/DMRRS panels
   - Disable connector polling for a headless SKU
   - ADL-S display PLL w/a
   - Enable THP on Icelake and beyond
   - Fix i915_gem_object_ggtt_pin_ww regression on old platforms
   - Expose per tile media freq factor in sysfs
   - Fix dma_resv fence handling in multi-batch execbuf
   - Improve on suspend / resume time with VT-d enabled
   - export CRTC bpc settings via debugfs

  msm:
   - gpu: a619 support
   - gpu: Fix for unclocked GMU register access
   - gpu: Devcore dump enhancements
   - client utilization via fdinfo support
   - fix fence rollover issue
   - gem: Lockdep false-positive warning fix
   - gem: Switch to pfn mappings
   - WB support on sc7180
   - dp: dropped custom bulk clock implementation
   - fix link retraining on resolution change
   - hdmi: dropped obsolete GPIO support

  tegra:
   - context isolation for host1x engines
   - tegra234 soc support

  mediatek:
   - add vdosys0/1 for mt8195
   - add MT8195 dp_intf driver

  exynos:
   - Fix resume function issue of exynos decon driver by calling
     clk_disable_unprepare() properly if clk_prepare_enable() failed.

  nouveau:
   - set of misc fixes/cleanups
   - display cleanups

  gma500:
   - Cleanup connector I2C handling

  hyperv:
   - Unify VRAM allocation of Gen1 and Gen2

  meson:
   - Support YUV422 output; Refcount fixes

  mgag200:
   - Support damage clipping
   - Support gamma handling
   - Protect concurrent HW access
   - Fixes to connector
   - Store model-specific limits in device-info structure
   - fix PCI register init

  panfrost:
   - Valhall support

  r128:
   - Fix bit-shift overflow

  rockchip:
   - Locking fixes in error path

  ssd130x:
   - Fix built-in linkage

  udl:
   - Always advertize VGA connector

  ast:
   - Support multiple outputs
   - fix black screen on resume

  sun4i:
   - HDMI PHY cleanups

  vc4:
   - Add support for BCM2711

  vkms:
   - Allocate output buffer with vmalloc()

  mcde:
   - Fix ref-count leak

  mxsfb/lcdif:
   - Support i.MX8MP LCD controller

  stm/ltdc:
   - Support dynamic Z order
   - Support mirroring

  ingenic:
   - Fix display at maximum resolution"

* tag 'drm-next-2022-08-03' of git://anongit.freedesktop.org/drm/drm: (1480 commits)
  drm/amd/display: Fix a compilation failure on PowerPC caused by FPU code
  drm/amdgpu: enable support for psp 13.0.4 block
  drm/amdgpu: add files for PSP 13.0.4
  drm/amdgpu: add header files for MP 13.0.4
  drm/amdgpu: correct RLC_RLCS_BOOTLOAD_STATUS offset and index
  drm/amdgpu: send msg to IMU for the front-door loading
  drm/amdkfd: use time_is_before_jiffies(a + b) to replace "jiffies - a > b"
  drm/amdgpu: fix hive reference leak when reflecting psp topology info
  drm/amd/pm: enable GFX ULV feature support for SMU13.0.0
  drm/amd/pm: update driver if header for SMU 13.0.0
  drm/amdgpu: move mes self test after drm sched re-started
  drm/amdgpu: drop non-necessary call trace dump
  drm/amdgpu: enable VCN cg and JPEG cg/pg
  drm/amdgpu: vcn_4_0_2 video codec query
  drm/amdgpu: add VCN_4_0_2 firmware support
  drm/amdgpu: add VCN function in NBIO v7.7
  drm/amdgpu: fix a vcn4 boot poll bug in emulation mode
  drm/amd/amdgpu: add memory training support for PSP_V13
  drm/amdkfd: remove an unnecessary amdgpu_bo_ref
  drm/amd/pm: Add get_gfx_off_status interface for yellow carp
  ...
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 6
-rw-r--r-- drivers/gpu/drm/i915/TODO.txt | 2
-rw-r--r-- drivers/gpu/drm/i915/display/g4x_dp.c | 22
-rw-r--r-- drivers/gpu/drm/i915/display/hsw_ips.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_plane.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio_regs.h | 160
-rw-r--r-- drivers/gpu/drm/i915/display/intel_backlight.c | 24
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 657
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.h | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c | 51
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 314
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crtc_state_dump.h | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cursor.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 54
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 37
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 1846
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h | 23
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c | 28
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power_well.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 90
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.c | 103
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc_regs.h | 65
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 95
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll.c | 89
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 407
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpt.c | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_drrs.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 56
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lspcon.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_setup.c | 734
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_setup.h | 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_verify.c | 246
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_verify.h | 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_opregion.c | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_opregion.h | 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.c | 66
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_refclk.c | 91
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pps.c | 129
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pps.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 80
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_snps_phy.c | 77
-rw-r--r-- drivers/gpu/drm/i915/display/intel_snps_phy.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 67
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vrr.c | 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vrr.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/skl_universal_plane.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 28
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_create.c | 20
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 47
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 18
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 11
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 44
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 99
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gemfs.c | 50
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gemfs.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 250
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c | 141
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 87
-rw-r--r-- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.c | 24
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.h | 25
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context_types.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 143
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_regs.h | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_types.h | 34
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 627
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c | 132
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt_gmch.h | 27
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 37
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.c | 267
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.h | 24
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_debugfs.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_gmch.c | 654
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_gmch.h | 46
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_irq.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_mcr.c | 522
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_mcr.h | 58
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_regs.h | 83
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c | 177
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_types.h | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.h | 45
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_llc.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_mocs.c | 24
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rc6.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_region_lmem.c | 118
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 11
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c | 9
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu.c | 450
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu.h | 92
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c | 30
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 192
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 9
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_llc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rps.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_slpc.c | 323
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c | 93
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 62
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 28
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 97
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 26
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 138
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h | 9
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_active.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_driver.c | 54
-rw-r--r-- drivers/gpu/drm/i915/i915_drm_client.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 99
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.h | 39
-rw-r--r-- drivers/gpu/drm/i915/i915_getparam.c | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 69
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.h | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 160
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 158
-rw-r--r-- drivers/gpu/drm/i915/i915_query.c | 42
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 235
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 57
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_tasklet.h | 43
-rw-r--r-- drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 31
-rw-r--r-- drivers/gpu/drm/i915/i915_ttm_buddy_manager.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.h | 40
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 64
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 17
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 50
-rw-r--r-- drivers/gpu/drm/i915/intel_dram.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_memory_region.c | 18
-rw-r--r-- drivers/gpu/drm/i915/intel_memory_region.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.c | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.h | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_pcode.c | 93
-rw-r--r-- drivers/gpu/drm/i915/intel_pcode.h | 20
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 180
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.h | 14
-rw-r--r-- drivers/gpu/drm/i915/intel_step.c | 70
-rw-r--r-- drivers/gpu/drm/i915/intel_step.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 378
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.h | 8
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c | 5
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_uncore.c | 2
192 files changed, 8338 insertions, 5197 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d2b18f03a33c..522ef9b4aff3 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -103,6 +103,7 @@ gt-y += \
gt/intel_gt_debugfs.o \
gt/intel_gt_engines_debugfs.o \
gt/intel_gt_irq.o \
+ gt/intel_gt_mcr.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_debugfs.o \
gt/intel_gt_pm_irq.o \
@@ -129,7 +130,7 @@ gt-y += \
gt/shmem_utils.o \
gt/sysfs_engines.o
# x86 intel-gtt module support
-gt-$(CONFIG_X86) += gt/intel_gt_gmch.o
+gt-$(CONFIG_X86) += gt/intel_ggtt_gmch.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
@@ -220,6 +221,7 @@ i915-y += \
display/intel_combo_phy.o \
display/intel_connector.o \
display/intel_crtc.o \
+ display/intel_crtc_state_dump.o \
display/intel_cursor.o \
display/intel_display.o \
display/intel_display_power.o \
@@ -242,6 +244,8 @@ i915-y += \
display/intel_hdcp.o \
display/intel_hotplug.o \
display/intel_lpe_audio.o \
+ display/intel_modeset_verify.o \
+ display/intel_modeset_setup.o \
display/intel_overlay.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
diff --git a/drivers/gpu/drm/i915/TODO.txt b/drivers/gpu/drm/i915/TODO.txt
index 81a82c9c203f..879b08ca32b3 100644
--- a/drivers/gpu/drm/i915/TODO.txt
+++ b/drivers/gpu/drm/i915/TODO.txt
@@ -37,5 +37,5 @@ Smaller things:
https://lore.kernel.org/linux-mm/20210301083320.943079-1-hch@lst.de/
-- tasklet helpers in i915_gem.h also look a bit misplaced and should
+- tasklet helpers in i915_tasklet.h also look a bit misplaced and should
probably be moved to tasklet headers.
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 5a957acebfd6..82ad8fe7440c 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -395,26 +395,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
- if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
- pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
- /*
- * This is a big fat ugly hack.
- *
- * Some machines in UEFI boot mode provide us a VBT that has 18
- * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
- * unknown we fail to light up. Yet the same BIOS boots up with
- * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
- * max, not what it tells us to use.
- *
- * Note: This will still be broken if the eDP panel is not lit
- * up by the BIOS, and thus we can't get the mode at module
- * load.
- */
- drm_dbg_kms(&dev_priv->drm,
- "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
- dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
- }
+ if (intel_dp_is_edp(intel_dp))
+ intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
}
static void
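The "big fat ugly hack" is not deleted here, only centralized: the new call
site defers to intel_edp_fixup_vbt_bpp(), declared in intel_dp.h (see the
diffstat above). A minimal sketch of that helper, reconstructed from the
block removed above; the per-panel field location (connector->panel.vbt)
and the enc_to_intel_dp() lookup are assumptions based on the other hunks
in this series:

  void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
  {
          struct drm_i915_private *i915 = to_i915(encoder->base.dev);
          struct intel_connector *connector =
                  enc_to_intel_dp(encoder)->attached_connector;

          if (connector->panel.vbt.edp.bpp &&
              pipe_bpp > connector->panel.vbt.edp.bpp) {
                  /* trust the bpp the BIOS actually lit the panel up with
                   * over the VBT-claimed maximum (see the comment removed
                   * above for the full story) */
                  drm_dbg_kms(&i915->drm,
                              "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_bpp, connector->panel.vbt.edp.bpp);
                  connector->panel.vbt.edp.bpp = pipe_bpp;
          }
  }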
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 38014e0cc9ad..861dcd2eb890 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -28,7 +28,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
if (IS_BROADWELL(i915)) {
drm_WARN_ON(&i915->drm,
- snb_pcode_write(i915, DISPLAY_IPS_CONTROL,
+ snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
IPS_ENABLE | IPS_PCODE_CONTROL));
/*
* Quoting Art Runyan: "its not safe to expect any particular
@@ -62,7 +62,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
if (IS_BROADWELL(i915)) {
drm_WARN_ON(&i915->drm,
- snb_pcode_write(i915, DISPLAY_IPS_CONTROL, 0));
+ snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
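Both hunks in this file make the same mechanical change: the pcode helpers
are reworked in this pull (see intel_pcode.c/.h in the diffstat) to take
the uncore handle rather than the whole device pointer. The assumed shape
of the new prototype, inferred purely from the call sites above:

  /* assumption: only the first parameter changed */
  int snb_pcode_write(struct intel_uncore *uncore, u32 mbox, u32 val);

  /* before: snb_pcode_write(i915, DISPLAY_IPS_CONTROL, ...); */
  /* after:  snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, ...); */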
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 7fe1a4e57654..592e5adfed8b 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 19bf717fd4cb..5dcfa7feffa9 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1862,7 +1862,8 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
u32 ths_prepare_ns, tclk_trail_ns;
@@ -2049,6 +2050,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
/* attach connector to encoder */
intel_connector_attach_encoder(intel_connector, encoder);
+ intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
+
mutex_lock(&dev->mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
@@ -2062,13 +2065,13 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
intel_backlight_setup(intel_connector, INVALID_PIPE);
- if (dev_priv->vbt.dsi.config->dual_link)
+ if (intel_connector->panel.vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
else
intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
- intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+ intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
+ intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
for_each_dsi_port(port, intel_dsi->ports) {
struct intel_dsi_host *host;
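This hunk shows the recurring pattern behind the "Use more the VBT for
panel information" item in the summary: panel-specific VBT data moves from
the device-global i915->vbt into the connector's own intel_panel, filled
in by the new intel_bios_init_panel() call, so two local panels (the
DEVICE_HANDLE_LFP1/LFP2 handling visible later in the intel_bios.c diff)
can carry different VBT-derived settings. The before/after shape, taken
from the lines above:

  /* before: one global copy per device */
  if (dev_priv->vbt.dsi.config->dual_link) ...

  /* after: per-connector copy, populated during encoder init */
  intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
  if (intel_connector->panel.vbt.dsi.config->dual_link) ...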
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index f0f0dfce27ce..6c9ee905f132 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -30,6 +30,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
+#include "intel_audio_regs.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_de.h"
diff --git a/drivers/gpu/drm/i915/display/intel_audio_regs.h b/drivers/gpu/drm/i915/display/intel_audio_regs.h
new file mode 100644
index 000000000000..d1e5844e3484
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_audio_regs.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_AUDIO_REGS_H__
+#define __INTEL_AUDIO_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
+#define INTEL_AUDIO_DEVCL 0x808629FB
+#define INTEL_AUDIO_DEVBLC 0x80862801
+#define INTEL_AUDIO_DEVCTG 0x80862802
+
+#define G4X_AUD_CNTL_ST _MMIO(0x620B4)
+#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
+#define G4X_ELDV_DEVCTG (1 << 14)
+#define G4X_ELD_ADDR_MASK (0xf << 5)
+#define G4X_ELD_ACK (1 << 4)
+#define G4X_HDMIW_HDMIEDID _MMIO(0x6210C)
+
+#define _IBX_HDMIW_HDMIEDID_A 0xE2050
+#define _IBX_HDMIW_HDMIEDID_B 0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _IBX_HDMIW_HDMIEDID_A, \
+ _IBX_HDMIW_HDMIEDID_B)
+#define _IBX_AUD_CNTL_ST_A 0xE20B4
+#define _IBX_AUD_CNTL_ST_B 0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \
+ _IBX_AUD_CNTL_ST_B)
+#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
+#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
+#define IBX_ELD_ACK (1 << 4)
+#define IBX_AUD_CNTL_ST2 _MMIO(0xE20C0)
+#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
+#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
+
+#define _CPT_HDMIW_HDMIEDID_A 0xE5050
+#define _CPT_HDMIW_HDMIEDID_B 0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _CPT_HDMIW_HDMIEDID_A, _CPT_HDMIW_HDMIEDID_B)
+#define _CPT_AUD_CNTL_ST_A 0xE50B4
+#define _CPT_AUD_CNTL_ST_B 0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CNTL_ST_A, _CPT_AUD_CNTL_ST_B)
+#define CPT_AUD_CNTRL_ST2 _MMIO(0xE50C0)
+
+#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
+#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
+#define VLV_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _VLV_HDMIW_HDMIEDID_A, _VLV_HDMIW_HDMIEDID_B)
+#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
+#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
+#define VLV_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CNTL_ST_A, _VLV_AUD_CNTL_ST_B)
+#define VLV_AUD_CNTL_ST2 _MMIO(VLV_DISPLAY_BASE + 0x620C0)
+
+#define _IBX_AUD_CONFIG_A 0xe2000
+#define _IBX_AUD_CONFIG_B 0xe2100
+#define IBX_AUD_CFG(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CONFIG_A, _IBX_AUD_CONFIG_B)
+#define _CPT_AUD_CONFIG_A 0xe5000
+#define _CPT_AUD_CONFIG_B 0xe5100
+#define CPT_AUD_CFG(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CONFIG_A, _CPT_AUD_CONFIG_B)
+#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
+#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
+#define VLV_AUD_CFG(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CONFIG_A, _VLV_AUD_CONFIG_B)
+
+#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
+#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
+#define AUD_CONFIG_UPPER_N_SHIFT 20
+#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
+#define AUD_CONFIG_LOWER_N_SHIFT 4
+#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
+#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
+#define AUD_CONFIG_N(n) \
+ (((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \
+ (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16)
+#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+
+#define _HSW_AUD_CONFIG_A 0x65000
+#define _HSW_AUD_CONFIG_B 0x65100
+#define HSW_AUD_CFG(trans) _MMIO_TRANS(trans, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
+
+#define _HSW_AUD_MISC_CTRL_A 0x65010
+#define _HSW_AUD_MISC_CTRL_B 0x65110
+#define HSW_AUD_MISC_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
+
+#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
+#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
+#define HSW_AUD_M_CTS_ENABLE(trans) _MMIO_TRANS(trans, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
+#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
+#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
+#define AUD_CONFIG_M_MASK 0xfffff
+
+#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
+#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
+#define HSW_AUD_DIP_ELD_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define _HSW_AUD_DIG_CNVT_1 0x65080
+#define _HSW_AUD_DIG_CNVT_2 0x65180
+#define AUD_DIG_CNVT(trans) _MMIO_TRANS(trans, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK 0x3
+
+#define _HSW_AUD_EDID_DATA_A 0x65050
+#define _HSW_AUD_EDID_DATA_B 0x65150
+#define HSW_AUD_EDID_DATA(trans) _MMIO_TRANS(trans, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG _MMIO(0x6507c)
+#define HSW_AUD_PIN_ELD_CP_VLD _MMIO(0x650c0)
+#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
+#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
+#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
+#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
+
+#define _AUD_TCA_DP_2DOT0_CTRL 0x650bc
+#define _AUD_TCB_DP_2DOT0_CTRL 0x651bc
+#define AUD_DP_2DOT0_CTRL(trans) _MMIO_TRANS(trans, _AUD_TCA_DP_2DOT0_CTRL, _AUD_TCB_DP_2DOT0_CTRL)
+#define AUD_ENABLE_SDP_SPLIT REG_BIT(31)
+
+#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
+#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
+
+#define AUD_FREQ_CNTRL _MMIO(0x65900)
+#define AUD_PIN_BUF_CTL _MMIO(0x48414)
+#define AUD_PIN_BUF_ENABLE REG_BIT(31)
+
+#define AUD_TS_CDCLK_M _MMIO(0x65ea0)
+#define AUD_TS_CDCLK_M_EN REG_BIT(31)
+#define AUD_TS_CDCLK_N _MMIO(0x65ea4)
+
+/* Display Audio Config Reg */
+#define AUD_CONFIG_BE _MMIO(0x65ef0)
+#define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe)))
+#define HBLANK_EARLY_ENABLE_TGL(pipe) (0x1 << (24 + (pipe)))
+#define HBLANK_START_COUNT_MASK(pipe) (0x7 << (3 + ((pipe) * 6)))
+#define HBLANK_START_COUNT(pipe, val) (((val) & 0x7) << (3 + ((pipe)) * 6))
+#define NUMBER_SAMPLES_PER_LINE_MASK(pipe) (0x3 << ((pipe) * 6))
+#define NUMBER_SAMPLES_PER_LINE(pipe, val) (((val) & 0x3) << ((pipe) * 6))
+
+#define HBLANK_START_COUNT_8 0
+#define HBLANK_START_COUNT_16 1
+#define HBLANK_START_COUNT_32 2
+#define HBLANK_START_COUNT_64 3
+#define HBLANK_START_COUNT_96 4
+#define HBLANK_START_COUNT_128 5
+
+#endif /* __INTEL_AUDIO_REGS_H__ */
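Of the registers moved into this new header, AUD_CONFIG_N() is the one
doing non-obvious work: it scatters a 20-bit audio N value across two
non-contiguous register fields (upper 8 bits at 27:20, lower 12 bits at
15:4). A standalone check of the same packing; 6144 is the standard HDMI
N value for 48 kHz audio:

  #include <stdint.h>
  #include <stdio.h>

  /* same bit layout as AUD_CONFIG_N() above */
  static uint32_t aud_config_n(uint32_t n)
  {
          return (((n >> 12) & 0xff) << 20) |  /* AUD_CONFIG_UPPER_N */
                 ((n & 0xfff) << 4);           /* AUD_CONFIG_LOWER_N */
  }

  int main(void)
  {
          printf("0x%08x\n", aud_config_n(6144)); /* prints 0x00108000 */
          return 0;
  }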
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index c8e1fc53a881..110fc98ec280 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -3,6 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
+#include <linux/backlight.h>
#include <linux/kernel.h>
#include <linux/pwm.h>
#include <linux/string_helpers.h>
@@ -1159,9 +1160,10 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
}
-static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv)
+static u16 get_vbt_pwm_freq(struct intel_connector *connector)
{
- u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u16 pwm_freq_hz = connector->panel.vbt.backlight.pwm_freq_hz;
if (pwm_freq_hz) {
drm_dbg_kms(&dev_priv->drm,
@@ -1181,7 +1183,7 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
+ u16 pwm_freq_hz = get_vbt_pwm_freq(connector);
u32 pwm;
if (!panel->backlight.pwm_funcs->hz_to_pwm) {
@@ -1218,11 +1220,11 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
* against this by letting the minimum be at most (arbitrarily chosen)
* 25% of the max.
*/
- min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
- if (min != dev_priv->vbt.backlight.min_brightness) {
+ min = clamp_t(int, connector->panel.vbt.backlight.min_brightness, 0, 64);
+ if (min != connector->panel.vbt.backlight.min_brightness) {
drm_dbg_kms(&dev_priv->drm,
"clamping VBT min backlight %d/255 to %d/255\n",
- dev_priv->vbt.backlight.min_brightness, min);
+ connector->panel.vbt.backlight.min_brightness, min);
}
/* vbt value is a coefficient in range [0..255] */
@@ -1411,7 +1413,7 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl, val;
- panel->backlight.controller = dev_priv->vbt.backlight.controller;
+ panel->backlight.controller = connector->panel.vbt.backlight.controller;
pwm_ctl = intel_de_read(dev_priv,
BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -1484,7 +1486,7 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
u32 level;
/* Get the right PWM chip for DSI backlight according to VBT */
- if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+ if (connector->panel.vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
desc = "PMIC";
} else {
@@ -1513,11 +1515,11 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
- get_vbt_pwm_freq(dev_priv), level);
+ get_vbt_pwm_freq(connector), level);
} else {
/* Set period from VBT frequency, leave other settings at 0. */
panel->backlight.pwm_state.period =
- NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv);
+ NSEC_PER_SEC / get_vbt_pwm_freq(connector);
}
drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
@@ -1602,7 +1604,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
struct intel_panel *panel = &connector->panel;
int ret;
- if (!dev_priv->vbt.backlight.present) {
+ if (!connector->panel.vbt.backlight.present) {
if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
drm_dbg_kms(&dev_priv->drm,
"no backlight present per VBT, but present per quirk\n");
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 0c5638f5b72b..51dde5bfd956 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -25,6 +25,7 @@
*
*/
+#include <drm/drm_edid.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
@@ -123,7 +124,7 @@ find_raw_section(const void *_bdb, enum bdb_block_id section_id)
* Offset from the start of BDB to the start of the
* block data (just past the block header).
*/
-static u32 block_offset(const void *bdb, enum bdb_block_id section_id)
+static u32 raw_block_offset(const void *bdb, enum bdb_block_id section_id)
{
const void *block;
@@ -135,7 +136,7 @@ static u32 block_offset(const void *bdb, enum bdb_block_id section_id)
}
/* size of the block excluding the header */
-static u32 block_size(const void *bdb, enum bdb_block_id section_id)
+static u32 raw_block_size(const void *bdb, enum bdb_block_id section_id)
{
const void *block;
@@ -232,7 +233,7 @@ static bool validate_lfp_data_ptrs(const void *bdb,
int data_block_size, lfp_data_size;
int i;
- data_block_size = block_size(bdb, BDB_LVDS_LFP_DATA);
+ data_block_size = raw_block_size(bdb, BDB_LVDS_LFP_DATA);
if (data_block_size == 0)
return false;
@@ -309,7 +310,7 @@ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block)
u32 offset;
int i;
- offset = block_offset(bdb, BDB_LVDS_LFP_DATA);
+ offset = raw_block_offset(bdb, BDB_LVDS_LFP_DATA);
for (i = 0; i < 16; i++) {
if (ptrs->ptr[i].fp_timing.offset < offset ||
@@ -585,6 +586,14 @@ get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data,
return (const void *)data + ptrs->ptr[index].fp_timing.offset;
}
+static const struct lvds_pnp_id *
+get_lvds_pnp_id(const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
+ int index)
+{
+ return (const void *)data + ptrs->ptr[index].panel_pnp_id.offset;
+}
+
static const struct bdb_lvds_lfp_data_tail *
get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
const struct bdb_lvds_lfp_data_ptrs *ptrs)
@@ -595,12 +604,16 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
return NULL;
}
-static int opregion_get_panel_type(struct drm_i915_private *i915)
+static int opregion_get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
{
return intel_opregion_get_panel_type(i915);
}
-static int vbt_get_panel_type(struct drm_i915_private *i915)
+static int vbt_get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
{
const struct bdb_lvds_options *lvds_options;
@@ -608,16 +621,71 @@ static int vbt_get_panel_type(struct drm_i915_private *i915)
if (!lvds_options)
return -1;
- if (lvds_options->panel_type > 0xf) {
+ if (lvds_options->panel_type > 0xf &&
+ lvds_options->panel_type != 0xff) {
drm_dbg_kms(&i915->drm, "Invalid VBT panel type 0x%x\n",
lvds_options->panel_type);
return -1;
}
+ if (devdata && devdata->child.handle == DEVICE_HANDLE_LFP2)
+ return lvds_options->panel_type2;
+
+ drm_WARN_ON(&i915->drm, devdata && devdata->child.handle != DEVICE_HANDLE_LFP1);
+
return lvds_options->panel_type;
}
-static int fallback_get_panel_type(struct drm_i915_private *i915)
+static int pnpid_get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
+{
+ const struct bdb_lvds_lfp_data *data;
+ const struct bdb_lvds_lfp_data_ptrs *ptrs;
+ const struct lvds_pnp_id *edid_id;
+ struct lvds_pnp_id edid_id_nodate;
+ int i, best = -1;
+
+ if (!edid)
+ return -1;
+
+ edid_id = (const void *)&edid->mfg_id[0];
+
+ edid_id_nodate = *edid_id;
+ edid_id_nodate.mfg_week = 0;
+ edid_id_nodate.mfg_year = 0;
+
+ ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ if (!ptrs)
+ return -1;
+
+ data = find_section(i915, BDB_LVDS_LFP_DATA);
+ if (!data)
+ return -1;
+
+ for (i = 0; i < 16; i++) {
+ const struct lvds_pnp_id *vbt_id =
+ get_lvds_pnp_id(data, ptrs, i);
+
+ /* full match? */
+ if (!memcmp(vbt_id, edid_id, sizeof(*vbt_id)))
+ return i;
+
+ /*
+ * Accept a match w/o date if no full match is found,
+ * and the VBT entry does not specify a date.
+ */
+ if (best < 0 &&
+ !memcmp(vbt_id, &edid_id_nodate, sizeof(*vbt_id)))
+ best = i;
+ }
+
+ return best;
+}
+
+static int fallback_get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
{
return 0;
}
@@ -625,14 +693,19 @@ static int fallback_get_panel_type(struct drm_i915_private *i915)
enum panel_type {
PANEL_TYPE_OPREGION,
PANEL_TYPE_VBT,
+ PANEL_TYPE_PNPID,
PANEL_TYPE_FALLBACK,
};
-static int get_panel_type(struct drm_i915_private *i915)
+static int get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
{
struct {
const char *name;
- int (*get_panel_type)(struct drm_i915_private *i915);
+ int (*get_panel_type)(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid);
int panel_type;
} panel_types[] = {
[PANEL_TYPE_OPREGION] = {
@@ -643,6 +716,10 @@ static int get_panel_type(struct drm_i915_private *i915)
.name = "VBT",
.get_panel_type = vbt_get_panel_type,
},
+ [PANEL_TYPE_PNPID] = {
+ .name = "PNPID",
+ .get_panel_type = pnpid_get_panel_type,
+ },
[PANEL_TYPE_FALLBACK] = {
.name = "fallback",
.get_panel_type = fallback_get_panel_type,
@@ -651,9 +728,10 @@ static int get_panel_type(struct drm_i915_private *i915)
int i;
for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
- panel_types[i].panel_type = panel_types[i].get_panel_type(i915);
+ panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata, edid);
- drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf);
+ drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf &&
+ panel_types[i].panel_type != 0xff);
if (panel_types[i].panel_type >= 0)
drm_dbg_kms(&i915->drm, "Panel type (%s): %d\n",
@@ -662,7 +740,11 @@ static int get_panel_type(struct drm_i915_private *i915)
if (panel_types[PANEL_TYPE_OPREGION].panel_type >= 0)
i = PANEL_TYPE_OPREGION;
- else if (panel_types[PANEL_TYPE_VBT].panel_type >= 0)
+ else if (panel_types[PANEL_TYPE_VBT].panel_type == 0xff &&
+ panel_types[PANEL_TYPE_PNPID].panel_type >= 0)
+ i = PANEL_TYPE_PNPID;
+ else if (panel_types[PANEL_TYPE_VBT].panel_type != 0xff &&
+ panel_types[PANEL_TYPE_VBT].panel_type >= 0)
i = PANEL_TYPE_VBT;
else
i = PANEL_TYPE_FALLBACK;
@@ -673,26 +755,41 @@ static int get_panel_type(struct drm_i915_private *i915)
return panel_types[i].panel_type;
}
+static unsigned int panel_bits(unsigned int value, int panel_type, int num_bits)
+{
+ return (value >> (panel_type * num_bits)) & (BIT(num_bits) - 1);
+}
+
+static bool panel_bool(unsigned int value, int panel_type)
+{
+ return panel_bits(value, panel_type, 1);
+}
+
/* Parse general panel options */
static void
-parse_panel_options(struct drm_i915_private *i915)
+parse_panel_options(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_lvds_options *lvds_options;
- int panel_type;
+ int panel_type = panel->vbt.panel_type;
int drrs_mode;
lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
- i915->vbt.lvds_dither = lvds_options->pixel_dither;
+ panel->vbt.lvds_dither = lvds_options->pixel_dither;
- panel_type = get_panel_type(i915);
-
- i915->vbt.panel_type = panel_type;
+ /*
+ * Empirical evidence indicates the block size can be
+ * either 4,14,16,24+ bytes. For older VBTs no clear
+ * relationship between the block size vs. BDB version.
+ */
+ if (get_blocksize(lvds_options) < 16)
+ return;
- drrs_mode = (lvds_options->dps_panel_type_bits
- >> (panel_type * 2)) & MODE_MASK;
+ drrs_mode = panel_bits(lvds_options->dps_panel_type_bits,
+ panel_type, 2);
/*
* VBT has static DRRS = 0 and seamless DRRS = 2.
* The below piece of code is required to adjust vbt.drrs_type
@@ -700,16 +797,16 @@ parse_panel_options(struct drm_i915_private *i915)
*/
switch (drrs_mode) {
case 0:
- i915->vbt.drrs_type = DRRS_TYPE_STATIC;
+ panel->vbt.drrs_type = DRRS_TYPE_STATIC;
drm_dbg_kms(&i915->drm, "DRRS supported mode is static\n");
break;
case 2:
- i915->vbt.drrs_type = DRRS_TYPE_SEAMLESS;
+ panel->vbt.drrs_type = DRRS_TYPE_SEAMLESS;
drm_dbg_kms(&i915->drm,
"DRRS supported mode is seamless\n");
break;
default:
- i915->vbt.drrs_type = DRRS_TYPE_NONE;
+ panel->vbt.drrs_type = DRRS_TYPE_NONE;
drm_dbg_kms(&i915->drm,
"DRRS not supported (VBT input)\n");
break;
@@ -718,13 +815,14 @@ parse_panel_options(struct drm_i915_private *i915)
static void
parse_lfp_panel_dtd(struct drm_i915_private *i915,
+ struct intel_panel *panel,
const struct bdb_lvds_lfp_data *lvds_lfp_data,
const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs)
{
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
@@ -736,7 +834,7 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
- i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+ panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
drm_dbg_kms(&i915->drm,
"Found panel mode in BIOS VBT legacy lfp table: " DRM_MODE_FMT "\n",
@@ -749,20 +847,21 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
/* check the resolution, just to be sure */
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
- i915->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
+ panel->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
drm_dbg_kms(&i915->drm,
"VBT initial LVDS value %x\n",
- i915->vbt.bios_lvds_val);
+ panel->vbt.bios_lvds_val);
}
}
static void
-parse_lfp_data(struct drm_i915_private *i915)
+parse_lfp_data(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_tail *tail;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
@@ -772,24 +871,25 @@ parse_lfp_data(struct drm_i915_private *i915)
if (!data)
return;
- if (!i915->vbt.lfp_lvds_vbt_mode)
- parse_lfp_panel_dtd(i915, data, ptrs);
+ if (!panel->vbt.lfp_lvds_vbt_mode)
+ parse_lfp_panel_dtd(i915, panel, data, ptrs);
tail = get_lfp_data_tail(data, ptrs);
if (!tail)
return;
if (i915->vbt.version >= 188) {
- i915->vbt.seamless_drrs_min_refresh_rate =
+ panel->vbt.seamless_drrs_min_refresh_rate =
tail->seamless_drrs_min_refresh_rate[panel_type];
drm_dbg_kms(&i915->drm,
"Seamless DRRS min refresh rate: %d Hz\n",
- i915->vbt.seamless_drrs_min_refresh_rate);
+ panel->vbt.seamless_drrs_min_refresh_rate);
}
}
static void
-parse_generic_dtd(struct drm_i915_private *i915)
+parse_generic_dtd(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_generic_dtd *generic_dtd;
const struct generic_dtd_entry *dtd;
@@ -824,14 +924,14 @@ parse_generic_dtd(struct drm_i915_private *i915)
num_dtd = (get_blocksize(generic_dtd) -
sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
- if (i915->vbt.panel_type >= num_dtd) {
+ if (panel->vbt.panel_type >= num_dtd) {
drm_err(&i915->drm,
"Panel type %d not found in table of %d DTD's\n",
- i915->vbt.panel_type, num_dtd);
+ panel->vbt.panel_type, num_dtd);
return;
}
- dtd = &generic_dtd->dtd[i915->vbt.panel_type];
+ dtd = &generic_dtd->dtd[panel->vbt.panel_type];
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
@@ -874,15 +974,16 @@ parse_generic_dtd(struct drm_i915_private *i915)
"Found panel mode in BIOS VBT generic dtd table: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(panel_fixed_mode));
- i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+ panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
}
static void
-parse_lfp_backlight(struct drm_i915_private *i915)
+parse_lfp_backlight(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct lfp_backlight_data_entry *entry;
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
u16 level;
backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT);
@@ -898,15 +999,15 @@ parse_lfp_backlight(struct drm_i915_private *i915)
entry = &backlight_data->data[panel_type];
- i915->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
- if (!i915->vbt.backlight.present) {
+ panel->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
+ if (!panel->vbt.backlight.present) {
drm_dbg_kms(&i915->drm,
"PWM backlight not present in VBT (type %u)\n",
entry->type);
return;
}
- i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
if (i915->vbt.version >= 191) {
size_t exp_size;
@@ -921,13 +1022,13 @@ parse_lfp_backlight(struct drm_i915_private *i915)
const struct lfp_backlight_control_method *method;
method = &backlight_data->backlight_control[panel_type];
- i915->vbt.backlight.type = method->type;
- i915->vbt.backlight.controller = method->controller;
+ panel->vbt.backlight.type = method->type;
+ panel->vbt.backlight.controller = method->controller;
}
}
- i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
- i915->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+ panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+ panel->vbt.backlight.active_low_pwm = entry->active_low_pwm;
if (i915->vbt.version >= 234) {
u16 min_level;
@@ -948,28 +1049,29 @@ parse_lfp_backlight(struct drm_i915_private *i915)
drm_warn(&i915->drm, "Brightness min level > 255\n");
level = 255;
}
- i915->vbt.backlight.min_brightness = min_level;
+ panel->vbt.backlight.min_brightness = min_level;
- i915->vbt.backlight.brightness_precision_bits =
+ panel->vbt.backlight.brightness_precision_bits =
backlight_data->brightness_precision_bits[panel_type];
} else {
level = backlight_data->level[panel_type];
- i915->vbt.backlight.min_brightness = entry->min_brightness;
+ panel->vbt.backlight.min_brightness = entry->min_brightness;
}
drm_dbg_kms(&i915->drm,
"VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u, controller %u\n",
- i915->vbt.backlight.pwm_freq_hz,
- i915->vbt.backlight.active_low_pwm ? "low" : "high",
- i915->vbt.backlight.min_brightness,
+ panel->vbt.backlight.pwm_freq_hz,
+ panel->vbt.backlight.active_low_pwm ? "low" : "high",
+ panel->vbt.backlight.min_brightness,
level,
- i915->vbt.backlight.controller);
+ panel->vbt.backlight.controller);
}
/* Try to find sdvo panel data */
static void
-parse_sdvo_panel_data(struct drm_i915_private *i915)
+parse_sdvo_panel_data(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_sdvo_panel_dtds *dtds;
struct drm_display_mode *panel_fixed_mode;
@@ -1002,7 +1104,7 @@ parse_sdvo_panel_data(struct drm_i915_private *i915)
fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]);
- i915->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
+ panel->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
drm_dbg_kms(&i915->drm,
"Found SDVO panel mode in BIOS VBT tables: " DRM_MODE_FMT "\n",
@@ -1181,6 +1283,17 @@ parse_driver_features(struct drm_i915_private *i915)
driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
i915->vbt.int_lvds_support = 0;
}
+}
+
+static void
+parse_panel_driver_features(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const struct bdb_driver_features *driver;
+
+ driver = find_section(i915, BDB_DRIVER_FEATURES);
+ if (!driver)
+ return;
if (i915->vbt.version < 228) {
drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
@@ -1191,18 +1304,29 @@ parse_driver_features(struct drm_i915_private *i915)
* static DRRS is 0 and DRRS not supported is represented by
* driver->drrs_enabled=false
*/
- if (!driver->drrs_enabled)
- i915->vbt.drrs_type = DRRS_TYPE_NONE;
+ if (!driver->drrs_enabled && panel->vbt.drrs_type != DRRS_TYPE_NONE) {
+ /*
+ * FIXME Should DMRRS perhaps be treated as seamless
+ * but without the automatic downclocking?
+ */
+ if (driver->dmrrs_enabled)
+ panel->vbt.drrs_type = DRRS_TYPE_STATIC;
+ else
+ panel->vbt.drrs_type = DRRS_TYPE_NONE;
+ }
- i915->vbt.psr.enable = driver->psr_enabled;
+ panel->vbt.psr.enable = driver->psr_enabled;
}
}
static void
-parse_power_conservation_features(struct drm_i915_private *i915)
+parse_power_conservation_features(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_lfp_power *power;
- u8 panel_type = i915->vbt.panel_type;
+ u8 panel_type = panel->vbt.panel_type;
+
+ panel->vbt.vrr = true; /* matches Windows behaviour */
if (i915->vbt.version < 228)
return;
@@ -1211,7 +1335,7 @@ parse_power_conservation_features(struct drm_i915_private *i915)
if (!power)
return;
- i915->vbt.psr.enable = power->psr & BIT(panel_type);
+ panel->vbt.psr.enable = panel_bool(power->psr, panel_type);
/*
* If DRRS is not supported, drrs_type has to be set to 0.
@@ -1219,34 +1343,47 @@ parse_power_conservation_features(struct drm_i915_private *i915)
* static DRRS is 0 and DRRS not supported is represented by
* power->drrs & BIT(panel_type)=false
*/
- if (!(power->drrs & BIT(panel_type)))
- i915->vbt.drrs_type = DRRS_TYPE_NONE;
+ if (!panel_bool(power->drrs, panel_type) && panel->vbt.drrs_type != DRRS_TYPE_NONE) {
+ /*
+ * FIXME Should DMRRS perhaps be treated as seamless
+ * but without the automatic downclocking?
+ */
+ if (panel_bool(power->dmrrs, panel_type))
+ panel->vbt.drrs_type = DRRS_TYPE_STATIC;
+ else
+ panel->vbt.drrs_type = DRRS_TYPE_NONE;
+ }
if (i915->vbt.version >= 232)
- i915->vbt.edp.hobl = power->hobl & BIT(panel_type);
+ panel->vbt.edp.hobl = panel_bool(power->hobl, panel_type);
+
+ if (i915->vbt.version >= 233)
+ panel->vbt.vrr = panel_bool(power->vrr_feature_enabled,
+ panel_type);
}
static void
-parse_edp(struct drm_i915_private *i915)
+parse_edp(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
const struct edp_fast_link_params *edp_link_params;
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
edp = find_section(i915, BDB_EDP);
if (!edp)
return;
- switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ switch (panel_bits(edp->color_depth, panel_type, 2)) {
case EDP_18BPP:
- i915->vbt.edp.bpp = 18;
+ panel->vbt.edp.bpp = 18;
break;
case EDP_24BPP:
- i915->vbt.edp.bpp = 24;
+ panel->vbt.edp.bpp = 24;
break;
case EDP_30BPP:
- i915->vbt.edp.bpp = 30;
+ panel->vbt.edp.bpp = 30;
break;
}
@@ -1254,31 +1391,39 @@ parse_edp(struct drm_i915_private *i915)
edp_pps = &edp->power_seqs[panel_type];
edp_link_params = &edp->fast_link_params[panel_type];
- i915->vbt.edp.pps = *edp_pps;
+ panel->vbt.edp.pps = *edp_pps;
- switch (edp_link_params->rate) {
- case EDP_RATE_1_62:
- i915->vbt.edp.rate = DP_LINK_BW_1_62;
- break;
- case EDP_RATE_2_7:
- i915->vbt.edp.rate = DP_LINK_BW_2_7;
- break;
- default:
- drm_dbg_kms(&i915->drm,
- "VBT has unknown eDP link rate value %u\n",
- edp_link_params->rate);
- break;
+ if (i915->vbt.version >= 224) {
+ panel->vbt.edp.rate =
+ edp->edp_fast_link_training_rate[panel_type] * 20;
+ } else {
+ switch (edp_link_params->rate) {
+ case EDP_RATE_1_62:
+ panel->vbt.edp.rate = 162000;
+ break;
+ case EDP_RATE_2_7:
+ panel->vbt.edp.rate = 270000;
+ break;
+ case EDP_RATE_5_4:
+ panel->vbt.edp.rate = 540000;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+ "VBT has unknown eDP link rate value %u\n",
+ edp_link_params->rate);
+ break;
+ }
}
switch (edp_link_params->lanes) {
case EDP_LANE_1:
- i915->vbt.edp.lanes = 1;
+ panel->vbt.edp.lanes = 1;
break;
case EDP_LANE_2:
- i915->vbt.edp.lanes = 2;
+ panel->vbt.edp.lanes = 2;
break;
case EDP_LANE_4:
- i915->vbt.edp.lanes = 4;
+ panel->vbt.edp.lanes = 4;
break;
default:
drm_dbg_kms(&i915->drm,
@@ -1289,16 +1434,16 @@ parse_edp(struct drm_i915_private *i915)
switch (edp_link_params->preemphasis) {
case EDP_PREEMPHASIS_NONE:
- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
break;
case EDP_PREEMPHASIS_3_5dB:
- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
break;
case EDP_PREEMPHASIS_6dB:
- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
break;
case EDP_PREEMPHASIS_9_5dB:
- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break;
default:
drm_dbg_kms(&i915->drm,
@@ -1309,16 +1454,16 @@ parse_edp(struct drm_i915_private *i915)
switch (edp_link_params->vswing) {
case EDP_VSWING_0_4V:
- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
break;
case EDP_VSWING_0_6V:
- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
break;
case EDP_VSWING_0_8V:
- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
break;
case EDP_VSWING_1_2V:
- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break;
default:
drm_dbg_kms(&i915->drm,
@@ -1332,24 +1477,29 @@ parse_edp(struct drm_i915_private *i915)
/* Don't read from VBT if module parameter has valid value*/
if (i915->params.edp_vswing) {
- i915->vbt.edp.low_vswing =
+ panel->vbt.edp.low_vswing =
i915->params.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
- i915->vbt.edp.low_vswing = vswing == 0;
+ panel->vbt.edp.low_vswing = vswing == 0;
}
}
- i915->vbt.edp.drrs_msa_timing_delay =
- (edp->sdrrs_msa_timing_delay >> (panel_type * 2)) & 3;
+ panel->vbt.edp.drrs_msa_timing_delay =
+ panel_bits(edp->sdrrs_msa_timing_delay, panel_type, 2);
+
+ if (i915->vbt.version >= 244)
+ panel->vbt.edp.max_link_rate =
+ edp->edp_max_port_link_rate[panel_type] * 20;
}
static void
-parse_psr(struct drm_i915_private *i915)
+parse_psr(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_psr *psr;
const struct psr_table *psr_table;
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
psr = find_section(i915, BDB_PSR);
if (!psr) {
@@ -1359,11 +1509,11 @@ parse_psr(struct drm_i915_private *i915)
psr_table = &psr->psr_table[panel_type];
- i915->vbt.psr.full_link = psr_table->full_link;
- i915->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
+ panel->vbt.psr.full_link = psr_table->full_link;
+ panel->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
/* Allowed VBT values go from 0 to 15 */
- i915->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
+ panel->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
/*
@@ -1374,13 +1524,13 @@ parse_psr(struct drm_i915_private *i915)
(DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
switch (psr_table->tp1_wakeup_time) {
case 0:
- i915->vbt.psr.tp1_wakeup_time_us = 500;
+ panel->vbt.psr.tp1_wakeup_time_us = 500;
break;
case 1:
- i915->vbt.psr.tp1_wakeup_time_us = 100;
+ panel->vbt.psr.tp1_wakeup_time_us = 100;
break;
case 3:
- i915->vbt.psr.tp1_wakeup_time_us = 0;
+ panel->vbt.psr.tp1_wakeup_time_us = 0;
break;
default:
drm_dbg_kms(&i915->drm,
@@ -1388,19 +1538,19 @@ parse_psr(struct drm_i915_private *i915)
psr_table->tp1_wakeup_time);
fallthrough;
case 2:
- i915->vbt.psr.tp1_wakeup_time_us = 2500;
+ panel->vbt.psr.tp1_wakeup_time_us = 2500;
break;
}
switch (psr_table->tp2_tp3_wakeup_time) {
case 0:
- i915->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 500;
break;
case 1:
- i915->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 100;
break;
case 3:
- i915->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 0;
break;
default:
drm_dbg_kms(&i915->drm,
@@ -1408,18 +1558,18 @@ parse_psr(struct drm_i915_private *i915)
psr_table->tp2_tp3_wakeup_time);
fallthrough;
case 2:
- i915->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
break;
}
} else {
- i915->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
- i915->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+ panel->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
if (i915->vbt.version >= 226) {
u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
- wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
+ wakeup_time = panel_bits(wakeup_time, panel_type, 2);
switch (wakeup_time) {
case 0:
wakeup_time = 500;
@@ -1435,62 +1585,64 @@ parse_psr(struct drm_i915_private *i915)
wakeup_time = 2500;
break;
}
- i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
+ panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
} else {
/* Reusing PSR1 wakeup time for PSR2 in older VBTs */
- i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = i915->vbt.psr.tp2_tp3_wakeup_time_us;
+ panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = panel->vbt.psr.tp2_tp3_wakeup_time_us;
}
}
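All of the 2-bit wakeup-time fields decoded on the DISPLAY_VER >= 9 path above share one mapping; the older path simply multiplies the raw value by 100 us. An illustrative decode of that shared mapping (not a helper from the patch):

static int psr_wakeup_time_us(u32 code)
{
	switch (code & 3) {
	case 0: return 500;
	case 1: return 100;
	case 3: return 0;
	default: return 2500;	/* code 2; malformed VBTs land here too */
	}
}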
static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
- u16 version, enum port port)
+ struct intel_panel *panel,
+ enum port port)
{
- if (!i915->vbt.dsi.config->dual_link || version < 197) {
- i915->vbt.dsi.bl_ports = BIT(port);
- if (i915->vbt.dsi.config->cabc_supported)
- i915->vbt.dsi.cabc_ports = BIT(port);
+ if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) {
+ panel->vbt.dsi.bl_ports = BIT(port);
+ if (panel->vbt.dsi.config->cabc_supported)
+ panel->vbt.dsi.cabc_ports = BIT(port);
return;
}
- switch (i915->vbt.dsi.config->dl_dcs_backlight_ports) {
+ switch (panel->vbt.dsi.config->dl_dcs_backlight_ports) {
case DL_DCS_PORT_A:
- i915->vbt.dsi.bl_ports = BIT(PORT_A);
+ panel->vbt.dsi.bl_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- i915->vbt.dsi.bl_ports = BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(PORT_C);
break;
default:
case DL_DCS_PORT_A_AND_C:
- i915->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
break;
}
- if (!i915->vbt.dsi.config->cabc_supported)
+ if (!panel->vbt.dsi.config->cabc_supported)
return;
- switch (i915->vbt.dsi.config->dl_dcs_cabc_ports) {
+ switch (panel->vbt.dsi.config->dl_dcs_cabc_ports) {
case DL_DCS_PORT_A:
- i915->vbt.dsi.cabc_ports = BIT(PORT_A);
+ panel->vbt.dsi.cabc_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- i915->vbt.dsi.cabc_ports = BIT(PORT_C);
+ panel->vbt.dsi.cabc_ports = BIT(PORT_C);
break;
default:
case DL_DCS_PORT_A_AND_C:
- i915->vbt.dsi.cabc_ports =
+ panel->vbt.dsi.cabc_ports =
BIT(PORT_A) | BIT(PORT_C);
break;
}
}
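Since bl_ports and cabc_ports are plain port bitmasks, consumers only need a BIT() test. A hypothetical consumer, purely for illustration:

/* Does this DSI port drive the DCS backlight? (sketch, not from the patch) */
static bool dsi_port_has_backlight(const struct intel_panel *panel,
				   enum port port)
{
	return panel->vbt.dsi.bl_ports & BIT(port);
}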
static void
-parse_mipi_config(struct drm_i915_private *i915)
+parse_mipi_config(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
const struct bdb_mipi_config *start;
const struct mipi_config *config;
const struct mipi_pps_data *pps;
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
enum port port;
/* parse MIPI blocks only if LFP type is MIPI */
@@ -1498,7 +1650,7 @@ parse_mipi_config(struct drm_i915_private *i915)
return;
/* Initialize this to undefined indicating no generic MIPI support */
- i915->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
+ panel->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
/* Block #40 is already parsed and panel_fixed_mode is
* stored in i915->lfp_lvds_vbt_mode
@@ -1525,17 +1677,17 @@ parse_mipi_config(struct drm_i915_private *i915)
pps = &start->pps[panel_type];
/* store the full data for now; trim once we know what is actually needed */
- i915->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
- if (!i915->vbt.dsi.config)
+ panel->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
+ if (!panel->vbt.dsi.config)
return;
- i915->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
- if (!i915->vbt.dsi.pps) {
- kfree(i915->vbt.dsi.config);
+ panel->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
+ if (!panel->vbt.dsi.pps) {
+ kfree(panel->vbt.dsi.config);
return;
}
- parse_dsi_backlight_ports(i915, i915->vbt.version, port);
+ parse_dsi_backlight_ports(i915, panel, port);
/* FIXME is the 90 vs. 270 correct? */
switch (config->rotation) {
@@ -1544,25 +1696,25 @@ parse_mipi_config(struct drm_i915_private *i915)
* Most (all?) VBTs claim 0 degrees despite having
* an upside down panel, thus we do not trust this.
*/
- i915->vbt.dsi.orientation =
+ panel->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
break;
case ENABLE_ROTATION_90:
- i915->vbt.dsi.orientation =
+ panel->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
break;
case ENABLE_ROTATION_180:
- i915->vbt.dsi.orientation =
+ panel->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
break;
case ENABLE_ROTATION_270:
- i915->vbt.dsi.orientation =
+ panel->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
break;
}
/* We have mandatory mipi config blocks. Initialize as generic panel */
- i915->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+ panel->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
/* Find the sequence block and size for the given panel. */
@@ -1725,13 +1877,14 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
* Get the length of the prefixed deassert fragment of a v1 init OTP sequence,
* skip all delay + gpio operands and stop at the first DSI packet op.
*/
-static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915)
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
- const u8 *data = i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ const u8 *data = panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
int index, len;
if (drm_WARN_ON(&i915->drm,
- !data || i915->vbt.dsi.seq_version != 1))
+ !data || panel->vbt.dsi.seq_version != 1))
return 0;
/* index = 1 to skip sequence byte */
@@ -1759,7 +1912,8 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915)
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
*/
-static void fixup_mipi_sequences(struct drm_i915_private *i915)
+static void fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
u8 *init_otp;
int len;
@@ -1769,18 +1923,18 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915)
return;
/* Limit this to v1 vid-mode sequences */
- if (i915->vbt.dsi.config->is_cmd_mode ||
- i915->vbt.dsi.seq_version != 1)
+ if (panel->vbt.dsi.config->is_cmd_mode ||
+ panel->vbt.dsi.seq_version != 1)
return;
/* Only do this if there are otp and assert seqs and no deassert seq */
- if (!i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
- !i915->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
- i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+ !panel->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+ panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
return;
/* The deassert-sequence ends at the first DSI packet */
- len = get_init_otp_deassert_fragment_len(i915);
+ len = get_init_otp_deassert_fragment_len(i915, panel);
if (!len)
return;
@@ -1788,25 +1942,26 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915)
"Using init OTP fragment to deassert reset\n");
/* Copy the fragment, update seq byte and terminate it */
- init_otp = (u8 *)i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
- i915->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
- if (!i915->vbt.dsi.deassert_seq)
+ init_otp = (u8 *)panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ panel->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+ if (!panel->vbt.dsi.deassert_seq)
return;
- i915->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
- i915->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+ panel->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+ panel->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
/* Use the copy for deassert */
- i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
- i915->vbt.dsi.deassert_seq;
+ panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+ panel->vbt.dsi.deassert_seq;
/* Replace the last byte of the fragment with init OTP seq byte */
init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
/* And make MIPI_SEQ_INIT_OTP point to it */
- i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+ panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
}
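A rough before/after picture of the v1 sequence data manipulated above (our reading of the code, not a diagram from the patch):

/*
 * before:  INIT_OTP -> [INIT_OTP byte][delay/gpio ops][DSI packets...][END]
 *
 * after:   DEASSERT_RESET -> kmemdup'd copy:
 *                            [DEASSERT byte][delay/gpio ops][END]
 *          INIT_OTP       -> original buffer advanced by len - 1, with the
 *                            byte just before the first DSI packet rewritten
 *                            to the INIT_OTP sequence id.
 */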
static void
-parse_mipi_sequence(struct drm_i915_private *i915)
+parse_mipi_sequence(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
- int panel_type = i915->vbt.panel_type;
+ int panel_type = panel->vbt.panel_type;
const struct bdb_mipi_sequence *sequence;
const u8 *seq_data;
u32 seq_size;
@@ -1814,7 +1969,7 @@ parse_mipi_sequence(struct drm_i915_private *i915)
int index = 0;
/* Only our generic panel driver uses the sequence block. */
- if (i915->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
+ if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;
sequence = find_section(i915, BDB_MIPI_SEQUENCE);
@@ -1860,7 +2015,7 @@ parse_mipi_sequence(struct drm_i915_private *i915)
drm_dbg_kms(&i915->drm,
"Unsupported sequence %u\n", seq_id);
- i915->vbt.dsi.sequence[seq_id] = data + index;
+ panel->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
index = goto_next_sequence_v3(data, index, seq_size);
@@ -1873,18 +2028,18 @@ parse_mipi_sequence(struct drm_i915_private *i915)
}
}
- i915->vbt.dsi.data = data;
- i915->vbt.dsi.size = seq_size;
- i915->vbt.dsi.seq_version = sequence->version;
+ panel->vbt.dsi.data = data;
+ panel->vbt.dsi.size = seq_size;
+ panel->vbt.dsi.seq_version = sequence->version;
- fixup_mipi_sequences(i915);
+ fixup_mipi_sequences(i915, panel);
drm_dbg(&i915->drm, "MIPI related VBT parsing complete\n");
return;
err:
kfree(data);
- memset(i915->vbt.dsi.sequence, 0, sizeof(i915->vbt.dsi.sequence));
+ memset(panel->vbt.dsi.sequence, 0, sizeof(panel->vbt.dsi.sequence));
}
static void
@@ -2343,10 +2498,10 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
if (port != PORT_A || DISPLAY_VER(i915) >= 12)
return;
- if (!(devdata->child.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING))
+ if (!intel_bios_encoder_supports_dvi(devdata))
return;
- is_hdmi = !(devdata->child.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT);
+ is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
drm_dbg_kms(&i915->drm, "VBT claims port A supports DVI%s, ignoring\n",
is_hdmi ? "/HDMI" : "");
@@ -2432,33 +2587,13 @@ static bool is_port_valid(struct drm_i915_private *i915, enum port port)
return true;
}
-static void parse_ddi_port(struct drm_i915_private *i915,
- struct intel_bios_encoder_data *devdata)
+static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
+ enum port port)
{
+ struct drm_i915_private *i915 = devdata->i915;
const struct child_device_config *child = &devdata->child;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt;
int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
- enum port port;
-
- port = dvo_port_to_port(i915, child->dvo_port);
- if (port == PORT_NONE)
- return;
-
- if (!is_port_valid(i915, port)) {
- drm_dbg_kms(&i915->drm,
- "VBT reports port %c as supported, but that can't be true: skipping\n",
- port_name(port));
- return;
- }
-
- if (i915->vbt.ports[port]) {
- drm_dbg_kms(&i915->drm,
- "More than one child device for port %c in VBT, using the first.\n",
- port_name(port));
- return;
- }
-
- sanitize_device_type(devdata, port);
is_dvi = intel_bios_encoder_supports_dvi(devdata);
is_dp = intel_bios_encoder_supports_dp(devdata);
@@ -2476,12 +2611,6 @@ static void parse_ddi_port(struct drm_i915_private *i915,
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
- if (is_dvi)
- sanitize_ddc_pin(devdata, port);
-
- if (is_dp)
- sanitize_aux_ch(devdata, port);
-
hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata);
if (hdmi_level_shift >= 0) {
drm_dbg_kms(&i915->drm,
@@ -2513,6 +2642,39 @@ static void parse_ddi_port(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm,
"Port %c VBT DP max link rate: %d\n",
port_name(port), dp_max_link_rate);
+}
+
+static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+ const struct child_device_config *child = &devdata->child;
+ enum port port;
+
+ port = dvo_port_to_port(i915, child->dvo_port);
+ if (port == PORT_NONE)
+ return;
+
+ if (!is_port_valid(i915, port)) {
+ drm_dbg_kms(&i915->drm,
+ "VBT reports port %c as supported, but that can't be true: skipping\n",
+ port_name(port));
+ return;
+ }
+
+ if (i915->vbt.ports[port]) {
+ drm_dbg_kms(&i915->drm,
+ "More than one child device for port %c in VBT, using the first.\n",
+ port_name(port));
+ return;
+ }
+
+ sanitize_device_type(devdata, port);
+
+ if (intel_bios_encoder_supports_dvi(devdata))
+ sanitize_ddc_pin(devdata, port);
+
+ if (intel_bios_encoder_supports_dp(devdata))
+ sanitize_aux_ch(devdata, port);
i915->vbt.ports[port] = devdata;
}
@@ -2525,12 +2687,18 @@ static bool has_ddi_port_info(struct drm_i915_private *i915)
static void parse_ddi_ports(struct drm_i915_private *i915)
{
struct intel_bios_encoder_data *devdata;
+ enum port port;
if (!has_ddi_port_info(i915))
return;
list_for_each_entry(devdata, &i915->vbt.display_devices, node)
- parse_ddi_port(i915, devdata);
+ parse_ddi_port(devdata);
+
+ for_each_port(port) {
+ if (i915->vbt.ports[port])
+ print_ddi_port(i915->vbt.ports[port], port);
+ }
}
static void
@@ -2638,15 +2806,6 @@ init_vbt_defaults(struct drm_i915_private *i915)
{
i915->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
- /* Default to having backlight */
- i915->vbt.backlight.present = true;
-
- /* LFP panel data */
- i915->vbt.lvds_dither = 1;
-
- /* SDVO panel data */
- i915->vbt.sdvo_lvds_vbt_mode = NULL;
-
/* general features */
i915->vbt.int_tv_support = 1;
i915->vbt.int_crt_support = 1;
@@ -2666,6 +2825,17 @@ init_vbt_defaults(struct drm_i915_private *i915)
i915->vbt.lvds_ssc_freq);
}
+/* Common defaults which may be overridden by VBT. */
+static void
+init_vbt_panel_defaults(struct intel_panel *panel)
+{
+ /* Default to having backlight */
+ panel->vbt.backlight.present = true;
+
+ /* LFP panel data */
+ panel->vbt.lvds_dither = true;
+}
+
/* Defaults to initialize only if there is no VBT. */
static void
init_vbt_missing_defaults(struct drm_i915_private *i915)
@@ -2952,17 +3122,7 @@ void intel_bios_init(struct drm_i915_private *i915)
/* Grab useful general definitions */
parse_general_features(i915);
parse_general_definitions(i915);
- parse_panel_options(i915);
- parse_generic_dtd(i915);
- parse_lfp_data(i915);
- parse_lfp_backlight(i915);
- parse_sdvo_panel_data(i915);
parse_driver_features(i915);
- parse_power_conservation_features(i915);
- parse_edp(i915);
- parse_psr(i915);
- parse_mipi_config(i915);
- parse_mipi_sequence(i915);
/* Depends on child device list */
parse_compression_parameters(i915);
@@ -2981,6 +3141,28 @@ out:
kfree(oprom_vbt);
}
+void intel_bios_init_panel(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
+{
+ init_vbt_panel_defaults(panel);
+
+ panel->vbt.panel_type = get_panel_type(i915, devdata, edid);
+
+ parse_panel_options(i915, panel);
+ parse_generic_dtd(i915, panel);
+ parse_lfp_data(i915, panel);
+ parse_lfp_backlight(i915, panel);
+ parse_sdvo_panel_data(i915, panel);
+ parse_panel_driver_features(i915, panel);
+ parse_power_conservation_features(i915, panel);
+ parse_edp(i915, panel);
+ parse_psr(i915, panel);
+ parse_mipi_config(i915, panel);
+ parse_mipi_sequence(i915, panel);
+}
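A hypothetical call site, to show how the two new entry points pair up; everything here except the two intel_bios_*_panel() calls is assumed:

	/* e.g. in an eDP/DSI connector init path, after reading the EDID: */
	intel_bios_init_panel(i915, &connector->panel, encoder->devdata, edid);

	/* and from the connector destroy path: */
	intel_bios_fini_panel(&connector->panel);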
+
/**
* intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
* @i915: i915 device instance
@@ -3000,19 +3182,22 @@ void intel_bios_driver_remove(struct drm_i915_private *i915)
list_del(&entry->node);
kfree(entry);
}
+}
- kfree(i915->vbt.sdvo_lvds_vbt_mode);
- i915->vbt.sdvo_lvds_vbt_mode = NULL;
- kfree(i915->vbt.lfp_lvds_vbt_mode);
- i915->vbt.lfp_lvds_vbt_mode = NULL;
- kfree(i915->vbt.dsi.data);
- i915->vbt.dsi.data = NULL;
- kfree(i915->vbt.dsi.pps);
- i915->vbt.dsi.pps = NULL;
- kfree(i915->vbt.dsi.config);
- i915->vbt.dsi.config = NULL;
- kfree(i915->vbt.dsi.deassert_seq);
- i915->vbt.dsi.deassert_seq = NULL;
+void intel_bios_fini_panel(struct intel_panel *panel)
+{
+ kfree(panel->vbt.sdvo_lvds_vbt_mode);
+ panel->vbt.sdvo_lvds_vbt_mode = NULL;
+ kfree(panel->vbt.lfp_lvds_vbt_mode);
+ panel->vbt.lfp_lvds_vbt_mode = NULL;
+ kfree(panel->vbt.dsi.data);
+ panel->vbt.dsi.data = NULL;
+ kfree(panel->vbt.dsi.pps);
+ panel->vbt.dsi.pps = NULL;
+ kfree(panel->vbt.dsi.config);
+ panel->vbt.dsi.config = NULL;
+ kfree(panel->vbt.dsi.deassert_seq);
+ panel->vbt.dsi.deassert_seq = NULL;
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 4709c4d29805..e47582b0de0a 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -33,9 +33,11 @@
#include <linux/types.h>
struct drm_i915_private;
+struct edid;
struct intel_bios_encoder_data;
struct intel_crtc_state;
struct intel_encoder;
+struct intel_panel;
enum port;
enum intel_backlight_type {
@@ -230,6 +232,11 @@ struct mipi_pps_data {
} __packed;
void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_init_panel(struct drm_i915_private *dev_priv,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid);
+void intel_bios_fini_panel(struct intel_panel *panel);
void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 37bd7b17f3d0..79269d2c476b 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -78,7 +78,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
u16 dclk;
int ret;
- ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
&val, &val2);
if (ret)
@@ -104,7 +104,7 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
int ret;
int i;
- ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -123,7 +123,7 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
int ret;
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
points_mask,
ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
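Every pcode call site in this and the following files changes the same way: the helpers take the uncore pointer instead of the whole device. The assumed shape of the reworked API, inferred from the call sites rather than quoted from the patch:

int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1);
int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
			    int fast_timeout_us, int slow_timeout_ms);
int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms);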
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index b2017d8161b4..6e80162632dd 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -800,7 +800,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- ret = snb_pcode_write(dev_priv, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
drm_err(&dev_priv->drm,
"failed to inform pcode about cdclk change\n");
@@ -828,7 +828,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");
- snb_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_config->voltage_level);
intel_de_write(dev_priv, CDCLK_FREQ,
@@ -1086,7 +1086,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
drm_WARN_ON_ONCE(&dev_priv->drm,
IS_SKYLAKE(dev_priv) && vco == 8640000);
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
@@ -1132,7 +1132,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
intel_de_posting_read(dev_priv, CDCLK_CTL);
/* inform PCU of the change */
- snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
intel_update_cdclk(dev_priv);
@@ -1702,7 +1702,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
/* Inform power controller of upcoming frequency change. */
if (DISPLAY_VER(dev_priv) >= 11)
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
@@ -1711,7 +1711,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* BSpec requires us to wait up to 150usec, but that leads to
* timeouts; the 2ms used here is based on experiment.
*/
- ret = snb_pcode_write_timeout(dev_priv,
+ ret = snb_pcode_write_timeout(&dev_priv->uncore,
HSW_PCODE_DE_WRITE_FREQ_REQ,
0x80000000, 150, 2);
if (ret) {
@@ -1774,7 +1774,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
if (DISPLAY_VER(dev_priv) >= 11) {
- ret = snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
} else {
/*
@@ -1783,7 +1783,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* FIXME: Waiting for the request completion could be delayed
* until the next PCODE request based on BSpec.
*/
- ret = snb_pcode_write_timeout(dev_priv,
+ ret = snb_pcode_write_timeout(&dev_priv->uncore,
HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_config->voltage_level,
150, 2);
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 34128c9c635c..9583d17e858d 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -505,30 +505,19 @@ static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- u32 val;
-
- val = intel_de_read(dev_priv, PIPECONF(pipe));
- val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
- val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- intel_de_write(dev_priv, PIPECONF(pipe), val);
+ /* update PIPECONF GAMMA_MODE */
+ i9xx_set_pipeconf(crtc_state);
}
static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- u32 val;
- val = intel_de_read(dev_priv, PIPECONF(pipe));
- val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
- val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- intel_de_write(dev_priv, PIPECONF(pipe), val);
+ /* update PIPECONF GAMMA_MODE */
+ ilk_set_pipeconf(crtc_state);
- intel_de_write_fw(dev_priv, PIPE_CSC_MODE(pipe),
+ intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
crtc_state->csc_mode);
}
@@ -852,7 +841,7 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
/*
@@ -894,7 +883,7 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
/*
* When setting the auto-increment bit, the hardware seems to
@@ -1346,10 +1335,10 @@ static int check_luts(const struct intel_crtc_state *crtc_state)
return -EINVAL;
}
- degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
- gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
- degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
- gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
+ degamma_length = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
+ gamma_length = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
+ degamma_tests = INTEL_INFO(dev_priv)->display.color.degamma_lut_tests;
+ gamma_tests = INTEL_INFO(dev_priv)->display.color.gamma_lut_tests;
if (check_lut_size(degamma_lut, degamma_length) ||
check_lut_size(gamma_lut, gamma_length))
@@ -1638,7 +1627,7 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
/*
* Enable 10bit gamma for D13
* ToDo: Extend to Logarithmic Gamma once the new UAPI
- * is acccepted and implemented by a userspace consumer
+ * is accepted and implemented by a userspace consumer
*/
else if (DISPLAY_VER(i915) >= 13)
gamma_mode |= GAMMA_MODE_MODE_10BIT;
@@ -1885,7 +1874,7 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
@@ -1928,7 +1917,7 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
@@ -1989,7 +1978,7 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
@@ -2040,7 +2029,7 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int i, hw_lut_size = ivb_lut_10_size(prec_index);
- int lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
@@ -2093,7 +2082,7 @@ static struct drm_property_blob *
icl_read_lut_multi_segment(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
@@ -2230,7 +2219,7 @@ static const struct intel_color_funcs ilk_color_funcs = {
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool has_ctm = INTEL_INFO(dev_priv)->color.degamma_lut_size != 0;
+ bool has_ctm = INTEL_INFO(dev_priv)->display.color.degamma_lut_size != 0;
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
@@ -2261,7 +2250,7 @@ void intel_color_init(struct intel_crtc *crtc)
}
drm_crtc_enable_color_mgmt(&crtc->base,
- INTEL_INFO(dev_priv)->color.degamma_lut_size,
+ INTEL_INFO(dev_priv)->display.color.degamma_lut_size,
has_ctm,
- INTEL_INFO(dev_priv)->color.gamma_lut_size);
+ INTEL_INFO(dev_priv)->display.color.gamma_lut_size);
}
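The churn in this file is one mechanical move: the color LUT capabilities now hang off the display sub-structure of the device info. A sketch of the assumed layout (field names taken from the accessors above; the field widths are a guess):

struct {
	u32 degamma_lut_size;
	u32 gamma_lut_size;
	u32 degamma_lut_tests;
	u32 gamma_lut_tests;
} color;	/* nested inside the info's display struct, i.e.
		 * INTEL_INFO(i915)->display.color.<field> */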
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
new file mode 100644
index 000000000000..4ca6e9493ff2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_crtc_state_dump.h"
+#include "intel_display_types.h"
+#include "intel_hdmi.h"
+#include "intel_vrr.h"
+
+static void intel_dump_crtc_timings(struct drm_i915_private *i915,
+ const struct drm_display_mode *mode)
+{
+ drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
+ "type: 0x%x flags: 0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hsync_start,
+ mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vsync_start,
+ mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->type, mode->flags);
+}
+
+static void
+intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
+ const char *id, unsigned int lane_count,
+ const struct intel_link_m_n *m_n)
+{
+ struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
+
+ drm_dbg_kms(&i915->drm,
+ "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->data_m, m_n->data_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
+}
+
+static void
+intel_dump_infoframe(struct drm_i915_private *i915,
+ const union hdmi_infoframe *frame)
+{
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
+}
+
+static void
+intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
+ const struct drm_dp_vsc_sdp *vsc)
+{
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ drm_dp_vsc_sdp_log(KERN_DEBUG, i915->drm.dev, vsc);
+}
+
+#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
+
+static const char * const output_type_str[] = {
+ OUTPUT_TYPE(UNUSED),
+ OUTPUT_TYPE(ANALOG),
+ OUTPUT_TYPE(DVO),
+ OUTPUT_TYPE(SDVO),
+ OUTPUT_TYPE(LVDS),
+ OUTPUT_TYPE(TVOUT),
+ OUTPUT_TYPE(HDMI),
+ OUTPUT_TYPE(DP),
+ OUTPUT_TYPE(EDP),
+ OUTPUT_TYPE(DSI),
+ OUTPUT_TYPE(DDI),
+ OUTPUT_TYPE(DP_MST),
+};
+
+#undef OUTPUT_TYPE
+
+static void snprintf_output_types(char *buf, size_t len,
+ unsigned int output_types)
+{
+ char *str = buf;
+ int i;
+
+ str[0] = '\0';
+
+ for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
+ int r;
+
+ if ((output_types & BIT(i)) == 0)
+ continue;
+
+ r = snprintf(str, len, "%s%s",
+ str != buf ? "," : "", output_type_str[i]);
+ if (r >= len)
+ break;
+ str += r;
+ len -= r;
+
+ output_types &= ~BIT(i);
+ }
+
+ WARN_ON_ONCE(output_types != 0);
+}
+
+static const char * const output_format_str[] = {
+ [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
+ [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
+ [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
+};
+
+static const char *output_formats(enum intel_output_format format)
+{
+ if (format >= ARRAY_SIZE(output_format_str))
+ return "invalid";
+ return output_format_str[format];
+}
+
+static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (!fb) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ str_yes_no(plane_state->uapi.visible));
+ return;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ fb->base.id, fb->width, fb->height, &fb->format->format,
+ fb->modifier, str_yes_no(plane_state->uapi.visible));
+ drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
+ plane_state->hw.rotation, plane_state->scaler_id);
+ if (plane_state->uapi.visible)
+ drm_dbg_kms(&i915->drm,
+ "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst));
+}
+
+void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
+ struct intel_atomic_state *state,
+ const char *context)
+{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ char buf[64];
+ int i;
+
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] enable: %s [%s]\n",
+ crtc->base.base.id, crtc->base.name,
+ str_yes_no(pipe_config->hw.enable), context);
+
+ if (!pipe_config->hw.enable)
+ goto dump_planes;
+
+ snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
+ drm_dbg_kms(&i915->drm,
+ "active: %s, output_types: %s (0x%x), output format: %s\n",
+ str_yes_no(pipe_config->hw.active),
+ buf, pipe_config->output_types,
+ output_formats(pipe_config->output_format));
+
+ drm_dbg_kms(&i915->drm,
+ "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
+ pipe_config->pipe_bpp, pipe_config->dither);
+
+ drm_dbg_kms(&i915->drm, "MST master transcoder: %s\n",
+ transcoder_name(pipe_config->mst_master_transcoder));
+
+ drm_dbg_kms(&i915->drm,
+ "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
+ transcoder_name(pipe_config->master_transcoder),
+ pipe_config->sync_mode_slaves_mask);
+
+ drm_dbg_kms(&i915->drm, "bigjoiner: %s, pipes: 0x%x\n",
+ intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
+ intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
+ pipe_config->bigjoiner_pipes);
+
+ drm_dbg_kms(&i915->drm, "splitter: %s, link count %d, overlap %d\n",
+ str_enabled_disabled(pipe_config->splitter.enable),
+ pipe_config->splitter.link_count,
+ pipe_config->splitter.pixel_overlap);
+
+ if (pipe_config->has_pch_encoder)
+ intel_dump_m_n_config(pipe_config, "fdi",
+ pipe_config->fdi_lanes,
+ &pipe_config->fdi_m_n);
+
+ if (intel_crtc_has_dp_encoder(pipe_config)) {
+ intel_dump_m_n_config(pipe_config, "dp m_n",
+ pipe_config->lane_count,
+ &pipe_config->dp_m_n);
+ intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ pipe_config->lane_count,
+ &pipe_config->dp_m2_n2);
+ }
+
+ drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n",
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay);
+
+ drm_dbg_kms(&i915->drm,
+ "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+ pipe_config->has_audio, pipe_config->has_infoframe,
+ pipe_config->infoframes.enable);
+
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
+ drm_dbg_kms(&i915->drm, "GCP: 0x%x\n",
+ pipe_config->infoframes.gcp);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.spd);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(DP_SDP_VSC))
+ intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc);
+
+ drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ str_yes_no(pipe_config->vrr.enable),
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
+ pipe_config->vrr.flipline,
+ intel_vrr_vmin_vblank_start(pipe_config),
+ intel_vrr_vmax_vblank_start(pipe_config));
+
+ drm_dbg_kms(&i915->drm, "requested mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.mode));
+ drm_dbg_kms(&i915->drm, "adjusted mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
+ intel_dump_crtc_timings(i915, &pipe_config->hw.adjusted_mode);
+ drm_dbg_kms(&i915->drm, "pipe mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
+ intel_dump_crtc_timings(i915, &pipe_config->hw.pipe_mode);
+ drm_dbg_kms(&i915->drm,
+ "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
+ pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
+ pipe_config->pixel_rate);
+
+ drm_dbg_kms(&i915->drm, "linetime: %d, ips linetime: %d\n",
+ pipe_config->linetime, pipe_config->ips_linetime);
+
+ if (DISPLAY_VER(i915) >= 9)
+ drm_dbg_kms(&i915->drm,
+ "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id);
+
+ if (HAS_GMCH(i915))
+ drm_dbg_kms(&i915->drm,
+ "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
+ else
+ drm_dbg_kms(&i915->drm,
+ "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
+ DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
+ str_enabled_disabled(pipe_config->pch_pfit.enabled),
+ str_yes_no(pipe_config->pch_pfit.force_thru));
+
+ drm_dbg_kms(&i915->drm, "ips: %i, double wide: %i, drrs: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide,
+ pipe_config->has_drrs);
+
+ intel_dpll_dump_hw_state(i915, &pipe_config->dpll_hw_state);
+
+ if (IS_CHERRYVIEW(i915))
+ drm_dbg_kms(&i915->drm,
+ "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+ else
+ drm_dbg_kms(&i915->drm,
+ "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+
+ drm_dbg_kms(&i915->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
+ pipe_config->hw.degamma_lut ?
+ drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
+ pipe_config->hw.gamma_lut ?
+ drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
+
+dump_planes:
+ if (!state)
+ return;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe == crtc->pipe)
+ intel_dump_plane_state(plane_state);
+ }
+}
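A hypothetical caller of the new helper; the context string is free-form and "modeset" here is only an example:

	intel_crtc_state_dump(new_crtc_state, state, "modeset");

Passing a NULL atomic state skips the per-plane dump, as the dump_planes tail above shows.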
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h
new file mode 100644
index 000000000000..9399c35b7e5e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_CRTC_STATE_DUMP_H__
+#define __INTEL_CRTC_STATE_DUMP_H__
+
+struct intel_crtc_state;
+struct intel_atomic_state;
+
+void intel_crtc_state_dump(const struct intel_crtc_state *crtc_state,
+ struct intel_atomic_state *state,
+ const char *context);
+
+#endif /* __INTEL_CRTC_STATE_DUMP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 8c80de877605..c2797ad2d313 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 9e6fa59eabba..2330604b0bcc 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "intel_audio.h"
+#include "intel_audio_regs.h"
#include "intel_backlight.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
@@ -322,14 +323,10 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
}
-static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
+int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
{
int dotclock;
- /* CRT dotclock is determined via other means */
- if (pipe_config->has_pch_encoder)
- return;
-
if (intel_crtc_has_dp_encoder(pipe_config))
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
@@ -345,7 +342,17 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
+ return dotclock;
+}
+
+static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
+{
+ /* CRT dotclock is determined via other means */
+ if (pipe_config->has_pch_encoder)
+ return;
+
+ pipe_config->hw.adjusted_mode.crtc_clock =
+ intel_crtc_dotclock(pipe_config);
}
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
@@ -455,6 +462,9 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_SELECT_PORT(port);
switch (crtc_state->pipe_bpp) {
+ default:
+ MISSING_CASE(crtc_state->pipe_bpp);
+ fallthrough;
case 18:
temp |= TRANS_DDI_BPC_6;
break;
@@ -467,8 +477,6 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
case 36:
temp |= TRANS_DDI_BPC_12;
break;
- default:
- BUG();
}
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
@@ -478,6 +486,9 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
if (cpu_transcoder == TRANSCODER_EDP) {
switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ fallthrough;
case PIPE_A:
/* On Haswell, can only use the always-on power well for
* eDP when not using the panel fitter, and when not
@@ -494,9 +505,6 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
case PIPE_C:
temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
break;
- default:
- BUG();
- break;
}
}
@@ -3433,26 +3441,8 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
- if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
- pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
- /*
- * This is a big fat ugly hack.
- *
- * Some machines in UEFI boot mode provide us a VBT that has 18
- * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
- * unknown we fail to light up. Yet the same BIOS boots up with
- * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
- * max, not what it tells us to use.
- *
- * Note: This will still be broken if the eDP panel is not lit
- * up by the BIOS, and thus we can't get the mode at module
- * load.
- */
- drm_dbg_kms(&dev_priv->drm,
- "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
- dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
- }
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
ddi_dotclock_get(pipe_config);
@@ -4189,7 +4179,7 @@ static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv,
if (port == PORT_D)
return HPD_PORT_A;
- if (HAS_PCH_MCC(dev_priv))
+ if (HAS_PCH_TGP(dev_priv))
return icl_hpd_pin(dev_priv, port);
return HPD_PORT_A + port - PORT_A;
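Two hunks above swap BUG() defaults for MISSING_CASE() plus fallthrough: an unexpected enum value now produces a WARN-style diagnostic and a sane fallback instead of crashing the kernel. The pattern, reduced to its core:

	switch (crtc_state->pipe_bpp) {
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		fallthrough;	/* treat unknown bpp as 6 bpc */
	case 18:
		temp |= TRANS_DDI_BPC_6;
		break;
	/* remaining bpp cases unchanged */
	}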
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 85f58dd3df72..006a2e979000 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -878,26 +878,6 @@ static const struct intel_ddi_buf_trans adls_combo_phy_trans_edp_hbr3 = {
.num_entries = ARRAY_SIZE(_adls_combo_phy_trans_edp_hbr3),
};
-static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_hdmi[] = {
- /* NT mV Trans mV db */
- { .icl = { 0x6, 0x60, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */
- { .icl = { 0x6, 0x68, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
- { .icl = { 0xA, 0x73, 0x3F, 0x00, 0x00 } }, /* 650 650 0.0 ALS */
- { .icl = { 0xA, 0x78, 0x3F, 0x00, 0x00 } }, /* 800 800 0.0 */
- { .icl = { 0xB, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1000 1000 0.0 Re-timer */
- { .icl = { 0xB, 0x7F, 0x3B, 0x00, 0x04 } }, /* Full Red -1.5 */
- { .icl = { 0xB, 0x7F, 0x39, 0x00, 0x06 } }, /* Full Red -1.8 */
- { .icl = { 0xB, 0x7F, 0x37, 0x00, 0x08 } }, /* Full Red -2.0 CRLS */
- { .icl = { 0xB, 0x7F, 0x35, 0x00, 0x0A } }, /* Full Red -2.5 */
- { .icl = { 0xB, 0x7F, 0x33, 0x00, 0x0C } }, /* Full Red -3.0 */
-};
-
-static const struct intel_ddi_buf_trans adlp_combo_phy_trans_hdmi = {
- .entries = _adlp_combo_phy_trans_hdmi,
- .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_hdmi),
- .hdmi_default_entry = ARRAY_SIZE(_adlp_combo_phy_trans_hdmi) - 1,
-};
-
static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
@@ -953,9 +933,9 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr2_edp_h
{ .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
{ .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
{ .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
- { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
- { .icl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
- { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x63, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } }, /* 600 900 3.5 */
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
@@ -1062,17 +1042,18 @@ bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
static bool use_edp_hobl(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector = intel_dp->attached_connector;
- return i915->vbt.edp.hobl && !intel_dp->hobl_failed;
+ return connector->panel.vbt.edp.hobl && !intel_dp->hobl_failed;
}
static bool use_edp_low_vswing(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector = intel_dp->attached_connector;
- return i915->vbt.edp.low_vswing;
+ return connector->panel.vbt.edp.low_vswing;
}
static const struct intel_ddi_buf_trans *
@@ -1556,7 +1537,7 @@ adlp_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&adlp_combo_phy_trans_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return adlp_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 806d50b302ab..a0f84cbe974f 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -87,6 +87,7 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
@@ -99,6 +100,8 @@
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
+#include "intel_modeset_verify.h"
+#include "intel_modeset_setup.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
@@ -123,13 +126,9 @@
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
-static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
-static void intel_modeset_setup_hw_state(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx);
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
@@ -164,7 +163,7 @@ static void intel_modeset_setup_hw_state(struct drm_device *dev,
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-static void intel_update_watermarks(struct drm_i915_private *dev_priv)
+void intel_update_watermarks(struct drm_i915_private *dev_priv)
{
if (dev_priv->wm_disp->update_wm)
dev_priv->wm_disp->update_wm(dev_priv);
@@ -500,6 +499,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
i915_reg_t dpll_reg;
switch (dig_port->base.port) {
+ default:
+ MISSING_CASE(dig_port->base.port);
+ fallthrough;
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
dpll_reg = DPLL(0);
@@ -513,8 +515,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
port_mask = DPLL_PORTD_READY_MASK;
dpll_reg = DPIO_PHY_STATUS;
break;
- default:
- BUG();
}
if (intel_de_wait_for_register(dev_priv, dpll_reg,
@@ -730,10 +730,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0);
}
-static void
-intel_set_plane_visible(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state,
- bool visible)
+void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state,
+ bool visible)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
@@ -745,7 +744,7 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}
-static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
+void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct drm_plane *plane;
@@ -780,7 +779,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
crtc->base.base.id, crtc->base.name);
intel_set_plane_visible(crtc_state, plane_state, false);
- fixup_plane_bitmasks(crtc_state);
+ intel_plane_fixup_bitmasks(crtc_state);
crtc_state->data_rate[plane->id] = 0;
crtc_state->data_rate_y[plane->id] = 0;
crtc_state->rel_data_rate[plane->id] = 0;
@@ -829,7 +828,7 @@ intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
}
static int
-__intel_display_resume(struct drm_device *dev,
+__intel_display_resume(struct drm_i915_private *i915,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -837,8 +836,8 @@ __intel_display_resume(struct drm_device *dev,
struct drm_crtc *crtc;
int i, ret;
- intel_modeset_setup_hw_state(dev, ctx);
- intel_vga_redisable(to_i915(dev));
+ intel_modeset_setup_hw_state(i915, ctx);
+ intel_vga_redisable(i915);
if (!state)
return 0;
@@ -858,12 +857,13 @@ __intel_display_resume(struct drm_device *dev,
}
/* ignore any reset values/BIOS leftovers in the WM registers */
- if (!HAS_GMCH(to_i915(dev)))
+ if (!HAS_GMCH(i915))
to_intel_atomic_state(state)->skip_intermediate_wm = true;
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
- drm_WARN_ON(dev, ret == -EDEADLK);
+ drm_WARN_ON(&i915->drm, ret == -EDEADLK);
+
return ret;
}
@@ -936,56 +936,55 @@ void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
state->acquire_ctx = ctx;
}
-void intel_display_finish_reset(struct drm_i915_private *dev_priv)
+void intel_display_finish_reset(struct drm_i915_private *i915)
{
- struct drm_device *dev = &dev_priv->drm;
- struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+ struct drm_modeset_acquire_ctx *ctx = &i915->reset_ctx;
struct drm_atomic_state *state;
int ret;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(i915))
return;
/* reset doesn't touch the display */
- if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
+ if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
return;
- state = fetch_and_zero(&dev_priv->modeset_restore_state);
+ state = fetch_and_zero(&i915->modeset_restore_state);
if (!state)
goto unlock;
/* reset doesn't touch the display */
- if (!gpu_reset_clobbers_display(dev_priv)) {
+ if (!gpu_reset_clobbers_display(i915)) {
/* for testing only restore the display */
- ret = __intel_display_resume(dev, state, ctx);
+ ret = __intel_display_resume(i915, state, ctx);
if (ret)
- drm_err(&dev_priv->drm,
+ drm_err(&i915->drm,
"Restoring old state failed with %i\n", ret);
} else {
/*
* The display has been reset as well,
* so need a full re-initialization.
*/
- intel_pps_unlock_regs_wa(dev_priv);
- intel_modeset_init_hw(dev_priv);
- intel_init_clock_gating(dev_priv);
- intel_hpd_init(dev_priv);
+ intel_pps_unlock_regs_wa(i915);
+ intel_modeset_init_hw(i915);
+ intel_init_clock_gating(i915);
+ intel_hpd_init(i915);
- ret = __intel_display_resume(dev, state, ctx);
+ ret = __intel_display_resume(i915, state, ctx);
if (ret)
- drm_err(&dev_priv->drm,
+ drm_err(&i915->drm,
"Restoring old state failed with %i\n", ret);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_poll_disable(i915);
}
drm_atomic_state_put(state);
unlock:
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&i915->drm.mode_config.mutex);
- clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
+ clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
}
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
@@ -2206,9 +2205,8 @@ static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}
-static void
-modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
- struct intel_power_domain_mask *old_domains)
+void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+ struct intel_power_domain_mask *old_domains)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -2232,8 +2230,8 @@ modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
domain);
}
-static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
- struct intel_power_domain_mask *domains)
+void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
+ struct intel_power_domain_mask *domains)
{
intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
&crtc->enabled_power_domains,
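This patch drops the static qualifier from a number of helpers (intel_modeset_get_crtc_power_domains() and intel_modeset_put_crtc_power_domains() here, and intel_encoder_get_config(), i9xx_set_pipeconf(), ilk_set_pipeconf(), intel_crtc_get_pipe_config(), intel_pipe_config_compare() and intel_crtc_update_active_timings() further down), so code moved out of this file can keep calling them. The matching declarations are not part of this excerpt; presumably they land in a header such as intel_display.h, along these lines (sketch only):

void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
					  struct intel_power_domain_mask *old_domains);
void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
					  struct intel_power_domain_mask *domains);
bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state);
void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);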
@@ -2413,89 +2411,6 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
i830_enable_pipe(dev_priv, pipe);
}
-static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct intel_encoder *encoder;
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_bw_state *bw_state =
- to_intel_bw_state(dev_priv->bw_obj.state);
- struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(dev_priv->cdclk.obj.state);
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(dev_priv->dbuf.obj.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane *plane;
- struct drm_atomic_state *state;
- struct intel_crtc_state *temp_crtc_state;
- enum pipe pipe = crtc->pipe;
- int ret;
-
- if (!crtc_state->hw.active)
- return;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (plane_state->uapi.visible)
- intel_plane_disable_noatomic(crtc, plane);
- }
-
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (!state) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to disable [CRTC:%d:%s], out of memory",
- crtc->base.base.id, crtc->base.name);
- return;
- }
-
- state->acquire_ctx = ctx;
-
- /* Everything's already locked, -EDEADLK can't happen. */
- temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
- ret = drm_atomic_add_affected_connectors(state, &crtc->base);
-
- drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
-
- dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);
-
- drm_atomic_state_put(state);
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
- crtc->base.base.id, crtc->base.name);
-
- crtc->active = false;
- crtc->base.enabled = false;
-
- drm_WARN_ON(&dev_priv->drm,
- drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
- crtc_state->uapi.active = false;
- crtc_state->uapi.connector_mask = 0;
- crtc_state->uapi.encoder_mask = 0;
- intel_crtc_free_hw_state(crtc_state);
- memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
-
- for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
- encoder->base.crtc = NULL;
-
- intel_fbc_disable(crtc);
- intel_update_watermarks(dev_priv);
- intel_disable_shared_dpll(crtc_state);
-
- intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
-
- cdclk_state->min_cdclk[pipe] = 0;
- cdclk_state->min_voltage_level[pipe] = 0;
- cdclk_state->active_pipes &= ~BIT(pipe);
-
- dbuf_state->active_pipes &= ~BIT(pipe);
-
- bw_state->data_rate[pipe] = 0;
- bw_state->num_active_planes[pipe] = 0;
-}
/*
* turn all CRTCs off, but do not adjust state
@@ -2528,45 +2443,6 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_encoder);
}
-/* Cross check the actual hw state with our own modeset state tracking (and its
- * internal consistency). */
-static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
-
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.base.id, connector->base.name);
-
- if (connector->get_hw_state(connector)) {
- struct intel_encoder *encoder = intel_attached_encoder(connector);
-
- I915_STATE_WARN(!crtc_state,
- "connector enabled without attached crtc\n");
-
- if (!crtc_state)
- return;
-
- I915_STATE_WARN(!crtc_state->hw.active,
- "connector is active, but attached crtc isn't\n");
-
- if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
- return;
-
- I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
- "atomic encoder doesn't match attached encoder\n");
-
- I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
- "attached encoder crtc differs from connector crtc\n");
- } else {
- I915_STATE_WARN(crtc_state && crtc_state->hw.active,
- "attached crtc is active, but connector isn't\n");
- I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
- "best encoder set without crtc!\n");
- }
-}
-
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -2708,8 +2584,8 @@ static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state
intel_crtc_compute_pixel_rate(crtc_state);
}
-static void intel_encoder_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state)
+void intel_encoder_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
encoder->get_config(encoder, crtc_state);
@@ -2811,9 +2687,11 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
-static int intel_crtc_compute_config(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int intel_crtc_compute_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
int ret;
ret = intel_crtc_compute_pipe_src(crtc_state);
@@ -3135,14 +3013,18 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
intel_bigjoiner_adjust_pipe_src(pipe_config);
}
-static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
+void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 pipeconf = 0;
- /* we keep both pipes enabled on 830 */
- if (IS_I830(dev_priv))
+ /*
+ * - We keep both pipes enabled on 830
+ * - During modeset the pipe is still disabled and must remain so
+ * - During fastset the pipe is already enabled and must remain so
+ */
+ if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
pipeconf |= PIPECONF_ENABLE;
if (crtc_state->double_wide)
@@ -3157,6 +3039,10 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
PIPECONF_DITHER_TYPE_SP;
switch (crtc_state->pipe_bpp) {
+ default:
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ MISSING_CASE(crtc_state->pipe_bpp);
+ fallthrough;
case 18:
pipeconf |= PIPECONF_BPC_6;
break;
@@ -3166,9 +3052,6 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
case 30:
pipeconf |= PIPECONF_BPC_10;
break;
- default:
- /* Case prevented by intel_choose_pipe_bpp_dither. */
- BUG();
}
}
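Reordering the switch puts the impossible case first: an unexpected pipe_bpp now warns through MISSING_CASE() (a WARN-based helper from i915_utils.h) and falls through to the lowest valid setting, instead of killing the machine with BUG(). The resulting shape as a sketch, with the 24 bpp arm that the diff context elides assumed to select PIPECONF_BPC_8:

switch (crtc_state->pipe_bpp) {
default:
	/* unreachable if intel_choose_pipe_bpp_dither() did its job */
	MISSING_CASE(crtc_state->pipe_bpp);	/* warn, don't crash */
	fallthrough;				/* degrade to 6 bpc */
case 18:
	pipeconf |= PIPECONF_BPC_6;
	break;
case 24:
	pipeconf |= PIPECONF_BPC_8;
	break;
case 30:
	pipeconf |= PIPECONF_BPC_10;
	break;
}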
@@ -3454,16 +3337,25 @@ out:
return ret;
}
-static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
+void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- u32 val;
+ u32 val = 0;
- val = 0;
+ /*
+ * - During modeset the pipe is still disabled and must remain so
+ * - During fastset the pipe is already enabled and must remain so
+ */
+ if (!intel_crtc_needs_modeset(crtc_state))
+ val |= PIPECONF_ENABLE;
switch (crtc_state->pipe_bpp) {
+ default:
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ MISSING_CASE(crtc_state->pipe_bpp);
+ fallthrough;
case 18:
val |= PIPECONF_BPC_6;
break;
@@ -3476,9 +3368,6 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
case 36:
val |= PIPECONF_BPC_12;
break;
- default:
- /* Case prevented by intel_choose_pipe_bpp_dither. */
- BUG();
}
if (crtc_state->dither)
@@ -3519,6 +3408,13 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
+ /*
+ * - During modeset the pipe is still disabled and must remain so
+ * - During fastset the pipe is already enabled and must remain so
+ */
+ if (!intel_crtc_needs_modeset(crtc_state))
+ val |= PIPECONF_ENABLE;
+
if (IS_HASWELL(dev_priv) && crtc_state->dither)
val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
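The same guard is added to i9xx_set_pipeconf(), ilk_set_pipeconf() and hsw_set_transconf(): these register writes can now run during a fastset on a live pipe, so they must preserve the current enable state instead of unconditionally leaving it clear. The invariant in isolation:

u32 val = 0;

/*
 * Full modeset: the pipe is still off here and gets turned on later
 * by the enable sequence, so PIPECONF_ENABLE stays clear.
 * Fastset: the pipe is already running, and a register write without
 * PIPECONF_ENABLE would blank it mid-update.
 */
if (!intel_crtc_needs_modeset(crtc_state))
	val |= PIPECONF_ENABLE;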
@@ -4246,7 +4142,7 @@ out:
return active;
}
-static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
+bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
@@ -4980,45 +4876,12 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return 0;
}
-static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
-{
- struct intel_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- for_each_intel_connector_iter(connector, &conn_iter) {
- struct drm_connector_state *conn_state = connector->base.state;
- struct intel_encoder *encoder =
- to_intel_encoder(connector->base.encoder);
-
- if (conn_state->crtc)
- drm_connector_put(&connector->base);
-
- if (encoder) {
- struct intel_crtc *crtc =
- to_intel_crtc(encoder->base.crtc);
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- conn_state->best_encoder = &encoder->base;
- conn_state->crtc = &crtc->base;
- conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
-
- drm_connector_get(&connector->base);
- } else {
- conn_state->best_encoder = NULL;
- conn_state->crtc = NULL;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
-}
-
static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *crtc_state)
{
struct drm_connector *connector = conn_state->connector;
- struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_info *info = &connector->display_info;
int bpp;
@@ -5040,27 +4903,28 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
return -EINVAL;
}
- if (bpp < pipe_config->pipe_bpp) {
+ if (bpp < crtc_state->pipe_bpp) {
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
- "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+ "[CONNECTOR:%d:%s] Limiting display bpp to %d "
+ "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
connector->base.id, connector->name,
bpp, 3 * info->bpc,
3 * conn_state->max_requested_bpc,
- pipe_config->pipe_bpp);
+ crtc_state->pipe_bpp);
- pipe_config->pipe_bpp = bpp;
+ crtc_state->pipe_bpp = bpp;
}
return 0;
}
static int
-compute_baseline_pipe_bpp(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+compute_baseline_pipe_bpp(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_atomic_state *state = pipe_config->uapi.state;
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
@@ -5073,16 +4937,16 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
else
bpp = 8*3;
- pipe_config->pipe_bpp = bpp;
+ crtc_state->pipe_bpp = bpp;
/* Clamp display bpp to connector max bpp */
- for_each_new_connector_in_state(state, connector, connector_state, i) {
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
int ret;
if (connector_state->crtc != &crtc->base)
continue;
- ret = compute_sink_pipe_bpp(connector_state, pipe_config);
+ ret = compute_sink_pipe_bpp(connector_state, crtc_state);
if (ret)
return ret;
}
@@ -5090,310 +4954,6 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
return 0;
}
-static void intel_dump_crtc_timings(struct drm_i915_private *i915,
- const struct drm_display_mode *mode)
-{
- drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
- "type: 0x%x flags: 0x%x\n",
- mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hsync_start,
- mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vsync_start,
- mode->crtc_vsync_end, mode->crtc_vtotal,
- mode->type, mode->flags);
-}
-
-static void
-intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
- const char *id, unsigned int lane_count,
- const struct intel_link_m_n *m_n)
-{
- struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
-
- drm_dbg_kms(&i915->drm,
- "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- id, lane_count,
- m_n->data_m, m_n->data_n,
- m_n->link_m, m_n->link_n, m_n->tu);
-}
-
-static void
-intel_dump_infoframe(struct drm_i915_private *dev_priv,
- const union hdmi_infoframe *frame)
-{
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
-}
-
-static void
-intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
- const struct drm_dp_vsc_sdp *vsc)
-{
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
-}
-
-#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
-
-static const char * const output_type_str[] = {
- OUTPUT_TYPE(UNUSED),
- OUTPUT_TYPE(ANALOG),
- OUTPUT_TYPE(DVO),
- OUTPUT_TYPE(SDVO),
- OUTPUT_TYPE(LVDS),
- OUTPUT_TYPE(TVOUT),
- OUTPUT_TYPE(HDMI),
- OUTPUT_TYPE(DP),
- OUTPUT_TYPE(EDP),
- OUTPUT_TYPE(DSI),
- OUTPUT_TYPE(DDI),
- OUTPUT_TYPE(DP_MST),
-};
-
-#undef OUTPUT_TYPE
-
-static void snprintf_output_types(char *buf, size_t len,
- unsigned int output_types)
-{
- char *str = buf;
- int i;
-
- str[0] = '\0';
-
- for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
- int r;
-
- if ((output_types & BIT(i)) == 0)
- continue;
-
- r = snprintf(str, len, "%s%s",
- str != buf ? "," : "", output_type_str[i]);
- if (r >= len)
- break;
- str += r;
- len -= r;
-
- output_types &= ~BIT(i);
- }
-
- WARN_ON_ONCE(output_types != 0);
-}
-
-static const char * const output_format_str[] = {
- [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
- [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
- [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
-};
-
-static const char *output_formats(enum intel_output_format format)
-{
- if (format >= ARRAY_SIZE(output_format_str))
- return "invalid";
- return output_format_str[format];
-}
-
-static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
-
- if (!fb) {
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
- plane->base.base.id, plane->base.name,
- str_yes_no(plane_state->uapi.visible));
- return;
- }
-
- drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
- plane->base.base.id, plane->base.name,
- fb->base.id, fb->width, fb->height, &fb->format->format,
- fb->modifier, str_yes_no(plane_state->uapi.visible));
- drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
- plane_state->hw.rotation, plane_state->scaler_id);
- if (plane_state->uapi.visible)
- drm_dbg_kms(&i915->drm,
- "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_FP_ARG(&plane_state->uapi.src),
- DRM_RECT_ARG(&plane_state->uapi.dst));
-}
-
-static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
- struct intel_atomic_state *state,
- const char *context)
-{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_plane_state *plane_state;
- struct intel_plane *plane;
- char buf[64];
- int i;
-
- drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
- crtc->base.base.id, crtc->base.name,
- str_yes_no(pipe_config->hw.enable), context);
-
- if (!pipe_config->hw.enable)
- goto dump_planes;
-
- snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
- drm_dbg_kms(&dev_priv->drm,
- "active: %s, output_types: %s (0x%x), output format: %s\n",
- str_yes_no(pipe_config->hw.active),
- buf, pipe_config->output_types,
- output_formats(pipe_config->output_format));
-
- drm_dbg_kms(&dev_priv->drm,
- "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
- transcoder_name(pipe_config->cpu_transcoder),
- pipe_config->pipe_bpp, pipe_config->dither);
-
- drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
- transcoder_name(pipe_config->mst_master_transcoder));
-
- drm_dbg_kms(&dev_priv->drm,
- "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
- transcoder_name(pipe_config->master_transcoder),
- pipe_config->sync_mode_slaves_mask);
-
- drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s, pipes: 0x%x\n",
- intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
- intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
- pipe_config->bigjoiner_pipes);
-
- drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
- str_enabled_disabled(pipe_config->splitter.enable),
- pipe_config->splitter.link_count,
- pipe_config->splitter.pixel_overlap);
-
- if (pipe_config->has_pch_encoder)
- intel_dump_m_n_config(pipe_config, "fdi",
- pipe_config->fdi_lanes,
- &pipe_config->fdi_m_n);
-
- if (intel_crtc_has_dp_encoder(pipe_config)) {
- intel_dump_m_n_config(pipe_config, "dp m_n",
- pipe_config->lane_count,
- &pipe_config->dp_m_n);
- intel_dump_m_n_config(pipe_config, "dp m2_n2",
- pipe_config->lane_count,
- &pipe_config->dp_m2_n2);
- }
-
- drm_dbg_kms(&dev_priv->drm, "framestart delay: %d, MSA timing delay: %d\n",
- pipe_config->framestart_delay, pipe_config->msa_timing_delay);
-
- drm_dbg_kms(&dev_priv->drm,
- "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
- pipe_config->has_audio, pipe_config->has_infoframe,
- pipe_config->infoframes.enable);
-
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
- drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
- pipe_config->infoframes.gcp);
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
- intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
- intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
- intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
- intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
- intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
- if (pipe_config->infoframes.enable &
- intel_hdmi_infoframe_enable(DP_SDP_VSC))
- intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
-
- drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
- str_yes_no(pipe_config->vrr.enable),
- pipe_config->vrr.vmin, pipe_config->vrr.vmax,
- pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
- pipe_config->vrr.flipline,
- intel_vrr_vmin_vblank_start(pipe_config),
- intel_vrr_vmax_vblank_start(pipe_config));
-
- drm_dbg_kms(&dev_priv->drm, "requested mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.mode));
- drm_dbg_kms(&dev_priv->drm, "adjusted mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
- intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
- drm_dbg_kms(&dev_priv->drm, "pipe mode: " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
- intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
- drm_dbg_kms(&dev_priv->drm,
- "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
- pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
- pipe_config->pixel_rate);
-
- drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
- pipe_config->linetime, pipe_config->ips_linetime);
-
- if (DISPLAY_VER(dev_priv) >= 9)
- drm_dbg_kms(&dev_priv->drm,
- "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
-
- if (HAS_GMCH(dev_priv))
- drm_dbg_kms(&dev_priv->drm,
- "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
- else
- drm_dbg_kms(&dev_priv->drm,
- "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
- DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
- str_enabled_disabled(pipe_config->pch_pfit.enabled),
- str_yes_no(pipe_config->pch_pfit.force_thru));
-
- drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i, drrs: %i\n",
- pipe_config->ips_enabled, pipe_config->double_wide,
- pipe_config->has_drrs);
-
- intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
-
- if (IS_CHERRYVIEW(dev_priv))
- drm_dbg_kms(&dev_priv->drm,
- "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->cgm_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
- else
- drm_dbg_kms(&dev_priv->drm,
- "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->csc_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
-
- drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
- pipe_config->hw.degamma_lut ?
- drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
- pipe_config->hw.gamma_lut ?
- drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
-
-dump_planes:
- if (!state)
- return;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe == crtc->pipe)
- intel_dump_plane_state(plane_state);
- }
-}
-
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
struct drm_device *dev = state->base.dev;
@@ -5500,27 +5060,6 @@ intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
}
-static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
-{
- if (intel_crtc_is_bigjoiner_slave(crtc_state))
- return;
-
- crtc_state->uapi.enable = crtc_state->hw.enable;
- crtc_state->uapi.active = crtc_state->hw.active;
- drm_WARN_ON(crtc_state->uapi.crtc->dev,
- drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
-
- crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
- crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
-
- drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
- crtc_state->hw.degamma_lut);
- drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
- crtc_state->hw.gamma_lut);
- drm_property_replace_blob(&crtc_state->uapi.ctm,
- crtc_state->hw.ctm);
-}
-
static void
copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
struct intel_crtc *slave_crtc)
@@ -5636,40 +5175,39 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = pipe_config->uapi.crtc;
- struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int pipe_src_w, pipe_src_h;
int base_bpp, ret, i;
bool retry = true;
- pipe_config->cpu_transcoder =
- (enum transcoder) to_intel_crtc(crtc)->pipe;
+ crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->framestart_delay = 1;
+ crtc_state->framestart_delay = 1;
/*
* Sanitize sync polarity flags based on requested ones. If neither
* positive or negative polarity is requested, treat this as meaning
* negative polarity.
*/
- if (!(pipe_config->hw.adjusted_mode.flags &
+ if (!(crtc_state->hw.adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
- pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
- if (!(pipe_config->hw.adjusted_mode.flags &
+ if (!(crtc_state->hw.adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
- pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
- ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
- pipe_config);
+ ret = compute_baseline_pipe_bpp(state, crtc);
if (ret)
return ret;
- base_bpp = pipe_config->pipe_bpp;
+ base_bpp = crtc_state->pipe_bpp;
/*
* Determine the real pipe dimensions. Note that stereo modes can
@@ -5679,21 +5217,22 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
- drm_mode_get_hv_timing(&pipe_config->hw.mode,
+ drm_mode_get_hv_timing(&crtc_state->hw.mode,
&pipe_src_w, &pipe_src_h);
- drm_rect_init(&pipe_config->pipe_src, 0, 0,
+ drm_rect_init(&crtc_state->pipe_src, 0, 0,
pipe_src_w, pipe_src_h);
for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(connector_state->best_encoder);
- if (connector_state->crtc != crtc)
+ if (connector_state->crtc != &crtc->base)
continue;
- if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
+ if (!check_single_encoder_cloning(state, crtc, encoder)) {
drm_dbg_kms(&i915->drm,
- "rejecting invalid cloning configuration\n");
+ "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
+ encoder->base.base.id, encoder->base.name);
return -EINVAL;
}
@@ -5702,20 +5241,20 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
* hooks so that the hooks can use this information safely.
*/
if (encoder->compute_output_type)
- pipe_config->output_types |=
- BIT(encoder->compute_output_type(encoder, pipe_config,
+ crtc_state->output_types |=
+ BIT(encoder->compute_output_type(encoder, crtc_state,
connector_state));
else
- pipe_config->output_types |= BIT(encoder->type);
+ crtc_state->output_types |= BIT(encoder->type);
}
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
- pipe_config->port_clock = 0;
- pipe_config->pixel_multiplier = 1;
+ crtc_state->port_clock = 0;
+ crtc_state->pixel_multiplier = 1;
/* Fill in default crtc timings, allow encoders to overwrite them. */
- drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
+ drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
CRTC_STEREO_DOUBLE);
/* Pass our mode to the connectors and the CRTC to give them a chance to
@@ -5726,39 +5265,43 @@ encoder_retry:
struct intel_encoder *encoder =
to_intel_encoder(connector_state->best_encoder);
- if (connector_state->crtc != crtc)
+ if (connector_state->crtc != &crtc->base)
continue;
- ret = encoder->compute_config(encoder, pipe_config,
+ ret = encoder->compute_config(encoder, crtc_state,
connector_state);
if (ret == -EDEADLK)
return ret;
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
+ encoder->base.base.id, encoder->base.name, ret);
return ret;
}
}
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
- if (!pipe_config->port_clock)
- pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
- * pipe_config->pixel_multiplier;
+ if (!crtc_state->port_clock)
+ crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
+ * crtc_state->pixel_multiplier;
- ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+ ret = intel_crtc_compute_config(state, crtc);
if (ret == -EDEADLK)
return ret;
if (ret == -EAGAIN) {
if (drm_WARN(&i915->drm, !retry,
- "loop in pipe configuration computation\n"))
+ "[CRTC:%d:%s] loop in pipe configuration computation\n",
+ crtc->base.base.id, crtc->base.name))
return -EINVAL;
- drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] bw constrained, retrying\n",
+ crtc->base.base.id, crtc->base.name);
retry = false;
goto encoder_retry;
}
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
+ crtc->base.base.id, crtc->base.name, ret);
return ret;
}
@@ -5766,21 +5309,22 @@ encoder_retry:
* only enable it on 6bpc panels and when it's not a compliance
* test requesting 6bpc video pattern.
*/
- pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
- !pipe_config->dither_force_disable;
+ crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
+ !crtc_state->dither_force_disable;
drm_dbg_kms(&i915->drm,
- "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
- base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+ "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
+ crtc->base.base.id, crtc->base.name,
+ base_bpp, crtc_state->pipe_bpp, crtc_state->dither);
return 0;
}
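The encoder_retry: loop above permits exactly one -EAGAIN round trip: a compute hook may ask for the configuration to be recomputed (typically after constraining bandwidth), and the one-shot retry flag turns a second -EAGAIN into a WARN plus -EINVAL rather than an endless loop. The control flow in miniature:

bool retry = true;

encoder_retry:
	/* reset anything a previous pass may have set */
	crtc_state->port_clock = 0;
	crtc_state->pixel_multiplier = 1;

	/* ... encoder->compute_config() hooks run here ... */

	ret = intel_crtc_compute_config(state, crtc);
	if (ret == -EAGAIN) {
		if (drm_WARN(&i915->drm, !retry,
			     "[CRTC:%d:%s] loop in pipe configuration computation\n",
			     crtc->base.base.id, crtc->base.name))
			return -EINVAL;		/* second -EAGAIN: give up */

		retry = false;			/* allow one more pass */
		goto encoder_retry;
	}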
static int
-intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
+intel_modeset_pipe_config_late(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector_state *conn_state;
struct drm_connector *connector;
int i;
@@ -5971,7 +5515,7 @@ static bool fastboot_enabled(struct drm_i915_private *dev_priv)
return false;
}
-static bool
+bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset)
@@ -6077,6 +5621,28 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_TIMINGS(name) do { \
+ PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
+ PIPE_CONF_CHECK_I(name.crtc_htotal); \
+ PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
+ PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
+ PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
+ PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
+ PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
+ PIPE_CONF_CHECK_I(name.crtc_vtotal); \
+ PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
+ PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
+ PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
+ PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
+} while (0)
+
+#define PIPE_CONF_CHECK_RECT(name) do { \
+ PIPE_CONF_CHECK_I(name.x1); \
+ PIPE_CONF_CHECK_I(name.x2); \
+ PIPE_CONF_CHECK_I(name.y1); \
+ PIPE_CONF_CHECK_I(name.y2); \
+} while (0)
+
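The two helpers fold the open-coded field lists removed below into single invocations: PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode) expands to the twelve crtc_* comparisons it replaces, PIPE_CONF_CHECK_RECT(pipe_src) to the four drm_rect corners. The do { ... } while (0) wrapper keeps each expansion a single statement, so the macros stay safe even under an unbraced if:

/* each helper behaves as one statement thanks to do { } while (0) */
if (!fastset)
	PIPE_CONF_CHECK_RECT(pipe_src);	/* pipe_src.x1/.x2/.y1/.y2 */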
/* This is required for BDW+ where there is only one set of registers for
* switching between high and low RR.
* This macro can be used whenever a comparison has to be made between one
@@ -6173,7 +5739,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
+ PIPE_CONF_CHECK_I(hw.enable);
+ PIPE_CONF_CHECK_I(hw.active);
+
PIPE_CONF_CHECK_I(cpu_transcoder);
+ PIPE_CONF_CHECK_I(mst_master_transcoder);
PIPE_CONF_CHECK_BOOL(has_pch_encoder);
PIPE_CONF_CHECK_I(fdi_lanes);
@@ -6194,33 +5764,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(framestart_delay);
PIPE_CONF_CHECK_I(msa_timing_delay);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
-
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
-
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
-
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
+ PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
+ PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);
PIPE_CONF_CHECK_I(pixel_multiplier);
@@ -6264,18 +5809,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
if (!fastset) {
- PIPE_CONF_CHECK_I(pipe_src.x1);
- PIPE_CONF_CHECK_I(pipe_src.y1);
- PIPE_CONF_CHECK_I(pipe_src.x2);
- PIPE_CONF_CHECK_I(pipe_src.y2);
+ PIPE_CONF_CHECK_RECT(pipe_src);
PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
- if (current_config->pch_pfit.enabled) {
- PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
- PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
- PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
- PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
- }
+ PIPE_CONF_CHECK_RECT(pch_pfit.dst);
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
@@ -6379,8 +5916,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(splitter.link_count);
PIPE_CONF_CHECK_I(splitter.pixel_overlap);
- PIPE_CONF_CHECK_I(mst_master_transcoder);
-
PIPE_CONF_CHECK_BOOL(vrr.enable);
PIPE_CONF_CHECK_I(vrr.vmin);
PIPE_CONF_CHECK_I(vrr.vmax);
@@ -6396,295 +5931,13 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
+#undef PIPE_CONF_CHECK_TIMINGS
+#undef PIPE_CONF_CHECK_RECT
#undef PIPE_CONF_QUIRK
return ret;
}
-static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *pipe_config)
-{
- if (pipe_config->has_pch_encoder) {
- int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
- &pipe_config->fdi_m_n);
- int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
-
- /*
- * FDI already provided one idea for the dotclock.
- * Yell if the encoder disagrees.
- */
- drm_WARN(&dev_priv->drm,
- !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
- "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
- fdi_dotclock, dotclock);
- }
-}
-
-static void verify_wm_state(struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct skl_hw_state {
- struct skl_ddb_entry ddb[I915_MAX_PLANES];
- struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
- struct skl_pipe_wm wm;
- } *hw;
- const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- int level, max_level = ilk_wm_max_level(dev_priv);
- struct intel_plane *plane;
- u8 hw_enabled_slices;
-
- if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
- return;
-
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return;
-
- skl_pipe_wm_get_hw_state(crtc, &hw->wm);
-
- skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
-
- hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
-
- if (DISPLAY_VER(dev_priv) >= 11 &&
- hw_enabled_slices != dev_priv->dbuf.enabled_slices)
- drm_err(&dev_priv->drm,
- "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- dev_priv->dbuf.enabled_slices,
- hw_enabled_slices);
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- const struct skl_wm_level *hw_wm_level, *sw_wm_level;
-
- /* Watermarks */
- for (level = 0; level <= max_level; level++) {
- hw_wm_level = &hw->wm.planes[plane->id].wm[level];
- sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
-
- if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
- continue;
-
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name, level,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
- sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
-
- if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
-
- if (HAS_HW_SAGV_WM(dev_priv) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
-
- if (HAS_HW_SAGV_WM(dev_priv) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- /* DDB */
- hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
-
- if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
- plane->base.base.id, plane->base.name,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
- }
- }
-
- kfree(hw);
-}
-
-static void
-verify_connector_state(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_connector *connector;
- struct drm_connector_state *new_conn_state;
- int i;
-
- for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
- struct drm_encoder *encoder = connector->encoder;
- struct intel_crtc_state *crtc_state = NULL;
-
- if (new_conn_state->crtc != &crtc->base)
- continue;
-
- if (crtc)
- crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
-
- intel_connector_verify_state(crtc_state, new_conn_state);
-
- I915_STATE_WARN(new_conn_state->best_encoder != encoder,
- "connector's atomic encoder doesn't match legacy encoder\n");
- }
-}
-
-static void
-verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
-{
- struct intel_encoder *encoder;
- struct drm_connector *connector;
- struct drm_connector_state *old_conn_state, *new_conn_state;
- int i;
-
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- bool enabled = false, found = false;
- enum pipe pipe;
-
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
- encoder->base.base.id,
- encoder->base.name);
-
- for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
- new_conn_state, i) {
- if (old_conn_state->best_encoder == &encoder->base)
- found = true;
-
- if (new_conn_state->best_encoder != &encoder->base)
- continue;
- found = enabled = true;
-
- I915_STATE_WARN(new_conn_state->crtc !=
- encoder->base.crtc,
- "connector's crtc doesn't match encoder crtc\n");
- }
-
- if (!found)
- continue;
-
- I915_STATE_WARN(!!encoder->base.crtc != enabled,
- "encoder's enabled state mismatch "
- "(expected %i, found %i)\n",
- !!encoder->base.crtc, enabled);
-
- if (!encoder->base.crtc) {
- bool active;
-
- active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active,
- "encoder detached but still enabled on pipe %c.\n",
- pipe_name(pipe));
- }
- }
-}
-
-static void
-verify_crtc_state(struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder;
- struct intel_crtc_state *pipe_config = old_crtc_state;
- struct drm_atomic_state *state = old_crtc_state->uapi.state;
- struct intel_crtc *master_crtc;
-
- __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
- intel_crtc_free_hw_state(old_crtc_state);
- intel_crtc_state_reset(old_crtc_state, crtc);
- old_crtc_state->uapi.state = state;
-
- drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
- crtc->base.name);
-
- pipe_config->hw.enable = new_crtc_state->hw.enable;
-
- intel_crtc_get_pipe_config(pipe_config);
-
- /* we keep both pipes enabled on 830 */
- if (IS_I830(dev_priv) && pipe_config->hw.active)
- pipe_config->hw.active = new_crtc_state->hw.active;
-
- I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
- "crtc active state doesn't match with hw state "
- "(expected %i, found %i)\n",
- new_crtc_state->hw.active, pipe_config->hw.active);
-
- I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
- "transitional active state does not match atomic hw state "
- "(expected %i, found %i)\n",
- new_crtc_state->hw.active, crtc->active);
-
- master_crtc = intel_master_crtc(new_crtc_state);
-
- for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
- enum pipe pipe;
- bool active;
-
- active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active != new_crtc_state->hw.active,
- "[ENCODER:%i] active %i with crtc active %i\n",
- encoder->base.base.id, active,
- new_crtc_state->hw.active);
-
- I915_STATE_WARN(active && master_crtc->pipe != pipe,
- "Encoder connected to wrong pipe %c\n",
- pipe_name(pipe));
-
- if (active)
- intel_encoder_get_config(encoder, pipe_config);
- }
-
- if (!new_crtc_state->hw.active)
- return;
-
- intel_pipe_config_sanity_check(dev_priv, pipe_config);
-
- if (!intel_pipe_config_compare(new_crtc_state,
- pipe_config, false)) {
- I915_STATE_WARN(1, "pipe state doesn't match!\n");
- intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
- intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
- }
-}
-
static void
intel_verify_planes(struct intel_atomic_state *state)
{
@@ -6698,167 +5951,6 @@ intel_verify_planes(struct intel_atomic_state *state)
plane_state->uapi.visible);
}
-static void
-verify_single_dpll_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
-{
- struct intel_dpll_hw_state dpll_hw_state;
- u8 pipe_mask;
- bool active;
-
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
-
- drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
-
- active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
-
- if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
- I915_STATE_WARN(!pll->on && pll->active_mask,
- "pll in active use but not on in sw tracking\n");
- I915_STATE_WARN(pll->on && !pll->active_mask,
- "pll is on but not used by any active pipe\n");
- I915_STATE_WARN(pll->on != active,
- "pll on state mismatch (expected %i, found %i)\n",
- pll->on, active);
- }
-
- if (!crtc) {
- I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
- "more active pll users than references: 0x%x vs 0x%x\n",
- pll->active_mask, pll->state.pipe_mask);
-
- return;
- }
-
- pipe_mask = BIT(crtc->pipe);
-
- if (new_crtc_state->hw.active)
- I915_STATE_WARN(!(pll->active_mask & pipe_mask),
- "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
- pipe_name(crtc->pipe), pll->active_mask);
- else
- I915_STATE_WARN(pll->active_mask & pipe_mask,
- "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
- pipe_name(crtc->pipe), pll->active_mask);
-
- I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
- "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
- pipe_mask, pll->state.pipe_mask);
-
- I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
- &dpll_hw_state,
- sizeof(dpll_hw_state)),
- "pll hw state mismatch\n");
-}
-
-static void
-verify_shared_dpll_state(struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (new_crtc_state->shared_dpll)
- verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
-
- if (old_crtc_state->shared_dpll &&
- old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
- u8 pipe_mask = BIT(crtc->pipe);
- struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
-
- I915_STATE_WARN(pll->active_mask & pipe_mask,
- "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
- pipe_name(crtc->pipe), pll->active_mask);
- I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
- "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
- pipe_name(crtc->pipe), pll->state.pipe_mask);
- }
-}
-
-static void
-verify_mpllb_state(struct intel_atomic_state *state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- struct intel_mpllb_state mpllb_hw_state = { 0 };
- struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct intel_encoder *encoder;
-
- if (!IS_DG2(i915))
- return;
-
- if (!new_crtc_state->hw.active)
- return;
-
- encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
- intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
-
-#define MPLLB_CHECK(name) do { \
- if (mpllb_sw_state->name != mpllb_hw_state.name) { \
- pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
- "(expected 0x%08x, found 0x%08x)", \
- mpllb_sw_state->name, \
- mpllb_hw_state.name); \
- } \
-} while (0)
-
- MPLLB_CHECK(mpllb_cp);
- MPLLB_CHECK(mpllb_div);
- MPLLB_CHECK(mpllb_div2);
- MPLLB_CHECK(mpllb_fracn1);
- MPLLB_CHECK(mpllb_fracn2);
- MPLLB_CHECK(mpllb_sscen);
- MPLLB_CHECK(mpllb_sscstep);
-
- /*
- * ref_control is handled by the hardware/firmware and never
- * programmed by the software, but the proper values are supplied
- * in the bspec for verification purposes.
- */
- MPLLB_CHECK(ref_control);
-
-#undef MPLLB_CHECK
-}
-
-static void
-intel_modeset_verify_crtc(struct intel_crtc *crtc,
- struct intel_atomic_state *state,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
- return;
-
- verify_wm_state(crtc, new_crtc_state);
- verify_connector_state(state, crtc);
- verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
- verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
- verify_mpllb_state(state, new_crtc_state);
-}
-
-static void
-verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
-{
- int i;
-
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
- verify_single_dpll_state(dev_priv,
- &dev_priv->dpll.shared_dplls[i],
- NULL, NULL);
-}
-
-static void
-intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
- struct intel_atomic_state *state)
-{
- verify_encoder_state(dev_priv, state);
- verify_connector_state(state, NULL);
- verify_disabled_dpll_state(dev_priv);
-}
-
int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -6897,8 +5989,7 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state)
return 0;
}
-static void
-intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -7733,7 +6824,7 @@ static int intel_atomic_check(struct drm_device *dev,
if (!new_crtc_state->hw.enable)
continue;
- ret = intel_modeset_pipe_config(state, new_crtc_state);
+ ret = intel_modeset_pipe_config(state, crtc);
if (ret)
goto fail;
@@ -7747,7 +6838,7 @@ static int intel_atomic_check(struct drm_device *dev,
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- ret = intel_modeset_pipe_config_late(new_crtc_state);
+ ret = intel_modeset_pipe_config_late(state, crtc);
if (ret)
goto fail;
@@ -7871,9 +6962,9 @@ static int intel_atomic_check(struct drm_device *dev,
!new_crtc_state->update_pipe)
continue;
- intel_dump_pipe_config(new_crtc_state, state,
- intel_crtc_needs_modeset(new_crtc_state) ?
- "[modeset]" : "[fastset]");
+ intel_crtc_state_dump(new_crtc_state, state,
+ intel_crtc_needs_modeset(new_crtc_state) ?
+ "modeset" : "fastset");
}
return 0;
@@ -7888,7 +6979,7 @@ static int intel_atomic_check(struct drm_device *dev,
*/
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i)
- intel_dump_pipe_config(new_crtc_state, state, "[failed]");
+ intel_crtc_state_dump(new_crtc_state, state, "failed");
return ret;
}
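intel_dump_pipe_config() and its helpers, deleted above, are replaced at their call sites by intel_crtc_state_dump(), and the context string loses its brackets ("[modeset]" becomes "modeset"). The new helper's file is not part of this excerpt; judging from the call sites above, it presumably keeps the old argument list:

/* sketch inferred from the call sites; the new file is not shown here */
void intel_crtc_state_dump(const struct intel_crtc_state *crtc_state,
			   struct intel_atomic_state *state,
			   const char *context);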
@@ -8452,7 +7543,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
new_crtc_state, i) {
if (intel_crtc_needs_modeset(new_crtc_state) ||
new_crtc_state->update_pipe) {
- modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
+ intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
}
}
@@ -8552,7 +7643,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
- modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
+ intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
@@ -9689,7 +8780,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_setup_outputs(i915);
drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
+ intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(i915);
drm_modeset_unlock_all(dev);
@@ -9842,580 +8933,17 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_de_posting_read(dev_priv, DPLL(pipe));
}
-static void
-intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
-
- if (DISPLAY_VER(dev_priv) >= 4)
- return;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_plane *plane =
- to_intel_plane(crtc->base.primary);
- struct intel_crtc *plane_crtc;
- enum pipe pipe;
-
- if (!plane->get_hw_state(plane, &pipe))
- continue;
-
- if (pipe == crtc->pipe)
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
- plane->base.base.id, plane->base.name);
-
- plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
- intel_plane_disable_noatomic(plane_crtc, plane);
- }
-}
-
-static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *encoder;
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- return true;
-
- return false;
-}
-
-static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
-{
- struct drm_device *dev = encoder->base.dev;
- struct intel_connector *connector;
-
- for_each_connector_on_encoder(dev, &encoder->base, connector)
- return connector;
-
- return NULL;
-}
-
-static void intel_sanitize_crtc(struct intel_crtc *crtc,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
-
- if (crtc_state->hw.active) {
- struct intel_plane *plane;
-
- /* Disable everything but the primary plane */
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (plane_state->uapi.visible &&
- plane->base.type != DRM_PLANE_TYPE_PRIMARY)
- intel_plane_disable_noatomic(crtc, plane);
- }
-
- /* Disable any background color/etc. set by the BIOS */
- intel_color_commit_noarm(crtc_state);
- intel_color_commit_arm(crtc_state);
- }
-
- /* Adjust the state of the output pipe according to whether we
- * have active connectors/encoders. */
- if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
- !intel_crtc_is_bigjoiner_slave(crtc_state))
- intel_crtc_disable_noatomic(crtc, ctx);
-
- if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
- /*
- * We start out with underrun reporting disabled to avoid races.
- * For correct bookkeeping mark this on active crtcs.
- *
- * Also on gmch platforms we don't have any hardware bits to
- * disable the underrun reporting. Which means we need to start
- * out with underrun reporting disabled also on inactive pipes,
- * since otherwise we'll complain about the garbage we read when
- * e.g. coming up after runtime pm.
- *
- * No protection against concurrent access is required - at
- * worst a fifo underrun happens which also sets this to false.
- */
- crtc->cpu_fifo_underrun_disabled = true;
- /*
- * We track the PCH transcoder underrun reporting state
- * within the crtc. With crtc for pipe A housing the underrun
- * reporting state for PCH transcoder A, crtc for pipe B housing
- * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
- * and marking underrun reporting as disabled for the non-existing
- * PCH transcoders B and C would prevent enabling the south
- * error interrupt (see cpt_can_enable_serr_int()).
- */
- if (intel_has_pch_trancoder(dev_priv, crtc->pipe))
- crtc->pch_fifo_underrun_disabled = true;
- }
-}
-
-static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
- /*
- * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
- * the hardware when a high res display is plugged in. DPLL P
- * divider is zero, and the pipe timings are bonkers. We'll
- * try to disable everything in that case.
- *
- * FIXME would be nice to be able to sanitize this state
- * without several WARNs, but for now let's take the easy
- * road.
- */
- return IS_SANDYBRIDGE(dev_priv) &&
- crtc_state->hw.active &&
- crtc_state->shared_dpll &&
- crtc_state->port_clock == 0;
-}
-
-static void intel_sanitize_encoder(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_connector *connector;
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct intel_crtc_state *crtc_state = crtc ?
- to_intel_crtc_state(crtc->base.state) : NULL;
-
- /* We need to check both for a crtc link (meaning that the
- * encoder is active and trying to read from a pipe) and the
- * pipe itself being active. */
- bool has_active_crtc = crtc_state &&
- crtc_state->hw.active;
-
- if (crtc_state && has_bogus_dpll_config(crtc_state)) {
- drm_dbg_kms(&dev_priv->drm,
- "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
- pipe_name(crtc->pipe));
- has_active_crtc = false;
- }
-
- connector = intel_encoder_find_connector(encoder);
- if (connector && !has_active_crtc) {
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
- encoder->base.base.id,
- encoder->base.name);
-
- /* Connector is active, but has no active pipe. This is
- * fallout from our resume register restoring. Disable
- * the encoder manually again. */
- if (crtc_state) {
- struct drm_encoder *best_encoder;
-
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] manually disabled\n",
- encoder->base.base.id,
- encoder->base.name);
-
- /* avoid oopsing in case the hooks consult best_encoder */
- best_encoder = connector->base.state->best_encoder;
- connector->base.state->best_encoder = &encoder->base;
-
- /* FIXME NULL atomic state passed! */
- if (encoder->disable)
- encoder->disable(NULL, encoder, crtc_state,
- connector->base.state);
- if (encoder->post_disable)
- encoder->post_disable(NULL, encoder, crtc_state,
- connector->base.state);
-
- connector->base.state->best_encoder = best_encoder;
- }
- encoder->base.crtc = NULL;
-
- /* Inconsistent output/port/pipe state happens presumably due to
- * a bug in one of the get_hw_state functions. Or someplace else
- * in our code, like the register restore mess on resume. Clamp
- * things to off as a safer default. */
-
- connector->base.dpms = DRM_MODE_DPMS_OFF;
- connector->base.encoder = NULL;
- }
-
- /* notify opregion of the sanitized encoder state */
- intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
-
- if (HAS_DDI(dev_priv))
- intel_ddi_sanitize_encoder_pll_mapping(encoder);
-}
-
-/* FIXME read out full plane state for all planes */
-static void readout_plane_state(struct drm_i915_private *dev_priv)
-{
- struct intel_plane *plane;
- struct intel_crtc *crtc;
-
- for_each_intel_plane(&dev_priv->drm, plane) {
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state;
- enum pipe pipe = PIPE_A;
- bool visible;
-
- visible = plane->get_hw_state(plane, &pipe);
-
- crtc = intel_crtc_for_pipe(dev_priv, pipe);
- crtc_state = to_intel_crtc_state(crtc->base.state);
-
- intel_set_plane_visible(crtc_state, plane_state, visible);
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
- plane->base.base.id, plane->base.name,
- str_enabled_disabled(visible), pipe_name(pipe));
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- fixup_plane_bitmasks(crtc_state);
- }
-}
-
-static void intel_modeset_readout_hw_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(dev_priv->cdclk.obj.state);
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(dev_priv->dbuf.obj.state);
- enum pipe pipe;
- struct intel_crtc *crtc;
- struct intel_encoder *encoder;
- struct intel_connector *connector;
- struct drm_connector_list_iter conn_iter;
- u8 active_pipes = 0;
-
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
- intel_crtc_free_hw_state(crtc_state);
- intel_crtc_state_reset(crtc_state, crtc);
-
- intel_crtc_get_pipe_config(crtc_state);
-
- crtc_state->hw.enable = crtc_state->hw.active;
-
- crtc->base.enabled = crtc_state->hw.enable;
- crtc->active = crtc_state->hw.active;
-
- if (crtc_state->hw.active)
- active_pipes |= BIT(crtc->pipe);
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] hw state readout: %s\n",
- crtc->base.base.id, crtc->base.name,
- str_enabled_disabled(crtc_state->hw.active));
- }
-
- cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;
-
- readout_plane_state(dev_priv);
-
- for_each_intel_encoder(dev, encoder) {
- struct intel_crtc_state *crtc_state = NULL;
-
- pipe = 0;
-
- if (encoder->get_hw_state(encoder, &pipe)) {
- crtc = intel_crtc_for_pipe(dev_priv, pipe);
- crtc_state = to_intel_crtc_state(crtc->base.state);
-
- encoder->base.crtc = &crtc->base;
- intel_encoder_get_config(encoder, crtc_state);
-
- /* read out to slave crtc as well for bigjoiner */
- if (crtc_state->bigjoiner_pipes) {
- struct intel_crtc *slave_crtc;
-
- /* encoder should be linked to the bigjoiner master */
- WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
-
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
- intel_crtc_bigjoiner_slave_pipes(crtc_state)) {
- struct intel_crtc_state *slave_crtc_state;
-
- slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state);
- intel_encoder_get_config(encoder, slave_crtc_state);
- }
- }
- } else {
- encoder->base.crtc = NULL;
- }
-
- if (encoder->sync_state)
- encoder->sync_state(encoder, crtc_state);
-
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
- encoder->base.base.id, encoder->base.name,
- str_enabled_disabled(encoder->base.crtc),
- pipe_name(pipe));
- }
-
- intel_dpll_readout_hw_state(dev_priv);
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- for_each_intel_connector_iter(connector, &conn_iter) {
- if (connector->get_hw_state(connector)) {
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
-
- connector->base.dpms = DRM_MODE_DPMS_ON;
-
- encoder = intel_attached_encoder(connector);
- connector->base.encoder = &encoder->base;
-
- crtc = to_intel_crtc(encoder->base.crtc);
- crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
-
- if (crtc_state && crtc_state->hw.active) {
- /*
- * This has to be done during hardware readout
- * because anything calling .crtc_disable may
- * rely on the connector_mask being accurate.
- */
- crtc_state->uapi.connector_mask |=
- drm_connector_mask(&connector->base);
- crtc_state->uapi.encoder_mask |=
- drm_encoder_mask(&encoder->base);
- }
- } else {
- connector->base.dpms = DRM_MODE_DPMS_OFF;
- connector->base.encoder = NULL;
- }
- drm_dbg_kms(&dev_priv->drm,
- "[CONNECTOR:%d:%s] hw state readout: %s\n",
- connector->base.base.id, connector->base.name,
- str_enabled_disabled(connector->base.encoder));
- }
- drm_connector_list_iter_end(&conn_iter);
-
- for_each_intel_crtc(dev, crtc) {
- struct intel_bw_state *bw_state =
- to_intel_bw_state(dev_priv->bw_obj.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane *plane;
- int min_cdclk = 0;
-
- if (crtc_state->hw.active) {
- /*
- * The initial mode needs to be set in order to keep
- * the atomic core happy. It wants a valid mode if the
- * crtc's enabled, so we do the above call.
- *
- * But we don't set all the derived state fully, hence
- * set a flag to indicate that a full recalculation is
- * needed on the next commit.
- */
- crtc_state->inherited = true;
-
- intel_crtc_update_active_timings(crtc_state);
-
- intel_crtc_copy_hw_to_uapi_state(crtc_state);
- }
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- /*
- * FIXME don't have the fb yet, so can't
- * use intel_plane_data_rate() :(
- */
- if (plane_state->uapi.visible)
- crtc_state->data_rate[plane->id] =
- 4 * crtc_state->pixel_rate;
- /*
- * FIXME don't have the fb yet, so can't
- * use plane->min_cdclk() :(
- */
- if (plane_state->uapi.visible && plane->min_cdclk) {
- if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
- crtc_state->min_cdclk[plane->id] =
- DIV_ROUND_UP(crtc_state->pixel_rate, 2);
- else
- crtc_state->min_cdclk[plane->id] =
- crtc_state->pixel_rate;
- }
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] min_cdclk %d kHz\n",
- plane->base.base.id, plane->base.name,
- crtc_state->min_cdclk[plane->id]);
- }
-
- if (crtc_state->hw.active) {
- min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (drm_WARN_ON(dev, min_cdclk < 0))
- min_cdclk = 0;
- }
-
- cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
- cdclk_state->min_voltage_level[crtc->pipe] =
- crtc_state->min_voltage_level;
-
- intel_bw_crtc_update(bw_state, crtc_state);
-
- intel_pipe_config_sanity_check(dev_priv, crtc_state);
- }
-}
-
-static void
-get_encoder_power_domains(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
-
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- struct intel_crtc_state *crtc_state;
-
- if (!encoder->get_power_domains)
- continue;
-
- /*
- * MST-primary and inactive encoders don't have a crtc state
- * and neither of these require any power domain references.
- */
- if (!encoder->base.crtc)
- continue;
-
- crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
- encoder->get_power_domains(encoder, crtc_state);
- }
-}
-
-static void intel_early_display_was(struct drm_i915_private *dev_priv)
-{
- /*
- * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
- * Also known as Wa_14010480278.
- */
- if (IS_DISPLAY_VER(dev_priv, 10, 12))
- intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
- intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
-
- if (IS_HASWELL(dev_priv)) {
- /*
- * WaRsPkgCStateDisplayPMReq:hsw
- * System hang if this isn't done before disabling all planes!
- */
- intel_de_write(dev_priv, CHICKEN_PAR1_1,
- intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- }
-
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
- /* Display WA #1142:kbl,cfl,cml */
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
- KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
- intel_de_rmw(dev_priv, CHICKEN_MISC_2,
- KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
- KBL_ARB_FILL_SPARE_14);
- }
-}
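
(For context: intel_de_rmw(i915, reg, clear, set), used for the KBL/CFL/CML workaround above, is the display read-modify-write helper. Conceptually it behaves like this sketch; the real helper also returns the old register value:)

	u32 old = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, (old & ~clear) | set);

So the KBL call above clears both KBL_ARB_FILL_SPARE_13 and KBL_ARB_FILL_SPARE_14, then sets only SPARE_14 back.
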
-
-
-/* Scan out the current hw modeset state and sanitize it
- * into a consistent software state.
- */
-static void
-intel_modeset_setup_hw_state(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder;
- struct intel_crtc *crtc;
- intel_wakeref_t wakeref;
-
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-
- intel_early_display_was(dev_priv);
- intel_modeset_readout_hw_state(dev);
-
- /* HW state is read out, now we need to sanitize this mess. */
- get_encoder_power_domains(dev_priv);
-
- intel_pch_sanitize(dev_priv);
-
- /*
- * intel_sanitize_plane_mapping() may need to do vblank
- * waits, so we need vblank interrupts restored beforehand.
- */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- drm_crtc_vblank_reset(&crtc->base);
-
- if (crtc_state->hw.active)
- intel_crtc_vblank_on(crtc_state);
- }
-
- intel_fbc_sanitize(dev_priv);
-
- intel_sanitize_plane_mapping(dev_priv);
-
- for_each_intel_encoder(dev, encoder)
- intel_sanitize_encoder(encoder);
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- intel_sanitize_crtc(crtc, ctx);
- intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
- }
-
- intel_modeset_update_connector_atomic_state(dev);
-
- intel_dpll_sanitize_state(dev_priv);
-
- if (IS_G4X(dev_priv)) {
- g4x_wm_get_hw_state(dev_priv);
- g4x_wm_sanitize(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_wm_get_hw_state(dev_priv);
- vlv_wm_sanitize(dev_priv);
- } else if (DISPLAY_VER(dev_priv) >= 9) {
- skl_wm_get_hw_state(dev_priv);
- skl_wm_sanitize(dev_priv);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_wm_get_hw_state(dev_priv);
- }
-
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_power_domain_mask put_domains;
-
- modeset_get_crtc_power_domains(crtc_state, &put_domains);
- if (drm_WARN_ON(dev, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
- modeset_put_crtc_power_domains(crtc, &put_domains);
- }
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
-
- intel_power_domains_sanitize_state(dev_priv);
-}
-
void intel_display_resume(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_atomic_state *state = i915->modeset_restore_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(i915))
return;
- dev_priv->modeset_restore_state = NULL;
+ i915->modeset_restore_state = NULL;
if (state)
state->acquire_ctx = &ctx;
@@ -10430,14 +8958,14 @@ void intel_display_resume(struct drm_device *dev)
}
if (!ret)
- ret = __intel_display_resume(dev, state, &ctx);
+ ret = __intel_display_resume(i915, state, &ctx);
- intel_enable_ipc(dev_priv);
+ intel_enable_ipc(i915);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
if (ret)
- drm_err(&dev_priv->drm,
+ drm_err(&i915->drm,
"Restoring old state failed with %i\n", ret);
if (state)
drm_atomic_state_put(state);
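
(The plane readout removed above has to estimate the data rate and minimum CDCLK before any framebuffer is known: the data rate is pessimistically assumed to be 4 bytes per pixel, and the CDCLK floor follows this minimal sketch — hypothetical helper name, logic as in the removed hunk:)

static int guess_min_cdclk(int pixel_rate, bool double_wide, int display_ver)
{
	/* per the removed hunk: these pipes only need cdclk >= pixel_rate / 2 */
	if (double_wide || display_ver >= 10)
		return DIV_ROUND_UP(pixel_rate, 2);

	return pixel_rate;
}

E.g. a 148500 kHz pixel rate on a display version 11 pipe yields a 74250 kHz floor.
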
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 187910d94ec6..fa5371036239 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -56,6 +56,7 @@ struct intel_initial_plane_config;
struct intel_load_detect_pipe;
struct intel_plane;
struct intel_plane_state;
+struct intel_power_domain_mask;
struct intel_remapped_info;
struct intel_rotation_info;
struct pci_dev;
@@ -192,7 +193,7 @@ enum plane_id {
#define for_each_dbuf_slice(__dev_priv, __slice) \
for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \
- for_each_if(INTEL_INFO(__dev_priv)->dbuf.slice_mask & BIT(__slice))
+ for_each_if(INTEL_INFO(__dev_priv)->display.dbuf.slice_mask & BIT(__slice))
#define for_each_dbuf_slice_in_mask(__dev_priv, __slice, __mask) \
for_each_dbuf_slice((__dev_priv), (__slice)) \
@@ -559,8 +560,15 @@ bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state);
bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state);
u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state);
struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state);
+bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state);
+bool intel_pipe_config_compare(const struct intel_crtc_state *current_config,
+ const struct intel_crtc_state *pipe_config,
+ bool fastset);
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);
void intel_plane_destroy(struct drm_plane *plane);
+void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
+void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -583,6 +591,8 @@ int intel_display_suspend(struct drm_device *dev);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
+void intel_encoder_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy);
@@ -635,6 +645,7 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config);
enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
@@ -652,10 +663,16 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state);
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane);
+void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state,
+ bool visible);
+void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state);
void intel_display_driver_register(struct drm_i915_private *i915);
void intel_display_driver_unregister(struct drm_i915_private *i915);
+void intel_update_watermarks(struct drm_i915_private *i915);
+
/* modesetting */
bool intel_modeset_probe_defer(struct pci_dev *pdev);
void intel_modeset_init_hw(struct drm_i915_private *i915);
@@ -667,6 +684,10 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
int intel_modeset_all_pipes(struct intel_atomic_state *state);
+void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+ struct intel_power_domain_mask *old_domains);
+void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
+ struct intel_power_domain_mask *domains);
/* modesetting asserts */
void assert_transcoder(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 452d773fd4e3..6c3954479047 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -590,6 +590,8 @@ static void intel_connector_info(struct seq_file *m,
seq_puts(m, "\tHDCP version: ");
intel_hdcp_info(m, intel_connector);
+ seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc);
+
intel_panel_info(m, intel_connector);
seq_printf(m, "\tmodes:\n");
@@ -2202,6 +2204,29 @@ static const struct file_operations i915_dsc_bpp_fops = {
.write = i915_dsc_bpp_write
};
+/*
+ * Returns the current CRTC's bpc.
+ * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
+ */
+static int i915_current_bpc_show(struct seq_file *m, void *data)
+{
+ struct intel_crtc *crtc = to_intel_crtc(m->private);
+ struct intel_crtc_state *crtc_state;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+ seq_printf(m, "Current: %u\n", crtc_state->pipe_bpp / 3);
+
+ drm_modeset_unlock(&crtc->base.mutex);
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_current_bpc);
+
/**
* intel_connector_debugfs_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@@ -2272,4 +2297,7 @@ void intel_crtc_debugfs_add(struct drm_crtc *crtc)
crtc_updates_add(crtc);
intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc));
+
+ debugfs_create_file("i915_current_bpc", 0444, crtc->debugfs_entry, crtc,
+ &i915_current_bpc_fops);
}
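
(To exercise the new node from userspace, a minimal sketch; the debugfs path follows the example in the comment above — adjust the card/crtc indices for your setup:)

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/dri/0/crtc-0/i915_current_bpc", "r");
	char buf[32];

	if (!f)
		return 1;

	if (fgets(buf, sizeof(buf), f))
		printf("%s", buf); /* e.g. "Current: 8" */

	fclose(f);
	return 0;
}
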
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 949edc983a16..589af257edeb 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -907,7 +907,9 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
if (!HAS_DISPLAY(dev_priv))
return 0;
- if (IS_DG1(dev_priv))
+ if (IS_DG2(dev_priv))
+ max_dc = 0;
+ else if (IS_DG1(dev_priv))
max_dc = 3;
else if (DISPLAY_VER(dev_priv) >= 12)
max_dc = 4;
@@ -1036,7 +1038,7 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
+ u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
enum dbuf_slice slice;
drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
@@ -1194,7 +1196,7 @@ static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
if (IS_HASWELL(dev_priv)) {
- if (snb_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+ if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
drm_dbg_kms(&dev_priv->drm,
"Failed to write to D_COMP\n");
} else {
@@ -1606,7 +1608,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 5be18eb94042..91cfd5890f46 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -474,7 +474,7 @@ static void icl_tc_cold_exit(struct drm_i915_private *i915)
int ret, tries = 0;
while (1) {
- ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0,
+ ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
250, 1);
if (ret != -EAGAIN || ++tries == 3)
break;
@@ -1739,7 +1739,7 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
* Spec states that we should time out the request after 200us,
* but the function below will time out after 500us
*/
- ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val);
+ ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
if (ret == 0) {
if (block &&
(low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 408152f9f46a..0da9b208d56e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -38,6 +38,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
@@ -279,6 +280,76 @@ struct intel_panel_bl_funcs {
u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
};
+enum drrs_type {
+ DRRS_TYPE_NONE,
+ DRRS_TYPE_STATIC,
+ DRRS_TYPE_SEAMLESS,
+};
+
+struct intel_vbt_panel_data {
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+ /* Feature bits */
+ unsigned int panel_type:4;
+ unsigned int lvds_dither:1;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+
+ bool vrr;
+
+ u8 seamless_drrs_min_refresh_rate;
+ enum drrs_type drrs_type;
+
+ struct {
+ int max_link_rate;
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+ int bpp;
+ struct edp_power_seq pps;
+ u8 drrs_msa_timing_delay;
+ bool low_vswing;
+ bool initialized;
+ bool hobl;
+ } edp;
+
+ struct {
+ bool enable;
+ bool full_link;
+ bool require_aux_wakeup;
+ int idle_frames;
+ int tp1_wakeup_time_us;
+ int tp2_tp3_wakeup_time_us;
+ int psr2_tp2_tp3_wakeup_time_us;
+ } psr;
+
+ struct {
+ u16 pwm_freq_hz;
+ u16 brightness_precision_bits;
+ bool present;
+ bool active_low_pwm;
+ u8 min_brightness; /* min_brightness/255 of max */
+ u8 controller; /* brightness controller number */
+ enum intel_backlight_type type;
+ } backlight;
+
+ /* MIPI DSI */
+ struct {
+ u16 panel_id;
+ struct mipi_config *config;
+ struct mipi_pps_data *pps;
+ u16 bl_ports;
+ u16 cabc_ports;
+ u8 seq_version;
+ u32 size;
+ u8 *data;
+ const u8 *sequence[MIPI_SEQ_MAX];
+ u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
+ enum drm_panel_orientation orientation;
+ } dsi;
+};
+
struct intel_panel {
struct list_head fixed_modes;
@@ -318,6 +389,8 @@ struct intel_panel {
const struct intel_panel_bl_funcs *pwm_funcs;
void (*power)(struct intel_connector *, bool enable);
} backlight;
+
+ struct intel_vbt_panel_data vbt;
};
struct intel_digital_port;
@@ -1474,6 +1547,7 @@ struct intel_pps {
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
+ bool initializing;
unsigned long last_power_on;
unsigned long last_backlight_off;
ktime_t panel_power_off_time;
@@ -1496,6 +1570,7 @@ struct intel_pps {
*/
bool pps_reset;
struct edp_power_seq pps_delays;
+ struct edp_power_seq bios_pps_delays;
};
struct intel_psr {
@@ -1727,13 +1802,14 @@ static inline enum dpio_channel
vlv_dig_port_to_channel(struct intel_digital_port *dig_port)
{
switch (dig_port->base.port) {
+ default:
+ MISSING_CASE(dig_port->base.port);
+ fallthrough;
case PORT_B:
case PORT_D:
return DPIO_CH0;
case PORT_C:
return DPIO_CH1;
- default:
- BUG();
}
}
@@ -1741,13 +1817,14 @@ static inline enum dpio_phy
vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
{
switch (dig_port->base.port) {
+ default:
+ MISSING_CASE(dig_port->base.port);
+ fallthrough;
case PORT_B:
case PORT_C:
return DPIO_PHY0;
case PORT_D:
return DPIO_PHY1;
- default:
- BUG();
}
}
@@ -1755,13 +1832,14 @@ static inline enum dpio_channel
vlv_pipe_to_channel(enum pipe pipe)
{
switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ fallthrough;
case PIPE_A:
case PIPE_C:
return DPIO_CH0;
case PIPE_B:
return DPIO_CH1;
- default:
- BUG();
}
}
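
(The switches above now warn and fall through to a sane default instead of BUG()ing the machine on an impossible port or pipe. MISSING_CASE() is the usual i915 helper for that; its shape is roughly the following sketch — see i915_utils.h for the authoritative definition:)

#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
			     __stringify(x), (long)(x))
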
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index a171d42a5c5b..fa9ef591b885 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -52,6 +52,10 @@
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define DG2_DMC_PATH DMC_PATH(dg2, 2, 06)
+#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 06)
+MODULE_FIRMWARE(DG2_DMC_PATH);
+
#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16)
#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);
@@ -244,9 +248,14 @@ struct stepping_info {
char substepping;
};
+static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
+{
+ return i915->dmc.dmc_info[dmc_id].payload;
+}
+
bool intel_dmc_has_payload(struct drm_i915_private *i915)
{
- return i915->dmc.dmc_info[DMC_FW_MAIN].payload;
+ return has_dmc_id_fw(i915, DMC_FW_MAIN);
}
static const struct stepping_info *
@@ -268,6 +277,85 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}
+static void
+disable_flip_queue_event(struct drm_i915_private *i915,
+ i915_reg_t ctl_reg, i915_reg_t htp_reg)
+{
+ u32 event_ctl;
+ u32 event_htp;
+
+ event_ctl = intel_de_read(i915, ctl_reg);
+ event_htp = intel_de_read(i915, htp_reg);
+ if (event_ctl != (DMC_EVT_CTL_ENABLE |
+ DMC_EVT_CTL_RECURRING |
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
+ !event_htp) {
+ drm_dbg_kms(&i915->drm,
+ "Unexpected DMC event configuration (control %08x htp %08x)\n",
+ event_ctl, event_htp);
+ return;
+ }
+
+ intel_de_write(i915, ctl_reg,
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_FALSE));
+ intel_de_write(i915, htp_reg, 0);
+}
+
+static bool
+get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
+ i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
+{
+ switch (dmc_id) {
+ case DMC_FW_MAIN:
+ if (DISPLAY_VER(i915) == 12) {
+ *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
+ *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
+
+ return true;
+ }
+ break;
+ case DMC_FW_PIPEA ... DMC_FW_PIPED:
+ if (IS_DG2(i915)) {
+ *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
+ *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
+
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+static void
+disable_all_flip_queue_events(struct drm_i915_private *i915)
+{
+ int dmc_id;
+
+ /* TODO: check if the following applies to all D13+ platforms. */
+ if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
+ return;
+
+ for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) {
+ i915_reg_t ctl_reg;
+ i915_reg_t htp_reg;
+
+ if (!has_dmc_id_fw(i915, dmc_id))
+ continue;
+
+ if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
+ continue;
+
+ disable_flip_queue_event(i915, ctl_reg, htp_reg);
+ }
+}
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @dev_priv: i915 drm device.
@@ -308,6 +396,13 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
dev_priv->dmc.dc_state = 0;
gen9_set_dc_state_debugmask(dev_priv);
+
+ /*
+ * Flip queue events need to be disabled before enabling DC5/6.
+ * i915 doesn't use the flip queue feature, so just disable it
+ * here.
+ */
+ disable_all_flip_queue_events(dev_priv);
}
void assert_dmc_loaded(struct drm_i915_private *i915)
@@ -732,7 +827,11 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
*/
intel_dmc_runtime_pm_get(dev_priv);
- if (IS_ALDERLAKE_P(dev_priv)) {
+ if (IS_DG2(dev_priv)) {
+ dmc->fw_path = DG2_DMC_PATH;
+ dmc->required_version = DG2_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
+ } else if (IS_ALDERLAKE_P(dev_priv)) {
dmc->fw_path = ADLP_DMC_PATH;
dmc->required_version = ADLP_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
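
(For reference, assuming the usual DMC_PATH()/DMC_VERSION() stringification, the new DG2 entry requests this firmware blob from userspace, which MODULE_FIRMWARE(DG2_DMC_PATH) above also advertises:)

/* DMC_PATH(dg2, 2, 06) -> "i915/dg2_dmc_ver2_06.bin" */
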
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 7853827988d4..238620b55966 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -10,28 +10,69 @@
#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
+
+#define _ADLP_PIPEDMC_REG_MMIO_BASE_A 0x5f000
+#define _TGL_PIPEDMC_REG_MMIO_BASE_A 0x92000
+
+#define __PIPEDMC_REG_MMIO_BASE(i915, dmc_id) \
+ ((DISPLAY_VER(i915) >= 13 ? _ADLP_PIPEDMC_REG_MMIO_BASE_A : \
+ _TGL_PIPEDMC_REG_MMIO_BASE_A) + \
+ 0x400 * ((dmc_id) - 1))
+
+#define __DMC_REG_MMIO_BASE 0x8f000
+
+#define _DMC_REG_MMIO_BASE(i915, dmc_id) \
+ ((dmc_id) == DMC_FW_MAIN ? __DMC_REG_MMIO_BASE : \
+ __PIPEDMC_REG_MMIO_BASE(i915, dmc_id))
+
+#define _DMC_REG(i915, dmc_id, reg) \
+ ((reg) - __DMC_REG_MMIO_BASE + _DMC_REG_MMIO_BASE(i915, dmc_id))
+
+#define _DMC_EVT_HTP_0 0x8f004
+
+#define DMC_EVT_HTP(i915, dmc_id, handler) \
+ _MMIO(_DMC_REG(i915, dmc_id, _DMC_EVT_HTP_0) + 4 * (handler))
+
+#define _DMC_EVT_CTL_0 0x8f034
+
+#define DMC_EVT_CTL(i915, dmc_id, handler) \
+ _MMIO(_DMC_REG(i915, dmc_id, _DMC_EVT_CTL_0) + 4 * (handler))
+
+#define DMC_EVT_CTL_ENABLE REG_BIT(31)
+#define DMC_EVT_CTL_RECURRING REG_BIT(30)
+#define DMC_EVT_CTL_TYPE_MASK REG_GENMASK(17, 16)
+#define DMC_EVT_CTL_TYPE_LEVEL_0 0
+#define DMC_EVT_CTL_TYPE_LEVEL_1 1
+#define DMC_EVT_CTL_TYPE_EDGE_1_0 2
+#define DMC_EVT_CTL_TYPE_EDGE_0_1 3
+
+#define DMC_EVT_CTL_EVENT_ID_MASK REG_GENMASK(15, 8)
+#define DMC_EVT_CTL_EVENT_ID_FALSE 0x01
+/* An event handler scheduled to run at a 1 kHz frequency. */
+#define DMC_EVT_CTL_EVENT_ID_CLK_MSEC 0xbf
+
#define DMC_HTP_ADDR_SKL 0x00500034
#define DMC_SSP_BASE _MMIO(0x8F074)
#define DMC_HTP_SKL _MMIO(0x8F004)
#define DMC_LAST_WRITE _MMIO(0x8F034)
#define DMC_LAST_WRITE_VALUE 0xc003b400
#define DMC_MMIO_START_RANGE 0x80000
-#define DMC_MMIO_END_RANGE 0x8FFFF
-#define DMC_V1_MMIO_START_RANGE 0x80000
-#define TGL_MAIN_MMIO_START 0x8F000
-#define TGL_MAIN_MMIO_END 0x8FFFF
-#define _TGL_PIPEA_MMIO_START 0x92000
-#define _TGL_PIPEA_MMIO_END 0x93FFF
-#define _TGL_PIPEB_MMIO_START 0x96000
-#define _TGL_PIPEB_MMIO_END 0x97FFF
-#define ADLP_PIPE_MMIO_START 0x5F000
-#define ADLP_PIPE_MMIO_END 0x5FFFF
+#define DMC_MMIO_END_RANGE 0x8FFFF
+#define DMC_V1_MMIO_START_RANGE 0x80000
+#define TGL_MAIN_MMIO_START 0x8F000
+#define TGL_MAIN_MMIO_END 0x8FFFF
+#define _TGL_PIPEA_MMIO_START 0x92000
+#define _TGL_PIPEA_MMIO_END 0x93FFF
+#define _TGL_PIPEB_MMIO_START 0x96000
+#define _TGL_PIPEB_MMIO_END 0x97FFF
+#define ADLP_PIPE_MMIO_START 0x5F000
+#define ADLP_PIPE_MMIO_END 0x5FFFF
#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
- _TGL_PIPEB_MMIO_START)
+ _TGL_PIPEB_MMIO_START)
#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
- _TGL_PIPEB_MMIO_END)
+ _TGL_PIPEB_MMIO_END)
#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
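
(To see how the new remapping macros compose, a worked example, assuming the conventional dmc_id enumeration where DMC_FW_MAIN == 0 and DMC_FW_PIPEA == 1: DMC_EVT_CTL(i915, DMC_FW_PIPEA, 2) on a DISPLAY_VER(i915) >= 13 part evaluates as:)

/*
 * base = _ADLP_PIPEDMC_REG_MMIO_BASE_A + 0x400 * (1 - 1)   = 0x5f000
 * reg  = base + (_DMC_EVT_CTL_0 - __DMC_REG_MMIO_BASE)
 *      = 0x5f000 + (0x8f034 - 0x8f000)                     = 0x5f034
 * ctl  = reg + 4 * 2                                       = 0x5f03c
 */
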
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index ff67899522cf..32292c0be2bd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -40,6 +40,7 @@
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
@@ -434,6 +435,26 @@ static int dg1_max_source_rate(struct intel_dp *intel_dp)
return 810000;
}
+static int vbt_max_link_rate(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int max_rate;
+
+ max_rate = intel_bios_dp_max_link_rate(encoder);
+
+ if (intel_dp_is_edp(intel_dp)) {
+ struct intel_connector *connector = intel_dp->attached_connector;
+ int edp_max_rate = connector->panel.vbt.edp.max_link_rate;
+
+ if (max_rate && edp_max_rate)
+ max_rate = min(max_rate, edp_max_rate);
+ else if (edp_max_rate)
+ max_rate = edp_max_rate;
+ }
+
+ return max_rate;
+}
+
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
@@ -455,7 +476,6 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
162000, 270000
};
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
const int *source_rates;
int size, max_rate = 0, vbt_max_rate;
@@ -491,7 +511,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
size = ARRAY_SIZE(g4x_rates);
}
- vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
+ vbt_max_rate = vbt_max_link_rate(intel_dp);
if (max_rate && vbt_max_rate)
max_rate = min(max_rate, vbt_max_rate);
else if (vbt_max_rate)
@@ -684,7 +704,6 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
*/
bits_per_pixel = (link_clock * lane_count * 8) /
intel_dp_mode_to_fec_clock(mode_clock);
- drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
@@ -693,9 +712,6 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
if (bigjoiner)
max_bpp_small_joiner_ram *= 2;
- drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
- max_bpp_small_joiner_ram);
-
/*
* Greatest allowed DSC BPP = MIN (output BPP from available Link BW
* check, output bpp from small joiner RAM check)
@@ -707,7 +723,6 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
i915->max_cdclk_freq * 48 /
intel_dp_mode_to_fec_clock(mode_clock);
- drm_dbg_kms(&i915->drm, "Max big joiner bpp: %u\n", max_bpp_bigjoiner);
bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
}
@@ -1246,11 +1261,12 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
if (intel_dp_is_edp(intel_dp)) {
/* Get bpp from vbt only for panels that don't have bpp in edid */
if (intel_connector->base.display_info.bpc == 0 &&
- dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
+ intel_connector->panel.vbt.edp.bpp &&
+ intel_connector->panel.vbt.edp.bpp < bpp) {
drm_dbg_kms(&dev_priv->drm,
"clamping bpp for eDP panel to BIOS-provided %i\n",
- dev_priv->vbt.edp.bpp);
- bpp = dev_priv->vbt.edp.bpp;
+ intel_connector->panel.vbt.edp.bpp);
+ bpp = intel_connector->panel.vbt.edp.bpp;
}
}
@@ -1906,7 +1922,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
}
if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
- pipe_config->msa_timing_delay = i915->vbt.edp.drrs_msa_timing_delay;
+ pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;
pipe_config->has_drrs = true;
@@ -2736,6 +2752,33 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
DRM_MODE_ARG(mode));
}
+void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) {
+ /*
+ * This is a big fat ugly hack.
+ *
+ * Some machines in UEFI boot mode provide us with a VBT that has 18
+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+ * unknown we fail to light up. Yet the same BIOS boots up with
+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+ * max, not what it tells us to use.
+ *
+ * Note: This will still be broken if the eDP panel is not lit
+ * up by the BIOS, and thus we can't get the mode at module
+ * load.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_bpp, connector->panel.vbt.edp.bpp);
+ connector->panel.vbt.edp.bpp = pipe_bpp;
+ }
+}
+
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -2850,9 +2893,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_max_sink_lane_count(intel_dp);
- intel_dp_set_common_rates(intel_dp);
- intel_dp_reset_max_link_params(intel_dp);
-
/* Read the eDP DSC DPCD registers */
if (DISPLAY_VER(dev_priv) >= 10)
intel_dp_get_dsc_sink_cap(intel_dp);
@@ -4550,7 +4590,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
edid = intel_dp_get_edid(intel_dp);
connector->detect_edid = edid;
- vrr_capable = intel_vrr_is_capable(&connector->base);
+ vrr_capable = intel_vrr_is_capable(connector);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
@@ -5155,6 +5195,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector = &intel_connector->base;
struct drm_display_mode *fixed_mode;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
enum pipe pipe = INVALID_PIPE;
struct edid *edid;
@@ -5211,8 +5252,12 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_connector->edid = edid;
+ intel_bios_init_panel(dev_priv, &intel_connector->panel,
+ encoder->devdata, IS_ERR(edid) ? NULL : edid);
+
intel_panel_add_edid_fixed_modes(intel_connector,
- dev_priv->vbt.drrs_type != DRRS_TYPE_NONE);
+ intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE,
+ intel_vrr_is_capable(intel_connector));
/* MSO requires information from the EDID */
intel_edp_mso_init(intel_dp);
@@ -5254,6 +5299,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_edp_add_properties(intel_dp);
+ intel_pps_init_late(intel_dp);
+
return true;
out_vdd_off:
@@ -5334,11 +5381,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
type = DRM_MODE_CONNECTOR_DisplayPort;
}
- intel_dp_set_source_rates(intel_dp);
intel_dp_set_default_sink_rates(intel_dp);
intel_dp_set_default_max_sink_lane_count(intel_dp);
- intel_dp_set_common_rates(intel_dp);
- intel_dp_reset_max_link_params(intel_dp);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
@@ -5366,16 +5410,19 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
- /* init MST on ports that can support it */
- intel_dp_mst_encoder_init(dig_port,
- intel_connector->base.base.id);
-
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
- intel_dp_mst_encoder_cleanup(dig_port);
goto fail;
}
+ intel_dp_set_source_rates(intel_dp);
+ intel_dp_set_common_rates(intel_dp);
+ intel_dp_reset_max_link_params(intel_dp);
+
+ /* init MST on ports that can support it */
+ intel_dp_mst_encoder_init(dig_port,
+ intel_connector->base.base.id);
+
intel_dp_add_properties(intel_dp, connector);
if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
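
(Note the ordering change above: intel_dp_set_source_rates() now runs after intel_edp_init_connector(), because vbt_max_link_rate() consults the per-panel VBT that intel_bios_init_panel() populates there. The clamp itself reduces to "take the minimum of whichever limits are actually set"; as a sketch, with a hypothetical helper name:)

static int clamp_link_rate(int global_max, int panel_max)
{
	if (global_max && panel_max)
		return min(global_max, panel_max);

	return global_max ?: panel_max;
}
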
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index d457e17bdc57..a54902c713a3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -29,6 +29,7 @@ struct link_config_limits {
int min_bpp, max_bpp;
};
+void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp);
void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits);
@@ -63,6 +64,7 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
+void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp);
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index fb6cf30ee628..c92d5bb2326a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -370,7 +370,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
int ret;
ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
- i915->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
+ panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
&current_level, &current_mode);
if (ret < 0)
return ret;
@@ -454,7 +454,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
case INTEL_DP_AUX_BACKLIGHT_OFF:
return -ENODEV;
case INTEL_DP_AUX_BACKLIGHT_AUTO:
- switch (i915->vbt.backlight.type) {
+ switch (panel->vbt.backlight.type) {
case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE:
try_vesa_interface = true;
break;
@@ -466,7 +466,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
}
break;
case INTEL_DP_AUX_BACKLIGHT_ON:
- if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
+ if (panel->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
try_intel_interface = true;
try_vesa_interface = true;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 6eef0b8a91eb..5262f16b45ac 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -933,7 +933,17 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- return 0;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+
+ if (DISPLAY_VER(dev_priv) < 11 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ return intel_compute_shared_dplls(state, crtc, encoder);
}
static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -944,21 +954,12 @@ static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- int ret;
if (DISPLAY_VER(dev_priv) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- ret = intel_reserve_shared_dplls(state, crtc, encoder);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return ret;
- }
-
- return 0;
+ return intel_reserve_shared_dplls(state, crtc, encoder);
}
static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
@@ -1125,39 +1126,26 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
+ refclk, NULL, &crtc_state->dpll))
return -EINVAL;
- }
ilk_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
- return 0;
+ return intel_compute_shared_dplls(state, crtc, NULL);
}
static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- int ret;
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (!crtc_state->has_pch_encoder)
return 0;
- ret = intel_reserve_shared_dplls(state, crtc, NULL);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return ret;
- }
-
- return 0;
+ return intel_reserve_shared_dplls(state, crtc, NULL);
}
void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
@@ -1198,7 +1186,6 @@ void chv_compute_dpll(struct intel_crtc_state *crtc_state)
static int chv_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit = &intel_limits_chv;
@@ -1206,10 +1193,8 @@ static int chv_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
+ refclk, NULL, &crtc_state->dpll))
return -EINVAL;
- }
chv_compute_dpll(crtc_state);
@@ -1219,7 +1204,6 @@ static int chv_crtc_compute_clock(struct intel_atomic_state *state,
static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit = &intel_limits_vlv;
@@ -1228,7 +1212,6 @@ static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -1270,11 +1253,8 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
+ refclk, NULL, &crtc_state->dpll))
return -EINVAL;
- }
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
@@ -1306,11 +1286,8 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
+ refclk, NULL, &crtc_state->dpll))
return -EINVAL;
- }
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
@@ -1342,11 +1319,8 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
+ refclk, NULL, &crtc_state->dpll))
return -EINVAL;
- }
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
@@ -1380,11 +1354,8 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->clock_set &&
!i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
+ refclk, NULL, &crtc_state->dpll))
return -EINVAL;
- }
i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
@@ -1436,6 +1407,7 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ int ret;
drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
@@ -1448,7 +1420,14 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->hw.enable)
return 0;
- return i915->dpll_funcs->crtc_compute_clock(state, crtc);
+ ret = i915->dpll_funcs->crtc_compute_clock(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
+ crtc->base.base.id, crtc->base.name);
+ return ret;
+ }
+
+ return 0;
}
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -1457,6 +1436,7 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ int ret;
drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
@@ -1469,7 +1449,14 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
if (!i915->dpll_funcs->crtc_get_shared_dpll)
return 0;
- return i915->dpll_funcs->crtc_get_shared_dpll(state, crtc);
+ ret = i915->dpll_funcs->crtc_get_shared_dpll(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
+ crtc->base.base.id, crtc->base.name);
+ return ret;
+ }
+
+ return 0;
}
void
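
(The intel_dpll.c changes above centralize PLL-failure logging in the two dispatchers and split PLL handling into a pure compute step and a separate reserve step; schematically, with names as in the hunks above:)

/* atomic check phase: calculate dpll_hw_state, touch no shared state */
ret = i915->dpll_funcs->crtc_compute_clock(state, crtc);

/* later: reserve a matching shared PLL; may fail if none is free */
if (ret == 0)
	ret = i915->dpll_funcs->crtc_get_shared_dpll(state, crtc);
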
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 88c2f38aa870..118598c9a809 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -90,6 +90,9 @@ struct intel_shared_dpll_funcs {
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
+ int (*compute_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
int (*get_dplls)(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
@@ -514,6 +517,13 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
udelay(200);
}
+static int ibx_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ return 0;
+}
+
static int ibx_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -578,6 +588,7 @@ static const struct dpll_info pch_plls[] = {
static const struct intel_dpll_mgr pch_pll_mgr = {
.dpll_info = pch_plls,
+ .compute_dplls = ibx_compute_dpll,
.get_dplls = ibx_get_dpll,
.put_dplls = intel_put_dpll,
.dump_hw_state = ibx_dump_hw_state,
@@ -894,33 +905,35 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-static struct intel_shared_dpll *
-hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int
+hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
- u32 val;
unsigned int p, n2, r2;
hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
- val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
- WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p);
-
- crtc_state->dpll_hw_state.wrpll = val;
+ crtc_state->dpll_hw_state.wrpll =
+ WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_WRPLL2) |
- BIT(DPLL_ID_WRPLL1));
+ return 0;
+}
- if (!pll)
- return NULL;
+static struct intel_shared_dpll *
+hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
- return pll;
+ return intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_WRPLL2) |
+ BIT(DPLL_ID_WRPLL1));
}
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
@@ -963,6 +976,24 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
return (refclk * n / 10) / (p * r) * 2;
}
+static int
+hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ int clock = crtc_state->port_clock;
+
+ switch (clock / 2) {
+ case 81000:
+ case 135000:
+ case 270000:
+ return 0;
+ default:
+ drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
+ clock);
+ return -EINVAL;
+ }
+}
+
static struct intel_shared_dpll *
hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
{
@@ -982,8 +1013,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
pll_id = DPLL_ID_LCPLL_2700;
break;
default:
- drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
- clock);
+ MISSING_CASE(clock / 2);
return NULL;
}
@@ -1019,18 +1049,28 @@ static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
return link_clock * 2;
}
-static struct intel_shared_dpll *
-hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int
+hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
- return NULL;
+ return -EINVAL;
+
+ crtc_state->dpll_hw_state.spll =
+ SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
- crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
- SPLL_REF_MUXED_SSC;
+ return 0;
+}
+
+static struct intel_shared_dpll *
+hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
BIT(DPLL_ID_SPLL));
@@ -1060,6 +1100,23 @@ static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
return link_clock * 2;
}
+static int hsw_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return hsw_ddi_wrpll_compute_dpll(state, crtc);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ return hsw_ddi_lcpll_compute_dpll(crtc_state);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ return hsw_ddi_spll_compute_dpll(state, crtc);
+ else
+ return -EINVAL;
+}
+
static int hsw_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -1153,6 +1210,7 @@ static const struct dpll_info hsw_plls[] = {
static const struct intel_dpll_mgr hsw_pll_mgr = {
.dpll_info = hsw_plls,
+ .compute_dplls = hsw_compute_dpll,
.get_dplls = hsw_get_dpll,
.put_dplls = intel_put_dpll,
.update_ref_clks = hsw_update_dpll_ref_clks,
@@ -1545,10 +1603,8 @@ skip_remaining_dividers:
break;
}
- if (!ctx.p) {
- DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+ if (!ctx.p)
return -EINVAL;
- }
/*
* gcc incorrectly analyses that these can be used without being
@@ -1741,23 +1797,28 @@ static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
return link_clock * 2;
}
-static int skl_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int skl_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
- int ret;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- ret = skl_ddi_hdmi_pll_dividers(crtc_state);
+ return skl_ddi_hdmi_pll_dividers(crtc_state);
else if (intel_crtc_has_dp_encoder(crtc_state))
- ret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
+ return skl_ddi_dp_set_dpll_hw_state(crtc_state);
else
- ret = -EINVAL;
- if (ret)
- return ret;
+ return -EINVAL;
+}
+
+static int skl_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_shared_dpll *pll;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
pll = intel_find_shared_dpll(state, crtc,
@@ -1834,6 +1895,7 @@ static const struct dpll_info skl_plls[] = {
static const struct intel_dpll_mgr skl_pll_mgr = {
.dpll_info = skl_plls,
+ .compute_dplls = skl_compute_dpll,
.get_dplls = skl_get_dpll,
.put_dplls = intel_put_dpll,
.update_ref_clks = skl_update_dpll_ref_clks,
@@ -2081,19 +2143,14 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct dpll *clk_div)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
/* Calculate HDMI div */
/*
* FIXME: tie the following calculation into
* i9xx_crtc_compute_clock
*/
- if (!bxt_find_best_dpll(crtc_state, clk_div)) {
- drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
- crtc_state->port_clock,
- pipe_name(crtc->pipe));
+ if (!bxt_find_best_dpll(crtc_state, clk_div))
return -EINVAL;
- }
drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
@@ -2225,6 +2282,21 @@ static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
+static int bxt_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
+ else
+ return -EINVAL;
+}
+
static int bxt_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -2234,16 +2306,6 @@ static int bxt_get_dpll(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id id;
- int ret;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- ret = bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
- else if (intel_crtc_has_dp_encoder(crtc_state))
- ret = bxt_ddi_dp_set_dpll_hw_state(crtc_state);
- else
- ret = -EINVAL;
- if (ret)
- return ret;
/* 1:1 mapping between ports and PLLs */
id = (enum intel_dpll_id) encoder->port;
@@ -2302,6 +2364,7 @@ static const struct dpll_info bxt_plls[] = {
static const struct intel_dpll_mgr bxt_pll_mgr = {
.dpll_info = bxt_plls,
+ .compute_dplls = bxt_compute_dpll,
.get_dplls = bxt_get_dpll,
.put_dplls = intel_put_dpll,
.update_ref_clks = bxt_update_dpll_ref_clks,
@@ -2809,11 +2872,8 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
pll_state, is_dkl);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to find divisors for clock %d\n", clock);
+ if (ret)
return ret;
- }
m1div = 2;
m2div_int = dco_khz / (refclk_khz * m1div);
@@ -2823,12 +2883,8 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
m2div_int = dco_khz / (refclk_khz * m1div);
}
- if (m2div_int > 255) {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to find mdiv for clock %d\n",
- clock);
+ if (m2div_int > 255)
return -EINVAL;
- }
}
m2div_rem = dco_khz % (refclk_khz * m1div);
@@ -3119,18 +3175,15 @@ static u32 intel_get_hti_plls(struct drm_i915_private *i915)
return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
}
-static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct skl_wrpll_params pll_params = { };
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum port port = encoder->port;
- unsigned long dpll_mask;
+ struct skl_wrpll_params pll_params = {};
int ret;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
@@ -3139,14 +3192,26 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
else
ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Could not calculate combo PHY PLL state.\n");
+ if (ret)
return ret;
- }
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
+ return 0;
+}
+
+static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ enum port port = encoder->port;
+ unsigned long dpll_mask;
+
if (IS_ALDERLAKE_S(dev_priv)) {
dpll_mask =
BIT(DPLL_ID_DG1_DPLL3) |
@@ -3183,12 +3248,8 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
dpll_mask);
- if (!port_dpll->pll) {
- drm_dbg_kms(&dev_priv->drm,
- "No combo PHY PLL found for [ENCODER:%d:%s]\n",
- encoder->base.base.id, encoder->base.name);
+ if (!port_dpll->pll)
return -EINVAL;
- }
intel_reference_shared_dpll(state, crtc,
port_dpll->pll, &port_dpll->hw_state);
@@ -3198,47 +3259,55 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
return 0;
}
-static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct skl_wrpll_params pll_params = { };
- struct icl_port_dpll *port_dpll;
- enum intel_dpll_id dpll_id;
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ struct skl_wrpll_params pll_params = {};
int ret;
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
ret = icl_calc_tbt_pll(crtc_state, &pll_params);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Could not calculate TBT PLL state.\n");
+ if (ret)
return ret;
- }
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
+ ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ enum intel_dpll_id dpll_id;
+ int ret;
+
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
BIT(DPLL_ID_ICL_TBTPLL));
- if (!port_dpll->pll) {
- drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
+ if (!port_dpll->pll)
return -EINVAL;
- }
intel_reference_shared_dpll(state, crtc,
port_dpll->pll, &port_dpll->hw_state);
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
- ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Could not calculate MG PHY PLL state.\n");
- goto err_unreference_tbt_pll;
- }
-
dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
encoder->port));
port_dpll->pll = intel_find_shared_dpll(state, crtc,
@@ -3246,7 +3315,6 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
BIT(dpll_id));
if (!port_dpll->pll) {
ret = -EINVAL;
- drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
goto err_unreference_tbt_pll;
}
intel_reference_shared_dpll(state, crtc,
@@ -3263,6 +3331,23 @@ err_unreference_tbt_pll:
return ret;
}
+static int icl_compute_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (intel_phy_is_combo(dev_priv, phy))
+ return icl_compute_combo_phy_dpll(state, crtc);
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return icl_compute_tc_phy_dplls(state, crtc);
+
+ MISSING_CASE(phy);
+
+ return 0;
+}
+
static int icl_get_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -3943,6 +4028,7 @@ static const struct dpll_info icl_plls[] = {
static const struct intel_dpll_mgr icl_pll_mgr = {
.dpll_info = icl_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
@@ -3959,6 +4045,7 @@ static const struct dpll_info ehl_plls[] = {
static const struct intel_dpll_mgr ehl_pll_mgr = {
.dpll_info = ehl_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
@@ -3987,6 +4074,7 @@ static const struct dpll_info tgl_plls[] = {
static const struct intel_dpll_mgr tgl_pll_mgr = {
.dpll_info = tgl_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
@@ -4003,6 +4091,7 @@ static const struct dpll_info rkl_plls[] = {
static const struct intel_dpll_mgr rkl_pll_mgr = {
.dpll_info = rkl_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
@@ -4019,6 +4108,7 @@ static const struct dpll_info dg1_plls[] = {
static const struct intel_dpll_mgr dg1_pll_mgr = {
.dpll_info = dg1_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
@@ -4035,6 +4125,7 @@ static const struct dpll_info adls_plls[] = {
static const struct intel_dpll_mgr adls_pll_mgr = {
.dpll_info = adls_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
@@ -4054,6 +4145,7 @@ static const struct dpll_info adlp_plls[] = {
static const struct intel_dpll_mgr adlp_pll_mgr = {
.dpll_info = adlp_plls,
+ .compute_dplls = icl_compute_dplls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
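Taken together, the hunks above wire the same new hook into every platform's
manager table. A minimal sketch of the resulting vtable shape (editorial;
member order abridged, only the hooks visible in this diff are shown):

	struct intel_dpll_mgr {
		const struct dpll_info *dpll_info;

		/* new in this series: pure computation, claims no PLL */
		int (*compute_dplls)(struct intel_atomic_state *state,
				     struct intel_crtc *crtc,
				     struct intel_encoder *encoder);
		int (*get_dplls)(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder);
		void (*put_dplls)(struct intel_atomic_state *state,
				  struct intel_crtc *crtc);
		void (*update_ref_clks)(struct drm_i915_private *i915);
		/* ... remaining hooks unchanged ... */
	};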
@@ -4119,6 +4211,33 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
}
/**
+ * intel_compute_shared_dplls - compute DPLL state for CRTC and encoder combination
+ * @state: atomic state
+ * @crtc: CRTC to compute DPLLs for
+ * @encoder: encoder
+ *
+ * This function computes the DPLL state for the given CRTC and encoder.
+ *
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int intel_compute_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+
+ if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
+ return -EINVAL;
+
+ return dpll_mgr->compute_dplls(state, crtc, encoder);
+}
+
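A minimal usage sketch for the new entry point (editorial; the wrapper below
is hypothetical, while intel_compute_shared_dplls() and
intel_reserve_shared_dplls() are the real entry points declared in
intel_dpll_mgr.h):

	static int example_crtc_dpll_setup(struct intel_atomic_state *state,
					   struct intel_crtc *crtc,
					   struct intel_encoder *encoder)
	{
		int ret;

		/* compute phase: fills the DPLL hw state, claims nothing */
		ret = intel_compute_shared_dplls(state, crtc, encoder);
		if (ret)
			return ret;

		/* reserve phase: picks and references an actual shared PLL */
		return intel_reserve_shared_dplls(state, crtc, encoder);
	}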
+/**
* intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
* @state: atomic state
* @crtc: CRTC to reserve DPLLs for
@@ -4330,3 +4449,91 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
hw_state->fp1);
}
}
+
+static void
+verify_single_dpll_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_dpll_hw_state dpll_hw_state;
+ u8 pipe_mask;
+ bool active;
+
+ memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+
+ drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
+
+ active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
+
+ if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
+ I915_STATE_WARN(!pll->on && pll->active_mask,
+ "pll in active use but not on in sw tracking\n");
+ I915_STATE_WARN(pll->on && !pll->active_mask,
+ "pll is on but not used by any active pipe\n");
+ I915_STATE_WARN(pll->on != active,
+ "pll on state mismatch (expected %i, found %i)\n",
+ pll->on, active);
+ }
+
+ if (!crtc) {
+ I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
+ "more active pll users than references: 0x%x vs 0x%x\n",
+ pll->active_mask, pll->state.pipe_mask);
+
+ return;
+ }
+
+ pipe_mask = BIT(crtc->pipe);
+
+ if (new_crtc_state->hw.active)
+ I915_STATE_WARN(!(pll->active_mask & pipe_mask),
+ "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
+ pipe_name(crtc->pipe), pll->active_mask);
+ else
+ I915_STATE_WARN(pll->active_mask & pipe_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
+ pipe_name(crtc->pipe), pll->active_mask);
+
+ I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
+ "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
+ pipe_mask, pll->state.pipe_mask);
+
+ I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
+ &dpll_hw_state,
+ sizeof(dpll_hw_state)),
+ "pll hw state mismatch\n");
+}
+
+void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (new_crtc_state->shared_dpll)
+ verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
+ crtc, new_crtc_state);
+
+ if (old_crtc_state->shared_dpll &&
+ old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
+ u8 pipe_mask = BIT(crtc->pipe);
+ struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
+
+ I915_STATE_WARN(pll->active_mask & pipe_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
+ pipe_name(crtc->pipe), pll->active_mask);
+ I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
+ "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
+ pipe_name(crtc->pipe), pll->state.pipe_mask);
+ }
+}
+
+void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+ verify_single_dpll_state(i915, &i915->dpll.shared_dplls[i],
+ NULL, NULL);
+}
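A sketch of how the two verification entry points divide the work (editorial;
the caller below is hypothetical — in this series the real callers presumably
live in the new intel_modeset_verify.c further down):

	static void example_verify_dplls(struct intel_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_crtc_state *old_crtc_state,
					 struct intel_crtc_state *new_crtc_state)
	{
		struct drm_i915_private *i915 = to_i915(state->base.dev);

		if (crtc)
			/* per-crtc check against sw PLL tracking */
			intel_shared_dpll_state_verify(crtc, old_crtc_state,
						       new_crtc_state);
		else
			/* whole-device sweep with no crtc expectations */
			intel_shared_dpll_verify_disabled(i915);
	}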
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index f7c96a1f13c8..3247dc300ae4 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -336,6 +336,9 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+int intel_compute_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
int intel_reserve_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
@@ -365,4 +368,9 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
+void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state);
+void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915);
+
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index fb0e7e79e0cd..ac587647e1f5 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -4,6 +4,7 @@
*/
#include "gem/i915_gem_domain.h"
+#include "gem/i915_gem_internal.h"
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
@@ -127,8 +128,12 @@ struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
struct i915_vma *vma;
void __iomem *iomem;
struct i915_gem_ww_ctx ww;
+ u64 pin_flags = 0;
int err;
+ if (i915_gem_object_is_stolen(dpt->obj))
+ pin_flags |= PIN_MAPPABLE;
+
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
atomic_inc(&i915->gpu_error.pending_fb_pin);
@@ -138,7 +143,7 @@ struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
continue;
vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
- HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
+ pin_flags);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
continue;
@@ -248,10 +253,13 @@ intel_dpt_create(struct intel_framebuffer *fb)
size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
- if (HAS_LMEM(i915))
- dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
- else
+ dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
dpt_obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
+ drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
+ dpt_obj = i915_gem_object_create_internal(i915, size);
+ }
if (IS_ERR(dpt_obj))
return ERR_CAST(dpt_obj);
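The allocation above now tries backing stores in strict preference order
instead of branching once on HAS_LMEM(). Restated as a standalone sketch
(editorial; the helper name is hypothetical, the calls are the ones used in
the hunk):

	static struct drm_i915_gem_object *
	example_dpt_obj_alloc(struct drm_i915_private *i915, resource_size_t size)
	{
		struct drm_i915_gem_object *obj;

		/* 1st choice: contiguous local memory (errors out on non-LMEM parts) */
		obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
		/* 2nd choice: stolen memory, but only if a mappable aperture exists */
		if (IS_ERR(obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
			obj = i915_gem_object_create_stolen(i915, size);
		/* last resort on integrated parts: plain internal (smem) pages */
		if (IS_ERR(obj) && !HAS_LMEM(i915))
			obj = i915_gem_object_create_internal(i915, size);

		return obj; /* may still be an ERR_PTR if every tier failed */
	}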
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 166caf293f7b..7da4a9cbe4ba 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -217,9 +217,6 @@ static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
{
struct intel_crtc *crtc;
- if (dev_priv->vbt.drrs_type != DRRS_TYPE_SEAMLESS)
- return;
-
for_each_intel_crtc(&dev_priv->drm, crtc) {
unsigned int frontbuffer_bits;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 389a8c24cdc1..35e121cd226c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -102,7 +102,7 @@ intel_dsi_get_panel_orientation(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum drm_panel_orientation orientation;
- orientation = dev_priv->vbt.dsi.orientation;
+ orientation = connector->panel.vbt.dsi.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 7d234429e71e..1bc7118c56a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -160,12 +160,10 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
- if (dev_priv->vbt.backlight.brightness_precision_bits > 8)
- panel->backlight.max = (1 << dev_priv->vbt.backlight.brightness_precision_bits) - 1;
+ if (panel->vbt.backlight.brightness_precision_bits > 8)
+ panel->backlight.max = (1 << panel->vbt.backlight.brightness_precision_bits) - 1;
else
panel->backlight.max = PANEL_PWM_MAX_VALUE;
@@ -185,11 +183,10 @@ static const struct intel_panel_bl_funcs dcs_bl_funcs = {
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
struct intel_panel *panel = &intel_connector->panel;
- if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
+ if (panel->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
return -ENODEV;
if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index dd24aef925f2..75e8cc4337c9 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -240,9 +240,10 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
-static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
+static void vlv_exec_gpio(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct gpio_map *map;
u16 pconf0, padval;
u32 tmp;
@@ -256,7 +257,7 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
map = &vlv_gpio_table[gpio_index];
- if (dev_priv->vbt.dsi.seq_version >= 3) {
+ if (connector->panel.vbt.dsi.seq_version >= 3) {
/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
port = IOSF_PORT_GPIO_NC;
} else {
@@ -287,14 +288,15 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
}
-static void chv_exec_gpio(struct drm_i915_private *dev_priv,
+static void chv_exec_gpio(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u16 cfg0, cfg1;
u16 family_num;
u8 port;
- if (dev_priv->vbt.dsi.seq_version >= 3) {
+ if (connector->panel.vbt.dsi.seq_version >= 3) {
if (gpio_index >= CHV_GPIO_IDX_START_SE) {
/* XXX: it's unclear whether 255->57 is part of SE. */
gpio_index -= CHV_GPIO_IDX_START_SE;
@@ -340,9 +342,10 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
}
-static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
+static void bxt_exec_gpio(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
/* XXX: this table is a quick ugly hack. */
static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
@@ -366,9 +369,11 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
gpiod_set_value(gpio_desc, value);
}
-static void icl_exec_gpio(struct drm_i915_private *dev_priv,
+static void icl_exec_gpio(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
}
@@ -376,18 +381,19 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
u8 gpio_source, gpio_index = 0, gpio_number;
bool value;
drm_dbg_kms(&dev_priv->drm, "\n");
- if (dev_priv->vbt.dsi.seq_version >= 3)
+ if (connector->panel.vbt.dsi.seq_version >= 3)
gpio_index = *data++;
gpio_number = *data++;
/* gpio source in sequence v2 only */
- if (dev_priv->vbt.dsi.seq_version == 2)
+ if (connector->panel.vbt.dsi.seq_version == 2)
gpio_source = (*data >> 1) & 3;
else
gpio_source = 0;
@@ -396,13 +402,13 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
value = *data++ & 1;
if (DISPLAY_VER(dev_priv) >= 11)
- icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ icl_exec_gpio(connector, gpio_source, gpio_index, value);
else if (IS_VALLEYVIEW(dev_priv))
- vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
+ vlv_exec_gpio(connector, gpio_source, gpio_number, value);
else if (IS_CHERRYVIEW(dev_priv))
- chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
+ chv_exec_gpio(connector, gpio_source, gpio_number, value);
else
- bxt_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ bxt_exec_gpio(connector, gpio_source, gpio_index, value);
return data;
}
@@ -585,14 +591,15 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id)
{
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
if (drm_WARN_ON(&dev_priv->drm,
- seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
+ seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence)))
return;
- data = dev_priv->vbt.dsi.sequence[seq_id];
+ data = connector->panel.vbt.dsi.sequence[seq_id];
if (!data)
return;
@@ -605,7 +612,7 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
data++;
/* Skip Size of Sequence. */
- if (dev_priv->vbt.dsi.seq_version >= 3)
+ if (connector->panel.vbt.dsi.seq_version >= 3)
data += 4;
while (1) {
@@ -621,7 +628,7 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
mipi_elem_exec = NULL;
/* Size of Operation. */
- if (dev_priv->vbt.dsi.seq_version >= 3)
+ if (connector->panel.vbt.dsi.seq_version >= 3)
operation_size = *data++;
if (mipi_elem_exec) {
@@ -669,10 +676,10 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
- if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
+ if (is_vid_mode(intel_dsi) && connector->panel.vbt.dsi.seq_version >= 3)
return;
msleep(msec);
@@ -734,9 +741,10 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
- struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
- struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
+ struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps;
+ struct drm_display_mode *mode = connector->panel.vbt.lfp_lvds_vbt_mode;
u16 burst_mode_ratio;
enum port port;
@@ -872,7 +880,8 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
bool want_backlight_gpio = false;
bool want_panel_gpio = false;
@@ -927,7 +936,8 @@ void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
if (intel_dsi->gpio_panel) {
gpiod_put(intel_dsi->gpio_panel);
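The change repeated across the DSI files above is mechanical: panel-specific
VBT data moves from the device-global dev_priv->vbt into the per-connector
panel, presumably so that two panels need not share one copy. A before/after
sketch of the access path (editorial; the helper is hypothetical):

	/* hypothetical helper, shown only to contrast the two access paths */
	static u8 example_dsi_seq_version(struct intel_dsi *intel_dsi)
	{
		struct intel_connector *connector = intel_dsi->attached_connector;

		/* was: to_i915(intel_dsi->base.base.dev)->vbt.dsi.seq_version */
		return connector->panel.vbt.dsi.seq_version;
	}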
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 9f5a6b79e95b..b191915ab351 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -3,6 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
+#include <drm/drm_blend.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper.h>
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index bbdc34a23d54..16537830ccf0 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -40,6 +40,7 @@
#include <linux/string_helpers.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include "i915_drv.h"
@@ -813,8 +814,8 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
- /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,dg2,adlp */
- if (DISPLAY_VER(fbc->i915) >= 11)
+ /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp */
+ if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915))
intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 44ac0cee8b77..8ea66a2e1b09 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -298,7 +298,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
* Mailbox interface.
*/
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
- ret = snb_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1);
+ ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
drm_err(&dev_priv->drm,
"Failed to initiate HDCP key load (%d)\n",
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 1ae09431f53a..ebd91aa69dd2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2852,7 +2852,7 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
- else if (HAS_PCH_MCC(dev_priv))
+ else if (IS_JSL_EHL(dev_priv) && HAS_PCH_TGP(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 8204126d17f9..5f8b4f481cff 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -668,7 +668,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
*/
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) ||
+ !INTEL_DISPLAY_ENABLED(dev_priv))
return;
WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 7fbc8031a5aa..15d59de8810e 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -26,6 +26,7 @@
#include <drm/display/drm_dp_dual_mode_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include "intel_de.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index e8478161f8b9..730480ac3300 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -809,7 +809,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
else
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
if (val == 0)
- val = dev_priv->vbt.bios_lvds_val;
+ val = connector->panel.vbt.bios_lvds_val;
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
@@ -967,9 +967,13 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
intel_connector->edid = edid;
+ intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL,
+ IS_ERR(edid) ? NULL : edid);
+
/* Try EDID first */
intel_panel_add_edid_fixed_modes(intel_connector,
- dev_priv->vbt.drrs_type != DRRS_TYPE_NONE);
+ intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE,
+ false);
/* Failed to get EDID, what about VBT? */
if (!intel_panel_preferred_fixed_mode(intel_connector))
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
new file mode 100644
index 000000000000..f0e04d3904c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ *
+ * Read out the current hardware modeset state, and sanitize it into a
+ * consistent software state.
+ */
+
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_atomic_state_helper.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_bw.h"
+#include "intel_color.h"
+#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
+#include "intel_ddi.h"
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
+#include "intel_modeset_setup.h"
+#include "intel_pch_display.h"
+#include "intel_pm.h"
+
+static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_encoder *encoder;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(i915->bw_obj.state);
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(i915->cdclk.obj.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->dbuf.obj.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane;
+ struct drm_atomic_state *state;
+ struct intel_crtc_state *temp_crtc_state;
+ enum pipe pipe = crtc->pipe;
+ int ret;
+
+ if (!crtc_state->hw.active)
+ return;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+ }
+
+ state = drm_atomic_state_alloc(&i915->drm);
+ if (!state) {
+ drm_dbg_kms(&i915->drm,
+ "failed to disable [CRTC:%d:%s], out of memory",
+ crtc->base.base.id, crtc->base.name);
+ return;
+ }
+
+ state->acquire_ctx = ctx;
+
+ /* Everything's already locked, -EDEADLK can't happen. */
+ temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ ret = drm_atomic_add_affected_connectors(state, &crtc->base);
+
+ drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret);
+
+ i915->display->crtc_disable(to_intel_atomic_state(state), crtc);
+
+ drm_atomic_state_put(state);
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
+ crtc->base.base.id, crtc->base.name);
+
+ crtc->active = false;
+ crtc->base.enabled = false;
+
+ drm_WARN_ON(&i915->drm,
+ drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+ crtc_state->uapi.active = false;
+ crtc_state->uapi.connector_mask = 0;
+ crtc_state->uapi.encoder_mask = 0;
+ intel_crtc_free_hw_state(crtc_state);
+ memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
+
+ for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder)
+ encoder->base.crtc = NULL;
+
+ intel_fbc_disable(crtc);
+ intel_update_watermarks(i915);
+ intel_disable_shared_dpll(crtc_state);
+
+ intel_display_power_put_all_in_set(i915, &crtc->enabled_power_domains);
+
+ cdclk_state->min_cdclk[pipe] = 0;
+ cdclk_state->min_voltage_level[pipe] = 0;
+ cdclk_state->active_pipes &= ~BIT(pipe);
+
+ dbuf_state->active_pipes &= ~BIT(pipe);
+
+ bw_state->data_rate[pipe] = 0;
+ bw_state->num_active_planes[pipe] = 0;
+}
+
+static void intel_modeset_update_connector_atomic_state(struct drm_i915_private *i915)
+{
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state = connector->base.state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector->base.encoder);
+
+ if (conn_state->crtc)
+ drm_connector_put(&connector->base);
+
+ if (encoder) {
+ struct intel_crtc *crtc =
+ to_intel_crtc(encoder->base.crtc);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ conn_state->best_encoder = &encoder->base;
+ conn_state->crtc = &crtc->base;
+ conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
+
+ drm_connector_get(&connector->base);
+ } else {
+ conn_state->best_encoder = NULL;
+ conn_state->crtc = NULL;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
+{
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
+ return;
+
+ crtc_state->uapi.enable = crtc_state->hw.enable;
+ crtc_state->uapi.active = crtc_state->hw.active;
+ drm_WARN_ON(crtc_state->uapi.crtc->dev,
+ drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
+
+ crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
+ crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
+
+ drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
+ crtc_state->hw.degamma_lut);
+ drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
+ crtc_state->hw.gamma_lut);
+ drm_property_replace_blob(&crtc_state->uapi.ctm,
+ crtc_state->hw.ctm);
+}
+
+static void
+intel_sanitize_plane_mapping(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ if (DISPLAY_VER(i915) >= 4)
+ return;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_crtc *plane_crtc;
+ enum pipe pipe;
+
+ if (!plane->get_hw_state(plane, &pipe))
+ continue;
+
+ if (pipe == crtc->pipe)
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+ plane->base.base.id, plane->base.name);
+
+ plane_crtc = intel_crtc_for_pipe(i915, pipe);
+ intel_plane_disable_noatomic(plane_crtc, plane);
+ }
+}
+
+static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+ return true;
+
+ return false;
+}
+
+static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_connector *connector;
+
+ for_each_connector_on_encoder(dev, &encoder->base, connector)
+ return connector;
+
+ return NULL;
+}
+
+static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!crtc_state->hw.active && !HAS_GMCH(i915))
+ return;
+
+ /*
+ * We start out with underrun reporting disabled to avoid races.
+ * For correct bookkeeping, mark this on active crtcs.
+ *
+ * Also on gmch platforms we don't have any hardware bits to
+ * disable the underrun reporting, which means we need to start
+ * out with underrun reporting disabled also on inactive pipes,
+ * since otherwise we'll complain about the garbage we read when
+ * e.g. coming up after runtime pm.
+ *
+ * No protection against concurrent access is required - at
+ * worst a fifo underrun happens which also sets this to false.
+ */
+ crtc->cpu_fifo_underrun_disabled = true;
+
+ /*
+ * We track the PCH transcoder underrun reporting state
+ * within the crtc. With crtc for pipe A housing the underrun
+ * reporting state for PCH transcoder A, crtc for pipe B housing
+ * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
+ * and marking underrun reporting as disabled for the non-existing
+ * PCH transcoders B and C would prevent enabling the south
+ * error interrupt (see cpt_can_enable_serr_int()).
+ */
+ if (intel_has_pch_trancoder(i915, crtc->pipe))
+ crtc->pch_fifo_underrun_disabled = true;
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active) {
+ struct intel_plane *plane;
+
+ /* Disable everything but the primary plane */
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (plane_state->uapi.visible &&
+ plane->base.type != DRM_PLANE_TYPE_PRIMARY)
+ intel_plane_disable_noatomic(crtc, plane);
+ }
+
+ /* Disable any background color/etc. set by the BIOS */
+ intel_color_commit_noarm(crtc_state);
+ intel_color_commit_arm(crtc_state);
+ }
+
+ /*
+ * Adjust the state of the output pipe according to whether we have
+ * active connectors/encoders.
+ */
+ if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
+ !intel_crtc_is_bigjoiner_slave(crtc_state))
+ intel_crtc_disable_noatomic(crtc, ctx);
+}
+
+static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ /*
+ * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
+ * the hardware when a high res display is plugged in. DPLL P
+ * divider is zero, and the pipe timings are bonkers. We'll
+ * try to disable everything in that case.
+ *
+ * FIXME would be nice to be able to sanitize this state
+ * without several WARNs, but for now let's take the easy
+ * road.
+ */
+ return IS_SANDYBRIDGE(i915) &&
+ crtc_state->hw.active &&
+ crtc_state->shared_dpll &&
+ crtc_state->port_clock == 0;
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_connector *connector;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc_state *crtc_state = crtc ?
+ to_intel_crtc_state(crtc->base.state) : NULL;
+
+ /*
+ * We need to check both for a crtc link (meaning that the encoder is
+ * active and trying to read from a pipe) and the pipe itself being
+ * active.
+ */
+ bool has_active_crtc = crtc_state &&
+ crtc_state->hw.active;
+
+ if (crtc_state && has_bogus_dpll_config(crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+ pipe_name(crtc->pipe));
+ has_active_crtc = false;
+ }
+
+ connector = intel_encoder_find_connector(encoder);
+ if (connector && !has_active_crtc) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ encoder->base.name);
+
+ /*
+ * Connector is active, but has no active pipe. This is fallout
+ * from our resume register restoring. Disable the encoder
+ * manually again.
+ */
+ if (crtc_state) {
+ struct drm_encoder *best_encoder;
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ encoder->base.name);
+
+ /* avoid oopsing in case the hooks consult best_encoder */
+ best_encoder = connector->base.state->best_encoder;
+ connector->base.state->best_encoder = &encoder->base;
+
+ /* FIXME NULL atomic state passed! */
+ if (encoder->disable)
+ encoder->disable(NULL, encoder, crtc_state,
+ connector->base.state);
+ if (encoder->post_disable)
+ encoder->post_disable(NULL, encoder, crtc_state,
+ connector->base.state);
+
+ connector->base.state->best_encoder = best_encoder;
+ }
+ encoder->base.crtc = NULL;
+
+ /*
+ * Inconsistent output/port/pipe state happens presumably due to
+ * a bug in one of the get_hw_state functions. Or someplace else
+ * in our code, like the register restore mess on resume. Clamp
+ * things to off as a safer default.
+ */
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+
+ /* notify opregion of the sanitized encoder state */
+ intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
+
+ if (HAS_DDI(i915))
+ intel_ddi_sanitize_encoder_pll_mapping(encoder);
+}
+
+/* FIXME read out full plane state for all planes */
+static void readout_plane_state(struct drm_i915_private *i915)
+{
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+
+ for_each_intel_plane(&i915->drm, plane) {
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state;
+ enum pipe pipe = PIPE_A;
+ bool visible;
+
+ visible = plane->get_hw_state(plane, &pipe);
+
+ crtc = intel_crtc_for_pipe(i915, pipe);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ intel_set_plane_visible(crtc_state, plane_state, visible);
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+ plane->base.base.id, plane->base.name,
+ str_enabled_disabled(visible), pipe_name(pipe));
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ intel_plane_fixup_bitmasks(crtc_state);
+ }
+}
+
+static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
+{
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(i915->cdclk.obj.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->dbuf.obj.state);
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ u8 active_pipes = 0;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
+ intel_crtc_free_hw_state(crtc_state);
+ intel_crtc_state_reset(crtc_state, crtc);
+
+ intel_crtc_get_pipe_config(crtc_state);
+
+ crtc_state->hw.enable = crtc_state->hw.active;
+
+ crtc->base.enabled = crtc_state->hw.enable;
+ crtc->active = crtc_state->hw.active;
+
+ if (crtc_state->hw.active)
+ active_pipes |= BIT(crtc->pipe);
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] hw state readout: %s\n",
+ crtc->base.base.id, crtc->base.name,
+ str_enabled_disabled(crtc_state->hw.active));
+ }
+
+ cdclk_state->active_pipes = active_pipes;
+ dbuf_state->active_pipes = active_pipes;
+
+ readout_plane_state(i915);
+
+ for_each_intel_encoder(&i915->drm, encoder) {
+ struct intel_crtc_state *crtc_state = NULL;
+
+ pipe = 0;
+
+ if (encoder->get_hw_state(encoder, &pipe)) {
+ crtc = intel_crtc_for_pipe(i915, pipe);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ encoder->base.crtc = &crtc->base;
+ intel_encoder_get_config(encoder, crtc_state);
+
+ /* read out to slave crtc as well for bigjoiner */
+ if (crtc_state->bigjoiner_pipes) {
+ struct intel_crtc *slave_crtc;
+
+ /* the encoder should be linked to the bigjoiner master, not a slave */
+ WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(crtc_state)) {
+ struct intel_crtc_state *slave_crtc_state;
+
+ slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state);
+ intel_encoder_get_config(encoder, slave_crtc_state);
+ }
+ }
+ } else {
+ encoder->base.crtc = NULL;
+ }
+
+ if (encoder->sync_state)
+ encoder->sync_state(encoder, crtc_state);
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ str_enabled_disabled(encoder->base.crtc),
+ pipe_name(pipe));
+ }
+
+ intel_dpll_readout_hw_state(i915);
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (connector->get_hw_state(connector)) {
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ connector->base.dpms = DRM_MODE_DPMS_ON;
+
+ encoder = intel_attached_encoder(connector);
+ connector->base.encoder = &encoder->base;
+
+ crtc = to_intel_crtc(encoder->base.crtc);
+ crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
+
+ if (crtc_state && crtc_state->hw.active) {
+ /*
+ * This has to be done during hardware readout
+ * because anything calling .crtc_disable may
+ * rely on the connector_mask being accurate.
+ */
+ crtc_state->uapi.connector_mask |=
+ drm_connector_mask(&connector->base);
+ crtc_state->uapi.encoder_mask |=
+ drm_encoder_mask(&encoder->base);
+ }
+ } else {
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id, connector->base.name,
+ str_enabled_disabled(connector->base.encoder));
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(i915->bw_obj.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane;
+ int min_cdclk = 0;
+
+ if (crtc_state->hw.active) {
+ /*
+ * The initial mode needs to be set in order to keep
+ * the atomic core happy. It wants a valid mode if the
+ * crtc's enabled, so we do the above call.
+ *
+ * But we don't set all the derived state fully, hence
+ * set a flag to indicate that a full recalculation is
+ * needed on the next commit.
+ */
+ crtc_state->inherited = true;
+
+ intel_crtc_update_active_timings(crtc_state);
+
+ intel_crtc_copy_hw_to_uapi_state(crtc_state);
+ }
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ /*
+ * FIXME don't have the fb yet, so can't
+ * use intel_plane_data_rate() :(
+ */
+ if (plane_state->uapi.visible)
+ crtc_state->data_rate[plane->id] =
+ 4 * crtc_state->pixel_rate;
+ /*
+ * FIXME don't have the fb yet, so can't
+ * use plane->min_cdclk() :(
+ */
+ if (plane_state->uapi.visible && plane->min_cdclk) {
+ if (crtc_state->double_wide || DISPLAY_VER(i915) >= 10)
+ crtc_state->min_cdclk[plane->id] =
+ DIV_ROUND_UP(crtc_state->pixel_rate, 2);
+ else
+ crtc_state->min_cdclk[plane->id] =
+ crtc_state->pixel_rate;
+ }
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] min_cdclk %d kHz\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id]);
+ }
+
+ if (crtc_state->hw.active) {
+ min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+ if (drm_WARN_ON(&i915->drm, min_cdclk < 0))
+ min_cdclk = 0;
+ }
+
+ cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
+ cdclk_state->min_voltage_level[crtc->pipe] =
+ crtc_state->min_voltage_level;
+
+ intel_bw_crtc_update(bw_state, crtc_state);
+ }
+}
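To put rough numbers on the FIXME guesstimates above (editorial worked
example): for a visible plane on a pipe with a 148500 kHz pixel rate, the
placeholder data rate is 4 * 148500 = 594000 (4 bytes per pixel assumed), and
min_cdclk comes out as DIV_ROUND_UP(148500, 2) = 74250 kHz where the pipe
processes two pixels per clock (double wide, or DISPLAY_VER >= 10), or the
full 148500 kHz otherwise. Both guesses err on the safe side until the first
full commit recomputes them from the real framebuffer.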
+
+static void
+get_encoder_power_domains(struct drm_i915_private *i915)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&i915->drm, encoder) {
+ struct intel_crtc_state *crtc_state;
+
+ if (!encoder->get_power_domains)
+ continue;
+
+ /*
+ * MST-primary and inactive encoders don't have a crtc state
+ * and neither of these requires any power domain references.
+ */
+ if (!encoder->base.crtc)
+ continue;
+
+ crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
+ encoder->get_power_domains(encoder, crtc_state);
+ }
+}
+
+static void intel_early_display_was(struct drm_i915_private *i915)
+{
+ /*
+ * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
+ * Also known as Wa_14010480278.
+ */
+ if (IS_DISPLAY_VER(i915, 10, 12))
+ intel_de_write(i915, GEN9_CLKGATE_DIS_0,
+ intel_de_read(i915, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
+
+ if (IS_HASWELL(i915)) {
+ /*
+ * WaRsPkgCStateDisplayPMReq:hsw
+ * System hang if this isn't done before disabling all planes!
+ */
+ intel_de_write(i915, CHICKEN_PAR1_1,
+ intel_de_read(i915, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+ }
+
+ if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
+ /* Display WA #1142:kbl,cfl,cml */
+ intel_de_rmw(i915, CHICKEN_PAR1_1,
+ KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
+ intel_de_rmw(i915, CHICKEN_MISC_2,
+ KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
+ KBL_ARB_FILL_SPARE_14);
+ }
+}
+
+void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+ intel_early_display_was(i915);
+ intel_modeset_readout_hw_state(i915);
+
+ /* HW state is read out, now we need to sanitize this mess. */
+ get_encoder_power_domains(i915);
+
+ intel_pch_sanitize(i915);
+
+ /*
+ * intel_sanitize_plane_mapping() may need to do vblank
+ * waits, so we need vblank interrupts restored beforehand.
+ */
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ intel_sanitize_fifo_underrun_reporting(crtc_state);
+
+ drm_crtc_vblank_reset(&crtc->base);
+
+ if (crtc_state->hw.active)
+ intel_crtc_vblank_on(crtc_state);
+ }
+
+ intel_fbc_sanitize(i915);
+
+ intel_sanitize_plane_mapping(i915);
+
+ for_each_intel_encoder(&i915->drm, encoder)
+ intel_sanitize_encoder(encoder);
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ intel_sanitize_crtc(crtc, ctx);
+ intel_crtc_state_dump(crtc_state, NULL, "setup_hw_state");
+ }
+
+ intel_modeset_update_connector_atomic_state(i915);
+
+ intel_dpll_sanitize_state(i915);
+
+ if (IS_G4X(i915)) {
+ g4x_wm_get_hw_state(i915);
+ g4x_wm_sanitize(i915);
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ vlv_wm_get_hw_state(i915);
+ vlv_wm_sanitize(i915);
+ } else if (DISPLAY_VER(i915) >= 9) {
+ skl_wm_get_hw_state(i915);
+ skl_wm_sanitize(i915);
+ } else if (HAS_PCH_SPLIT(i915)) {
+ ilk_wm_get_hw_state(i915);
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_power_domain_mask put_domains;
+
+ intel_modeset_get_crtc_power_domains(crtc_state, &put_domains);
+ if (drm_WARN_ON(&i915->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
+ intel_modeset_put_crtc_power_domains(crtc, &put_domains);
+ }
+
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+
+ intel_power_domains_sanitize_state(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.h b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
new file mode 100644
index 000000000000..3beff67b33d0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_MODESET_SETUP_H__
+#define __INTEL_MODESET_SETUP_H__
+
+struct drm_i915_private;
+struct drm_modeset_acquire_ctx;
+
+void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+ struct drm_modeset_acquire_ctx *ctx);
+
+#endif /* __INTEL_MODESET_SETUP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
new file mode 100644
index 000000000000..a91586d77cb6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ *
+ * High level crtc/connector/encoder modeset state verification.
+ */
+
+#include <drm/drm_atomic_state_helper.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_fdi.h"
+#include "intel_modeset_verify.h"
+#include "intel_pm.h"
+#include "intel_snps_phy.h"
+
+/*
+ * Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency).
+ */
+static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.base.id, connector->base.name);
+
+ if (connector->get_hw_state(connector)) {
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+ I915_STATE_WARN(!crtc_state,
+ "connector enabled without attached crtc\n");
+
+ if (!crtc_state)
+ return;
+
+ I915_STATE_WARN(!crtc_state->hw.active,
+ "connector is active, but attached crtc isn't\n");
+
+ if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
+ return;
+
+ I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
+ "atomic encoder doesn't match attached encoder\n");
+
+ I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
+ "attached encoder crtc differs from connector crtc\n");
+ } else {
+ I915_STATE_WARN(crtc_state && crtc_state->hw.active,
+ "attached crtc is active, but connector isn't\n");
+ I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
+ "best encoder set without crtc!\n");
+ }
+}
+
+static void
+verify_connector_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
+ struct drm_encoder *encoder = connector->encoder;
+ struct intel_crtc_state *crtc_state = NULL;
+
+ if (new_conn_state->crtc != &crtc->base)
+ continue;
+
+ if (crtc)
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ intel_connector_verify_state(crtc_state, new_conn_state);
+
+ I915_STATE_WARN(new_conn_state->best_encoder != encoder,
+ "connector's atomic encoder doesn't match legacy encoder\n");
+ }
+}
+
+static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (pipe_config->has_pch_encoder) {
+ int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
+ &pipe_config->fdi_m_n);
+ int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
+
+ /*
+ * FDI already provided one idea for the dotclock.
+ * Yell if the encoder disagrees.
+ */
+ drm_WARN(&dev_priv->drm,
+ !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+ "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+ fdi_dotclock, dotclock);
+ }
+}
+
+static void
+verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
+{
+ struct intel_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ int i;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ bool enabled = false, found = false;
+ enum pipe pipe;
+
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ encoder->base.name);
+
+ for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
+ new_conn_state, i) {
+ if (old_conn_state->best_encoder == &encoder->base)
+ found = true;
+
+ if (new_conn_state->best_encoder != &encoder->base)
+ continue;
+
+ found = true;
+ enabled = true;
+
+ I915_STATE_WARN(new_conn_state->crtc !=
+ encoder->base.crtc,
+ "connector's crtc doesn't match encoder crtc\n");
+ }
+
+ if (!found)
+ continue;
+
+ I915_STATE_WARN(!!encoder->base.crtc != enabled,
+ "encoder's enabled state mismatch (expected %i, found %i)\n",
+ !!encoder->base.crtc, enabled);
+
+ if (!encoder->base.crtc) {
+ bool active;
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ I915_STATE_WARN(active,
+ "encoder detached but still enabled on pipe %c.\n",
+ pipe_name(pipe));
+ }
+ }
+}
+
+static void
+verify_crtc_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_encoder *encoder;
+ struct intel_crtc_state *pipe_config = old_crtc_state;
+ struct drm_atomic_state *state = old_crtc_state->uapi.state;
+ struct intel_crtc *master_crtc;
+
+ __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
+ intel_crtc_free_hw_state(old_crtc_state);
+ intel_crtc_state_reset(old_crtc_state, crtc);
+ old_crtc_state->uapi.state = state;
+
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
+ crtc->base.name);
+
+ pipe_config->hw.enable = new_crtc_state->hw.enable;
+
+ intel_crtc_get_pipe_config(pipe_config);
+
+ /* we keep both pipes enabled on 830 */
+ if (IS_I830(dev_priv) && pipe_config->hw.active)
+ pipe_config->hw.active = new_crtc_state->hw.active;
+
+ I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
+ "crtc active state doesn't match with hw state (expected %i, found %i)\n",
+ new_crtc_state->hw.active, pipe_config->hw.active);
+
+ I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
+ "transitional active state does not match atomic hw state (expected %i, found %i)\n",
+ new_crtc_state->hw.active, crtc->active);
+
+ master_crtc = intel_master_crtc(new_crtc_state);
+
+ for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
+ enum pipe pipe;
+ bool active;
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ I915_STATE_WARN(active != new_crtc_state->hw.active,
+ "[ENCODER:%i] active %i with crtc active %i\n",
+ encoder->base.base.id, active,
+ new_crtc_state->hw.active);
+
+ I915_STATE_WARN(active && master_crtc->pipe != pipe,
+ "Encoder connected to wrong pipe %c\n",
+ pipe_name(pipe));
+
+ if (active)
+ intel_encoder_get_config(encoder, pipe_config);
+ }
+
+ if (!new_crtc_state->hw.active)
+ return;
+
+ intel_pipe_config_sanity_check(dev_priv, pipe_config);
+
+ if (!intel_pipe_config_compare(new_crtc_state,
+ pipe_config, false)) {
+ I915_STATE_WARN(1, "pipe state doesn't match!\n");
+ intel_crtc_state_dump(pipe_config, NULL, "hw state");
+ intel_crtc_state_dump(new_crtc_state, NULL, "sw state");
+ }
+}
+
+void intel_modeset_verify_crtc(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
+ return;
+
+ intel_wm_state_verify(crtc, new_crtc_state);
+ verify_connector_state(state, crtc);
+ verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
+ intel_shared_dpll_state_verify(crtc, old_crtc_state, new_crtc_state);
+ intel_mpllb_state_verify(state, new_crtc_state);
+}
+
+void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
+ struct intel_atomic_state *state)
+{
+ verify_encoder_state(dev_priv, state);
+ verify_connector_state(state, NULL);
+ intel_shared_dpll_verify_disabled(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.h b/drivers/gpu/drm/i915/display/intel_modeset_verify.h
new file mode 100644
index 000000000000..2d6fbe4f7846
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_MODESET_VERIFY_H__
+#define __INTEL_MODESET_VERIFY_H__
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+
+void intel_modeset_verify_crtc(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state);
+void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
+ struct intel_atomic_state *state);
+
+#endif /* __INTEL_MODESET_VERIFY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index f31e8c3f8ce0..1c0c745c142d 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -30,6 +30,8 @@
#include <linux/firmware.h>
#include <acpi/video.h>
+#include <drm/drm_edid.h>
+
#include "i915_drv.h"
#include "intel_acpi.h"
#include "intel_backlight.h"
@@ -53,6 +55,8 @@
#define MBOX_ASLE_EXT BIT(4) /* Mailbox #5 */
#define MBOX_BACKLIGHT BIT(5) /* Mailbox #2 (valid from v3.x) */
+#define PCON_HEADLESS_SKU BIT(13)
+
struct opregion_header {
u8 signature[16];
u32 size;
@@ -1135,6 +1139,18 @@ struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
return new_edid;
}
+bool intel_opregion_headless_sku(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+ struct opregion_header *header = opregion->header;
+
+ if (!header || header->over.major < 2 ||
+ (header->over.major == 2 && header->over.minor < 3))
+ return false;
+
+ return opregion->header->pcon & PCON_HEADLESS_SKU;
+}
+
void intel_opregion_register(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->opregion;
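
intel_opregion_headless_sku() only trusts the PCON headless bit on OpRegion
2.3 or newer. A minimal sketch of the same version gate, with a hypothetical
flattened header standing in for the packed opregion_header layout:

#include <stdbool.h>
#include <stdint.h>

#define PCON_HEADLESS_SKU (1u << 13)	/* matches the BIT(13) above */

struct fake_opregion_header {		/* hypothetical stand-in */
	uint8_t over_major;
	uint8_t over_minor;
	uint32_t pcon;
};

static bool headless_sku(const struct fake_opregion_header *h)
{
	/* The PCON word is only consulted from OpRegion v2.3 onwards. */
	if (!h || h->over_major < 2 ||
	    (h->over_major == 2 && h->over_minor < 3))
		return false;

	return h->pcon & PCON_HEADLESS_SKU;
}
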
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index 82cc0ba34af7..2f261f985400 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -76,6 +76,8 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
struct edid *intel_opregion_get_edid(struct intel_connector *connector);
+bool intel_opregion_headless_sku(struct drm_i915_private *i915);
+
#else /* CONFIG_ACPI*/
static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
@@ -127,6 +129,11 @@ intel_opregion_get_edid(struct intel_connector *connector)
return NULL;
}
+static inline bool intel_opregion_headless_sku(struct drm_i915_private *i915)
+{
+ return false;
+}
+
#endif /* CONFIG_ACPI */
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index ee46561b5ae8..79ed8bd04a07 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -1399,8 +1399,6 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->i915 = dev_priv;
overlay->context = engine->kernel_context;
- GEM_BUG_ON(!overlay->context);
-
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
overlay->brightness = -19;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index d1d1b59102d6..237a40623dd7 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -71,20 +71,41 @@ intel_panel_fixed_mode(struct intel_connector *connector,
return best_mode;
}
+static bool is_alt_drrs_mode(const struct drm_display_mode *mode,
+ const struct drm_display_mode *preferred_mode)
+{
+ return drm_mode_match(mode, preferred_mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS) &&
+ mode->clock != preferred_mode->clock;
+}
+
+static bool is_alt_vrr_mode(const struct drm_display_mode *mode,
+ const struct drm_display_mode *preferred_mode)
+{
+ return drm_mode_match(mode, preferred_mode,
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS) &&
+ mode->hdisplay == preferred_mode->hdisplay &&
+ mode->vdisplay == preferred_mode->vdisplay &&
+ mode->clock != preferred_mode->clock;
+}
+
const struct drm_display_mode *
intel_panel_downclock_mode(struct intel_connector *connector,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode, *best_mode = NULL;
- int min_vrefresh = i915->vbt.seamless_drrs_min_refresh_rate;
+ int min_vrefresh = connector->panel.vbt.seamless_drrs_min_refresh_rate;
int max_vrefresh = drm_mode_vrefresh(adjusted_mode);
/* pick the fixed_mode with the lowest refresh rate */
list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
int vrefresh = drm_mode_vrefresh(fixed_mode);
- if (vrefresh >= min_vrefresh && vrefresh < max_vrefresh) {
+ if (is_alt_drrs_mode(fixed_mode, adjusted_mode) &&
+ vrefresh >= min_vrefresh && vrefresh < max_vrefresh) {
max_vrefresh = vrefresh;
best_mode = fixed_mode;
}
@@ -113,13 +134,11 @@ int intel_panel_get_modes(struct intel_connector *connector)
enum drrs_type intel_panel_drrs_type(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
-
if (list_empty(&connector->panel.fixed_modes) ||
list_is_singular(&connector->panel.fixed_modes))
return DRRS_TYPE_NONE;
- return i915->vbt.drrs_type;
+ return connector->panel.vbt.drrs_type;
}
int intel_panel_compute_config(struct intel_connector *connector,
@@ -154,16 +173,18 @@ int intel_panel_compute_config(struct intel_connector *connector,
}
static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
- const struct drm_display_mode *preferred_mode)
+ const struct drm_display_mode *preferred_mode,
+ bool has_vrr)
{
- return drm_mode_match(mode, preferred_mode,
- DRM_MODE_MATCH_TIMINGS |
- DRM_MODE_MATCH_FLAGS |
- DRM_MODE_MATCH_3D_FLAGS) &&
- mode->clock != preferred_mode->clock;
+ /* is_alt_drrs_mode() is a subset of is_alt_vrr_mode() */
+ if (has_vrr)
+ return is_alt_vrr_mode(mode, preferred_mode);
+ else
+ return is_alt_drrs_mode(mode, preferred_mode);
}
-static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector)
+static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector,
+ bool has_vrr)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct drm_display_mode *preferred_mode =
@@ -171,7 +192,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect
struct drm_display_mode *mode, *next;
list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) {
- if (!is_alt_fixed_mode(mode, preferred_mode))
+ if (!is_alt_fixed_mode(mode, preferred_mode, has_vrr))
continue;
drm_dbg_kms(&dev_priv->drm,
@@ -220,16 +241,21 @@ static void intel_panel_destroy_probed_modes(struct intel_connector *connector)
struct drm_display_mode *mode, *next;
list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) {
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] not using EDID mode: " DRM_MODE_FMT "\n",
+ connector->base.base.id, connector->base.name,
+ DRM_MODE_ARG(mode));
list_del(&mode->head);
drm_mode_destroy(&i915->drm, mode);
}
}
-void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, bool has_drrs)
+void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
+ bool has_drrs, bool has_vrr)
{
intel_panel_add_edid_preferred_mode(connector);
- if (intel_panel_preferred_fixed_mode(connector) && has_drrs)
- intel_panel_add_edid_alt_fixed_modes(connector);
+ if (intel_panel_preferred_fixed_mode(connector) && (has_drrs || has_vrr))
+ intel_panel_add_edid_alt_fixed_modes(connector, has_vrr);
intel_panel_destroy_probed_modes(connector);
}
@@ -260,7 +286,7 @@ void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *mode;
- mode = i915->vbt.lfp_lvds_vbt_mode;
+ mode = connector->panel.vbt.lfp_lvds_vbt_mode;
if (!mode)
return;
@@ -274,7 +300,7 @@ void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *mode;
- mode = i915->vbt.sdvo_lvds_vbt_mode;
+ mode = connector->panel.vbt.sdvo_lvds_vbt_mode;
if (!mode)
return;
@@ -639,6 +665,8 @@ void intel_panel_fini(struct intel_connector *connector)
intel_backlight_destroy(panel);
+ intel_bios_fini_panel(panel);
+
list_for_each_entry_safe(fixed_mode, next, &panel->fixed_modes, head) {
list_del(&fixed_mode->head);
drm_mode_destroy(connector->base.dev, fixed_mode);
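
The matching rules differ in strictness: is_alt_drrs_mode() demands identical
timings with only the pixel clock changed, while is_alt_vrr_mode() only pins
the resolution, which is why the comment above can treat DRRS matches as a
subset of VRR matches. A reduced sketch with a hypothetical mode type
(drm_mode_match() flag handling elided):

struct mini_mode {
	int hdisplay, vdisplay;		/* resolution */
	int htotal, vtotal;		/* stands in for the full timings */
	int clock;			/* pixel clock in kHz */
};

/* DRRS: same timings, different clock -> same resolution, other refresh. */
static int alt_drrs(const struct mini_mode *m, const struct mini_mode *p)
{
	return m->hdisplay == p->hdisplay && m->vdisplay == p->vdisplay &&
	       m->htotal == p->htotal && m->vtotal == p->vtotal &&
	       m->clock != p->clock;
}

/* VRR: only the resolution must match; timings may change with the clock. */
static int alt_vrr(const struct mini_mode *m, const struct mini_mode *p)
{
	return m->hdisplay == p->hdisplay && m->vdisplay == p->vdisplay &&
	       m->clock != p->clock;
}
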
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 2e32bb728beb..b087c0c3cc6d 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -40,7 +40,8 @@ int intel_panel_fitting(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
int intel_panel_compute_config(struct intel_connector *connector,
struct drm_display_mode *adjusted_mode);
-void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, bool has_drrs);
+void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
+ bool has_drrs, bool has_vrr);
void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector);
void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector);
void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index b688fd87e3da..9934c8a9e240 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -122,16 +122,29 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->sb_lock);
}
-/* Program iCLKIP clock to the desired frequency */
-void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
+struct iclkip_params {
+ u32 iclk_virtual_root_freq;
+ u32 iclk_pi_range;
+ u32 divsel, phaseinc, auxdiv, phasedir, desired_divisor;
+};
+
+static void iclkip_params_init(struct iclkip_params *p)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int clock = crtc_state->hw.adjusted_mode.crtc_clock;
- u32 divsel, phaseinc, auxdiv, phasedir = 0;
- u32 temp;
+ memset(p, 0, sizeof(*p));
- lpt_disable_iclkip(dev_priv);
+ p->iclk_virtual_root_freq = 172800 * 1000;
+ p->iclk_pi_range = 64;
+}
+
+static int lpt_iclkip_freq(struct iclkip_params *p)
+{
+ return DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
+ p->desired_divisor << p->auxdiv);
+}
+
+static void lpt_compute_iclkip(struct iclkip_params *p, int clock)
+{
+ iclkip_params_init(p);
/* The iCLK virtual clock root frequency is in MHz,
* but the adjusted_mode->crtc_clock in KHz. To get the
@@ -139,50 +152,60 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
* convert the virtual clock precision to KHz here for higher
* precision.
*/
- for (auxdiv = 0; auxdiv < 2; auxdiv++) {
- u32 iclk_virtual_root_freq = 172800 * 1000;
- u32 iclk_pi_range = 64;
- u32 desired_divisor;
-
- desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
- clock << auxdiv);
- divsel = (desired_divisor / iclk_pi_range) - 2;
- phaseinc = desired_divisor % iclk_pi_range;
+ for (p->auxdiv = 0; p->auxdiv < 2; p->auxdiv++) {
+ p->desired_divisor = DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
+ clock << p->auxdiv);
+ p->divsel = (p->desired_divisor / p->iclk_pi_range) - 2;
+ p->phaseinc = p->desired_divisor % p->iclk_pi_range;
/*
* Near 20MHz is a corner case which is
* out of range for the 7-bit divisor
*/
- if (divsel <= 0x7f)
+ if (p->divsel <= 0x7f)
break;
}
+}
+
+/* Program iCLKIP clock to the desired frequency */
+void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int clock = crtc_state->hw.adjusted_mode.crtc_clock;
+ struct iclkip_params p;
+ u32 temp;
+
+ lpt_disable_iclkip(dev_priv);
+
+ lpt_compute_iclkip(&p, clock);
/* This should not happen with any sane values */
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
drm_dbg_kms(&dev_priv->drm,
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
- clock, auxdiv, divsel, phasedir, phaseinc);
+ clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
mutex_lock(&dev_priv->sb_lock);
/* Program SSCDIVINTPHASE6 */
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
- temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+ temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
- temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
- temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+ temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc);
+ temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
/* Program SSCAUXDIV */
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
- temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+ temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv);
intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
/* Enable modulator and associated divider */
@@ -200,15 +223,14 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
- u32 divsel, phaseinc, auxdiv;
- u32 iclk_virtual_root_freq = 172800 * 1000;
- u32 iclk_pi_range = 64;
- u32 desired_divisor;
+ struct iclkip_params p;
u32 temp;
if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
return 0;
+ iclkip_params_init(&p);
+
mutex_lock(&dev_priv->sb_lock);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
@@ -218,21 +240,20 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
}
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
- divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
+ p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
- phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
+ p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
- auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
+ p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
mutex_unlock(&dev_priv->sb_lock);
- desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
+ p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;
- return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
- desired_divisor << auxdiv);
+ return lpt_iclkip_freq(&p);
}
/* Implements 3 different sequences from BSpec chapter "Display iCLK
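
The divisor search above can be re-run in isolation; a minimal sketch, using
only the constants from iclkip_params_init(), for a 148.5 MHz pixel clock
(DIV_ROUND_CLOSEST mirrors the kernel macro for unsigned operands):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	const uint32_t root = 172800 * 1000;	/* 172.8 MHz, in kHz units */
	const uint32_t pi_range = 64;
	uint32_t clock = 148500;		/* kHz */
	uint32_t auxdiv, divsel = 0, phaseinc = 0, desired = 0;

	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		desired = DIV_ROUND_CLOSEST(root, clock << auxdiv);
		divsel = desired / pi_range - 2;
		phaseinc = desired % pi_range;
		if (divsel <= 0x7f)	/* must fit the 7-bit divisor field */
			break;
	}

	/* Prints: desired=1164 divsel=16 phaseinc=12 auxdiv=0 -> 148454 kHz */
	printf("desired=%u divsel=%u phaseinc=%u auxdiv=%u -> %u kHz\n",
	       desired, divsel, phaseinc, auxdiv,
	       DIV_ROUND_CLOSEST(root, desired << auxdiv));
	return 0;
}

The reconstructed 148454 kHz differs from the requested clock by well under
0.1%, which is why the sanity check earlier in this series pairs the readout
with intel_fuzzy_clock_check() rather than an exact comparison.
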
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 5a598dd06039..1b21a341962f 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -209,7 +209,8 @@ static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int backlight_controller = dev_priv->vbt.backlight.controller;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ int backlight_controller = connector->panel.vbt.backlight.controller;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -509,7 +510,7 @@ static void wait_panel_power_cycle(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
- /* take the difference of currrent time and panel power off time
+ /* take the difference of current time and panel power off time
* and then make panel wait for t11_t12 if needed. */
panel_power_on_time = ktime_get_boottime();
panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
@@ -723,6 +724,13 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
unsigned long delay;
/*
+ * We may not yet know the real power sequencing delays,
+ * so keep VDD enabled until we're done with init.
+ */
+ if (intel_dp->pps.initializing)
+ return;
+
+ /*
* Queue the timer to fire a long time from now (relative to the power
* down delay) to keep the panel power up across a sequence of
* operations.
@@ -1051,7 +1059,7 @@ void vlv_pps_init(struct intel_encoder *encoder,
pps_init_registers(intel_dp, true);
}
-static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
+static void pps_vdd_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -1072,8 +1080,6 @@ static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dig_port));
-
- edp_panel_vdd_schedule_off(intel_dp);
}
bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
@@ -1159,53 +1165,96 @@ intel_pps_verify_state(struct intel_dp *intel_dp)
}
}
-static void pps_init_delays(struct intel_dp *intel_dp)
+static bool pps_delays_valid(struct edp_power_seq *delays)
+{
+ return delays->t1_t3 || delays->t8 || delays->t9 ||
+ delays->t10 || delays->t11_t12;
+}
+
+static void pps_init_delays_bios(struct intel_dp *intel_dp,
+ struct edp_power_seq *bios)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct edp_power_seq cur, vbt, spec,
- *final = &intel_dp->pps.pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
- /* already initialized? */
- if (final->t11_t12 != 0)
- return;
+ if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
+ intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
- intel_pps_readout_hw_state(intel_dp, &cur);
+ *bios = intel_dp->pps.bios_pps_delays;
- intel_pps_dump_state(intel_dp, "cur", &cur);
+ intel_pps_dump_state(intel_dp, "bios", bios);
+}
+
+static void pps_init_delays_vbt(struct intel_dp *intel_dp,
+ struct edp_power_seq *vbt)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ *vbt = connector->panel.vbt.edp.pps;
+
+ if (!pps_delays_valid(vbt))
+ return;
- vbt = dev_priv->vbt.edp.pps;
/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
* of 500ms appears to be too short. Occasionally the panel
* just fails to power back on. Increasing the delay to 800ms
* seems sufficient to avoid this problem.
*/
if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
+ vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
drm_dbg_kms(&dev_priv->drm,
"Increasing T12 panel delay as per the quirk to %d\n",
- vbt.t11_t12);
+ vbt->t11_t12);
}
+
/* T11_T12 delay is special and actually in units of 100ms, but zero
* based in the hw (so we need to add 100 ms). But the sw vbt
* table multiplies it with 1000 to make it in units of 100usec,
* too. */
- vbt.t11_t12 += 100 * 10;
+ vbt->t11_t12 += 100 * 10;
+
+ intel_pps_dump_state(intel_dp, "vbt", vbt);
+}
+
+static void pps_init_delays_spec(struct intel_dp *intel_dp,
+ struct edp_power_seq *spec)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
- spec.t1_t3 = 210 * 10;
- spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
- spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
- spec.t10 = 500 * 10;
+ spec->t1_t3 = 210 * 10;
+ spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec->t10 = 500 * 10;
/* This one is special and actually in units of 100ms, but zero
* based in the hw (so we need to add 100 ms). But the sw vbt
* table multiplies it with 1000 to make it in units of 100usec,
* too. */
- spec.t11_t12 = (510 + 100) * 10;
+ spec->t11_t12 = (510 + 100) * 10;
+
+ intel_pps_dump_state(intel_dp, "spec", spec);
+}
+
+static void pps_init_delays(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct edp_power_seq cur, vbt, spec,
+ *final = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* already initialized? */
+ if (pps_delays_valid(final))
+ return;
- intel_pps_dump_state(intel_dp, "vbt", &vbt);
+ pps_init_delays_bios(intel_dp, &cur);
+ pps_init_delays_vbt(intel_dp, &vbt);
+ pps_init_delays_spec(intel_dp, &spec);
/* Use the max of the register settings and vbt. If both are
* unset, fall back to the spec limits. */
@@ -1367,18 +1416,48 @@ void intel_pps_encoder_reset(struct intel_dp *intel_dp)
pps_init_delays(intel_dp);
pps_init_registers(intel_dp, false);
+ pps_vdd_init(intel_dp);
- intel_pps_vdd_sanitize(intel_dp);
+ if (edp_have_panel_vdd(intel_dp))
+ edp_panel_vdd_schedule_off(intel_dp);
}
}
void intel_pps_init(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ intel_dp->pps.initializing = true;
INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
pps_init_timestamps(intel_dp);
- intel_pps_encoder_reset(intel_dp);
+ with_intel_pps_lock(intel_dp, wakeref) {
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ vlv_initial_power_sequencer_setup(intel_dp);
+
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, false);
+ pps_vdd_init(intel_dp);
+ }
+}
+
+void intel_pps_init_late(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ /* Reinit delays after per-panel info has been parsed from VBT */
+ memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, false);
+
+ intel_dp->pps.initializing = false;
+
+ if (edp_have_panel_vdd(intel_dp))
+ edp_panel_vdd_schedule_off(intel_dp);
+ }
}
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
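
The split above leaves pps_init_delays() as a straight combination of the
three sources. A per-field sketch of the rule stated in the retained comment
("use the max of the register settings and vbt; if both are unset, fall back
to the spec limits") — pick_delay() is a hypothetical helper, not driver code:

/* Delays are in the hw's 100 usec units; 0 means "not provided". */
static unsigned short pick_delay(unsigned short bios, unsigned short vbt,
				 unsigned short spec)
{
	unsigned short v = bios > vbt ? bios : vbt;

	return v ? v : spec;	/* both unset -> eDP spec upper limit */
}
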
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index e64144659d31..a3a56f903f26 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -41,6 +41,7 @@ bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp);
void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
void intel_pps_init(struct intel_dp *intel_dp);
+void intel_pps_init_late(struct intel_dp *intel_dp);
void intel_pps_encoder_reset(struct intel_dp *intel_dp);
void intel_pps_reset_all(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 06db407e2749..e6a870641cd2 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -86,10 +86,13 @@
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
+ if (i915->params.enable_psr == -1)
+ return connector->panel.vbt.psr.enable;
return i915->params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
return false;
@@ -399,6 +402,7 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
@@ -411,20 +415,20 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
goto check_tp3_sel;
}
- if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
+ if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
val |= EDP_PSR_TP1_TIME_0us;
- else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
+ else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
val |= EDP_PSR_TP1_TIME_100us;
- else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
+ else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
val |= EDP_PSR_TP1_TIME_500us;
else
val |= EDP_PSR_TP1_TIME_2500us;
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
+ if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
val |= EDP_PSR_TP2_TP3_TIME_0us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+ else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR_TP2_TP3_TIME_100us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR_TP2_TP3_TIME_500us;
else
val |= EDP_PSR_TP2_TP3_TIME_2500us;
@@ -441,13 +445,14 @@ check_tp3_sel:
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
int idle_frames;
/* Let's use 6 as the minimum to cover all known cases including the
* off-by-one issue that HW has in some cases.
*/
- idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+ idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
@@ -483,18 +488,19 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
if (dev_priv->params.psr_safest_params)
return EDP_PSR2_TP2_TIME_2500us;
- if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
- dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
+ if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
+ connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
val |= EDP_PSR2_TP2_TIME_50us;
- else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
+ else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR2_TP2_TIME_100us;
- else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
+ else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR2_TP2_TIME_500us;
else
val |= EDP_PSR2_TP2_TIME_2500us;
@@ -549,7 +555,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
/*
* TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
* values from BSpec. In order to setting an optimal power
- * consumption, lower than 4k resoluition mode needs to decrese
+ * consumption, lower than 4k resolution mode needs to decrease
* IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
* mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
*/
@@ -953,7 +959,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
int psr_setup_time;
/*
- * Current PSR panels dont work reliably with VRR enabled
+ * Current PSR panels don't work reliably with VRR enabled
* So if VRR is enabled, do not enable PSR.
*/
if (crtc_state->vrr.enable)
@@ -1618,8 +1624,12 @@ exit:
}
static void clip_area_update(struct drm_rect *overlap_damage_area,
- struct drm_rect *damage_area)
+ struct drm_rect *damage_area,
+ struct drm_rect *pipe_src)
{
+ if (!drm_rect_intersect(damage_area, pipe_src))
+ return;
+
if (overlap_damage_area->y1 == -1) {
overlap_damage_area->y1 = damage_area->y1;
overlap_damage_area->y2 = damage_area->y2;
@@ -1654,7 +1664,7 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c
*
* Plane scaling and rotation is not supported by selective fetch and both
* properties can change without a modeset, so need to be check at every
- * atomic commmit.
+ * atomic commit.
*/
static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
{
@@ -1685,6 +1695,7 @@ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *c
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
struct intel_plane_state *new_plane_state, *old_plane_state;
@@ -1708,7 +1719,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
*/
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
- struct drm_rect src, damaged_area = { .y1 = -1 };
+ struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
+ .x2 = INT_MAX };
struct drm_atomic_helper_damage_iter iter;
struct drm_rect clip;
@@ -1735,20 +1747,23 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (old_plane_state->uapi.visible) {
damaged_area.y1 = old_plane_state->uapi.dst.y1;
damaged_area.y2 = old_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area);
+ clip_area_update(&pipe_clip, &damaged_area,
+ &crtc_state->pipe_src);
}
if (new_plane_state->uapi.visible) {
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area);
+ clip_area_update(&pipe_clip, &damaged_area,
+ &crtc_state->pipe_src);
}
continue;
} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
/* If alpha changed mark the whole plane area as damaged */
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area);
+ clip_area_update(&pipe_clip, &damaged_area,
+ &crtc_state->pipe_src);
continue;
}
@@ -1759,7 +1774,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
&new_plane_state->uapi);
drm_atomic_for_each_plane_damage(&iter, &clip) {
if (drm_rect_intersect(&clip, &src))
- clip_area_update(&damaged_area, &clip);
+ clip_area_update(&damaged_area, &clip,
+ &crtc_state->pipe_src);
}
if (damaged_area.y1 == -1)
@@ -1767,7 +1783,20 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
- clip_area_update(&pipe_clip, &damaged_area);
+ clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
+ }
+
+ /*
+ * TODO: For now we are just using full update in case
+ * selective fetch area calculation fails. To optimize this we
+ * should identify cases where this happens and fix the area
+ * calculation for those.
+ */
+ if (pipe_clip.y1 == -1) {
+ drm_info_once(&dev_priv->drm,
+ "Selective fetch area calculation failed in pipe %c\n",
+ pipe_name(crtc->pipe));
+ full_update = true;
}
if (full_update)
@@ -2174,7 +2203,7 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
}
/**
- * intel_psr_invalidate - Invalidade PSR
+ * intel_psr_invalidate - Invalidate PSR
* @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the invalidate
@@ -2344,6 +2373,7 @@ unlock:
*/
void intel_psr_init(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -2367,14 +2397,10 @@ void intel_psr_init(struct intel_dp *intel_dp)
intel_dp->psr.source_support = true;
- if (dev_priv->params.enable_psr == -1)
- if (!dev_priv->vbt.psr.enable)
- dev_priv->params.enable_psr = 0;
-
/* Set link_standby x link_off defaults */
if (DISPLAY_VER(dev_priv) < 12)
/* For new platforms up to TGL let's respect VBT back again */
- intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
+ intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
INIT_WORK(&intel_dp->psr.work, intel_psr_work);
INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
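
clip_area_update() now clamps every damage rectangle to the pipe source area
before growing the accumulated selective-fetch region, with y1 == -1 serving
as the "nothing accumulated yet" sentinel. A standalone sketch of that
accumulation, using a hypothetical rect type in place of struct drm_rect:

struct rect { int y1, y2; };

static void clip_union(struct rect *acc, struct rect dmg,
		       const struct rect *pipe)
{
	/* Clamp the damage to the pipe source; drop it if disjoint. */
	if (dmg.y1 < pipe->y1)
		dmg.y1 = pipe->y1;
	if (dmg.y2 > pipe->y2)
		dmg.y2 = pipe->y2;
	if (dmg.y1 >= dmg.y2)
		return;

	if (acc->y1 == -1) {	/* first contribution */
		*acc = dmg;
		return;
	}
	acc->y1 = dmg.y1 < acc->y1 ? dmg.y1 : acc->y1;
	acc->y2 = dmg.y2 > acc->y2 ? dmg.y2 : acc->y2;
}

If no plane contributes a valid area (pipe_clip.y1 stays -1), the update now
falls back to a full-frame fetch instead of programming an empty region.
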
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index d81855d57cdc..19122bc6d2ab 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2869,6 +2869,7 @@ static bool
intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_i915_private *i915 = to_i915(encoder->dev);
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
@@ -2900,6 +2901,8 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
+ intel_bios_init_panel(i915, &intel_connector->panel, NULL, NULL);
+
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
* SDVO->LVDS transcoders can't cope with the EDID mode.
@@ -2908,7 +2911,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
if (!intel_panel_preferred_fixed_mode(intel_connector)) {
intel_ddc_get_modes(connector, &intel_sdvo->ddc);
- intel_panel_add_edid_fixed_modes(intel_connector, false);
+ intel_panel_add_edid_fixed_modes(intel_connector, false, false);
}
intel_panel_init(intel_connector);
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 0dd4775e8195..0bdbedc67d7d 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -517,6 +517,37 @@ static const struct intel_mpllb_state dg2_hdmi_148_5 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
+/* values in the table below are calculated using the algorithm */
+static const struct intel_mpllb_state dg2_hdmi_297 = {
+ .clock = 297000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_594 = {
.clock = 594000,
.ref_control =
@@ -551,6 +582,7 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
&dg2_hdmi_27_0,
&dg2_hdmi_74_25,
&dg2_hdmi_148_5,
+ &dg2_hdmi_297,
&dg2_hdmi_594,
NULL,
};
@@ -597,7 +629,7 @@ int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
return -EINVAL;
for (i = 0; tables[i]; i++) {
- if (crtc_state->port_clock <= tables[i]->clock) {
+ if (crtc_state->port_clock == tables[i]->clock) {
crtc_state->mpllb_state = *tables[i];
return 0;
}
@@ -781,3 +813,46 @@ int intel_snps_phy_check_hdmi_link_rate(int clock)
return MODE_CLOCK_RANGE;
}
+
+void intel_mpllb_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_mpllb_state mpllb_hw_state = { 0 };
+ struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_encoder *encoder;
+
+ if (!IS_DG2(i915))
+ return;
+
+ if (!new_crtc_state->hw.active)
+ return;
+
+ encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
+ intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
+
+#define MPLLB_CHECK(__name) \
+ I915_STATE_WARN(mpllb_sw_state->__name != mpllb_hw_state.__name, \
+ "[CRTC:%d:%s] mismatch in MPLLB: %s (expected 0x%08x, found 0x%08x)", \
+ crtc->base.base.id, crtc->base.name, \
+ __stringify(__name), \
+ mpllb_sw_state->__name, mpllb_hw_state.__name)
+
+ MPLLB_CHECK(mpllb_cp);
+ MPLLB_CHECK(mpllb_div);
+ MPLLB_CHECK(mpllb_div2);
+ MPLLB_CHECK(mpllb_fracn1);
+ MPLLB_CHECK(mpllb_fracn2);
+ MPLLB_CHECK(mpllb_sscen);
+ MPLLB_CHECK(mpllb_sscstep);
+
+ /*
+ * ref_control is handled by the hardware/firmware and never
+ * programmed by the software, but the proper values are supplied
+ * in the bspec for verification purposes.
+ */
+ MPLLB_CHECK(ref_control);
+
+#undef MPLLB_CHECK
+}
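
Two behavioural points in this hunk are easy to miss. First, the table lookup
in intel_mpllb_calc_state() switched from "first entry with clock >= port
clock" to an exact match, so a port clock without a dedicated entry (the gap
the new 297 MHz table fills) is now rejected with -EINVAL instead of being
driven with a neighbouring entry's PLL settings. A sketch of that lookup with
a hypothetical reduced table type:

struct mini_pll { int clock; };	/* kHz */

static const struct mini_pll *
find_pll(const struct mini_pll *const *tbl, int port_clock)
{
	int i;

	for (i = 0; tbl[i]; i++)
		if (tbl[i]->clock == port_clock)
			return tbl[i];

	return (void *)0;	/* caller returns -EINVAL */
}

Second, intel_mpllb_state_verify() reads the MPLLB registers back after a
commit and warns on any divergence from the software state, including
ref_control, which the driver never writes but for which the bspec supplies
reference values.
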
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h
index 11dcd6deb070..557ef820bc0b 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h
@@ -9,8 +9,9 @@
#include <linux/types.h>
struct drm_i915_private;
-struct intel_encoder;
+struct intel_atomic_state;
struct intel_crtc_state;
+struct intel_encoder;
struct intel_mpllb_state;
enum phy;
@@ -31,5 +32,7 @@ int intel_mpllb_calc_port_clock(struct intel_encoder *encoder,
int intel_snps_phy_check_hdmi_link_rate(int clock);
void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_mpllb_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc_state *new_crtc_state);
#endif /* __INTEL_SNPS_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 7c0df80612d0..2713faad0625 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -34,6 +34,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index b8b822ea3755..6773840f6cc7 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -494,7 +494,8 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
}
live_status_mask = tc_port_live_status_mask(dig_port);
- if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY)))) {
+ if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY))) &&
+ !dig_port->tc_legacy_port) {
drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n",
dig_port->tc_port_name, live_status_mask);
goto out_set_tbt_alt_mode;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 4b98bab3b890..509b0a419c20 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -182,6 +182,10 @@ struct bdb_general_features {
#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+/* Device handle */
+#define DEVICE_HANDLE_LFP1 0x0008
+#define DEVICE_HANDLE_LFP2 0x0080
+
/* Pre 915 */
#define DEVICE_TYPE_NONE 0x00
#define DEVICE_TYPE_CRT 0x01
@@ -564,7 +568,9 @@ struct bdb_driver_features {
u16 tbt_enabled:1;
u16 psr_enabled:1;
u16 ips_enabled:1;
- u16 reserved3:4;
+ u16 reserved3:1;
+ u16 dmrrs_enabled:1;
+ u16 reserved4:2;
u16 pc_feature_valid:1;
} __packed;
@@ -636,6 +642,7 @@ struct bdb_sdvo_panel_dtds {
#define EDP_30BPP 2
#define EDP_RATE_1_62 0
#define EDP_RATE_2_7 1
+#define EDP_RATE_5_4 2
#define EDP_LANE_1 0
#define EDP_LANE_2 1
#define EDP_LANE_4 3
@@ -666,6 +673,16 @@ struct edp_full_link_params {
u8 vswing:4;
} __packed;
+struct edp_apical_params {
+ u32 panel_oui;
+ u32 dpcd_base_address;
+ u32 dpcd_idridix_control_0;
+ u32 dpcd_option_select;
+ u32 dpcd_backlight;
+ u32 ambient_light;
+ u32 backlight_scale;
+} __packed;
+
struct bdb_edp {
struct edp_power_seq power_seqs[16];
u32 color_depth;
@@ -681,15 +698,16 @@ struct bdb_edp {
struct edp_pwm_delays pwm_delays[16]; /* 186 */
u16 full_link_params_provided; /* 199 */
struct edp_full_link_params full_link_params[16]; /* 199 */
+ u16 apical_enable; /* 203 */
+ struct edp_apical_params apical_params[16]; /* 203 */
+ u16 edp_fast_link_training_rate[16]; /* 224 */
+ u16 edp_max_port_link_rate[16]; /* 244 */
} __packed;
/*
* Block 40 - LFP Data Block
*/
-/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
-#define MODE_MASK 0x3
-
struct bdb_lvds_options {
u8 panel_type;
u8 panel_type2; /* 212 */
@@ -717,6 +735,7 @@ struct bdb_lvds_options {
u16 lcdvcc_s0_enable; /* 200 */
u32 rotation; /* 228 */
+ u32 position; /* 240 */
} __packed;
/*
@@ -843,28 +862,43 @@ struct bdb_lfp_backlight_data {
u8 level[16]; /* Obsolete from 234+ */
struct lfp_backlight_control_method backlight_control[16];
struct lfp_brightness_level brightness_level[16]; /* 234+ */
- struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
- u8 brightness_precision_bits[16]; /* 236+ */
+ struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
+ u8 brightness_precision_bits[16]; /* 236+ */
+ u16 hdr_dpcd_refresh_timeout[16]; /* 239+ */
} __packed;
/*
* Block 44 - LFP Power Conservation Features Block
*/
+struct lfp_power_features {
+ u8 reserved1:1;
+ u8 power_conservation_pref:3;
+ u8 reserved2:1;
+ u8 lace_enabled_status:1;
+ u8 lace_support:1;
+ u8 als_enable:1;
+} __packed;
struct als_data_entry {
u16 backlight_adjust;
u16 lux;
} __packed;
-struct agressiveness_profile_entry {
- u8 dpst_agressiveness : 4;
- u8 lace_agressiveness : 4;
+struct aggressiveness_profile_entry {
+ u8 dpst_aggressiveness : 4;
+ u8 lace_aggressiveness : 4;
+} __packed;
+
+struct aggressiveness_profile2_entry {
+ u8 opst_aggressiveness : 4;
+ u8 elp_aggressiveness : 4;
} __packed;
struct bdb_lfp_power {
- u8 lfp_feature_bits;
+ struct lfp_power_features features;
struct als_data_entry als[5];
- u8 lace_aggressiveness_profile;
+ u8 lace_aggressiveness_profile:3;
+ u8 reserved1:5;
u16 dpst;
u16 psr;
u16 drrs;
@@ -873,9 +907,12 @@ struct bdb_lfp_power {
u16 dmrrs;
u16 adb;
u16 lace_enabled_status;
- struct agressiveness_profile_entry aggressivenes[16];
+ struct aggressiveness_profile_entry aggressiveness[16];
u16 hobl; /* 232+ */
u16 vrr_feature_enabled; /* 233+ */
+ u16 elp; /* 247+ */
+ u16 opst; /* 247+ */
+ struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */
} __packed;
/*
@@ -885,8 +922,10 @@ struct bdb_lfp_power {
#define MAX_MIPI_CONFIGURATIONS 6
struct bdb_mipi_config {
- struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
- struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
+ struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175 */
+ struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177 */
+ struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186 */
+ u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190 */
} __packed;
/*
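
The old opaque lfp_feature_bits byte is now decoded through the
lfp_power_features bitfield struct. Assuming the little-endian bitfield
layout the VBT parser relies on (first member in the least significant bits),
the same byte can be decoded by hand as follows; the helper is purely
illustrative:

#include <stdint.h>

struct lfp_features_decoded {
	uint8_t power_conservation_pref;	/* bits 1..3 */
	uint8_t lace_enabled_status;		/* bit 5 */
	uint8_t lace_support;			/* bit 6 */
	uint8_t als_enable;			/* bit 7 */
};

static struct lfp_features_decoded decode_lfp_features(uint8_t raw)
{
	struct lfp_features_decoded d = {
		.power_conservation_pref = (raw >> 1) & 0x7,
		.lace_enabled_status	 = (raw >> 5) & 0x1,
		.lace_support		 = (raw >> 6) & 0x1,
		.als_enable		 = (raw >> 7) & 0x1,
	};

	return d;
}
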
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 396f2f994fa0..04250a0fec3c 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -9,25 +9,35 @@
#include "intel_display_types.h"
#include "intel_vrr.h"
-bool intel_vrr_is_capable(struct drm_connector *connector)
+bool intel_vrr_is_capable(struct intel_connector *connector)
{
+ const struct drm_display_info *info = &connector->base.display_info;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp;
- const struct drm_display_info *info = &connector->display_info;
- struct drm_i915_private *i915 = to_i915(connector->dev);
-
- if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
- connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
- return false;
- intel_dp = intel_attached_dp(to_intel_connector(connector));
/*
* DP Sink is capable of VRR video timings if
* Ignore MSA bit is set in DPCD.
* EDID monitor range should also be at least 10 for reasonable
* Adaptive Sync or Variable Refresh Rate end user experience.
*/
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_eDP:
+ if (!connector->panel.vbt.vrr)
+ return false;
+ fallthrough;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ intel_dp = intel_attached_dp(connector);
+
+ if (!drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd))
+ return false;
+
+ break;
+ default:
+ return false;
+ }
+
return HAS_VRR(i915) &&
- drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd) &&
info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
}
@@ -97,7 +107,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
- if (!intel_vrr_is_capable(&connector->base))
+ if (!intel_vrr_is_capable(connector))
return;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
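
Restated, intel_vrr_is_capable() now requires three things: eDP panels must
opt in through the per-panel VBT flag, any DP sink must advertise the
Ignore-MSA DPCD capability, and the EDID monitor range must span more than
10 Hz on VRR-capable hardware. A condensed sketch with booleans standing in
for the VBT flag, the DPCD bit and HAS_VRR():

#include <stdbool.h>

enum conn_type { CONN_EDP, CONN_DP, CONN_OTHER };

static bool vrr_capable(enum conn_type type, bool vbt_vrr, bool ignore_msa,
			bool has_vrr_hw, int min_vfreq, int max_vfreq)
{
	switch (type) {
	case CONN_EDP:
		if (!vbt_vrr)		/* panel must opt in via VBT */
			return false;
		/* fallthrough: eDP also needs the DP sink capability */
	case CONN_DP:
		if (!ignore_msa)
			return false;
		break;
	default:
		return false;
	}

	return has_vrr_hw && max_vfreq - min_vfreq > 10;
}
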
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 1c2da572693d..9fda1135b0dd 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -8,15 +8,15 @@
#include <linux/types.h>
-struct drm_connector;
struct drm_connector_state;
struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
struct intel_dp;
struct intel_encoder;
-bool intel_vrr_is_capable(struct drm_connector *connector);
+bool intel_vrr_is_capable(struct intel_connector *connector);
void intel_vrr_check_modeset(struct intel_atomic_state *state);
void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index caa03324a733..c11e15a93164 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 1954f07f0d3e..b9b1fed99874 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -782,6 +782,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
@@ -838,7 +839,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
* the delay in that case. If there is no deassert-seq, then an
* unconditional msleep is used to give the panel time to power-on.
*/
- if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+ if (connector->panel.vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
} else {
@@ -1690,7 +1691,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns, extra_byte_count, tlpx_ui;
u32 ui_num, ui_den;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
@@ -1924,13 +1926,15 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi->panel_power_off_time = ktime_get_boottime();
- if (dev_priv->vbt.dsi.config->dual_link)
+ intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
+
+ if (intel_connector->panel.vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
else
intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
- intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+ intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
+ intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 321af109d484..dabdfe09f5e5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1368,7 +1368,8 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
return engine;
}
-static void kill_engines(struct i915_gem_engines *engines, bool ban)
+static void
+kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
@@ -1382,9 +1383,15 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban)
*/
for_each_gem_engine(ce, engines, it) {
struct intel_engine_cs *engine;
+ bool skip = false;
- if (ban && intel_context_ban(ce, NULL))
- continue;
+ if (exit)
+ skip = intel_context_set_exiting(ce);
+ else if (!persistent)
+ skip = intel_context_exit_nonpersistent(ce, NULL);
+
+ if (skip)
+ continue; /* Already marked. */
/*
* Check the current active state of this context; if we
@@ -1396,7 +1403,7 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban)
engine = active_engine(ce);
/* First attempt to gracefully cancel the context */
- if (engine && !__cancel_engine(engine) && ban)
+ if (engine && !__cancel_engine(engine) && (exit || !persistent))
/*
* If we are unable to send a preemptive pulse to bump
* the context from the GPU, we have to resort to a full
@@ -1408,8 +1415,6 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban)
static void kill_context(struct i915_gem_context *ctx)
{
- bool ban = (!i915_gem_context_is_persistent(ctx) ||
- !ctx->i915->params.enable_hangcheck);
struct i915_gem_engines *pos, *next;
spin_lock_irq(&ctx->stale.lock);
@@ -1422,7 +1427,8 @@ static void kill_context(struct i915_gem_context *ctx)
spin_unlock_irq(&ctx->stale.lock);
- kill_engines(pos, ban);
+ kill_engines(pos, !ctx->i915->params.enable_hangcheck,
+ i915_gem_context_is_persistent(ctx));
spin_lock_irq(&ctx->stale.lock);
GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
@@ -1468,7 +1474,8 @@ static void engines_idle_release(struct i915_gem_context *ctx,
kill:
if (list_empty(&engines->link)) /* raced, already closed */
- kill_engines(engines, true);
+ kill_engines(engines, true,
+ i915_gem_context_is_persistent(ctx));
i915_sw_fence_commit(&engines->fence);
}
@@ -1876,6 +1883,7 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt,
{
const struct sseu_dev_info *device = &gt->info.sseu;
struct drm_i915_private *i915 = gt->i915;
+ unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0);
/* No zeros in any field. */
if (!user->slice_mask || !user->subslice_mask ||
@@ -1902,7 +1910,7 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt,
if (user->slice_mask & ~device->slice_mask)
return -EINVAL;
- if (user->subslice_mask & ~device->subslice_mask[0])
+ if (user->subslice_mask & ~dev_subslice_mask)
return -EINVAL;
if (user->max_eus_per_subslice > device->max_eus_per_subslice)
@@ -1916,7 +1924,7 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt,
/* Part specific restrictions. */
if (GRAPHICS_VER(i915) == 11) {
unsigned int hw_s = hweight8(device->slice_mask);
- unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
+ unsigned int hw_ss_per_s = hweight8(dev_subslice_mask);
unsigned int req_s = hweight8(context->slice_mask);
unsigned int req_ss = hweight8(context->subslice_mask);
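
Aside: the kill_engines() change above replaces the single "ban" input with an (exit, persistent) pair. A minimal sketch of the equivalence, using a hypothetical helper name that is not part of the patch:

static bool should_force_cancel(bool enable_hangcheck, bool persistent)
{
	/* previously: ban = !persistent || !enable_hangcheck */
	bool exit = !enable_hangcheck;	/* driver-wide policy */

	/* per-context persistence is now a separate input */
	return exit || !persistent;
}

This is exactly the (exit || !persistent) condition now used when the preemptive pulse from __cancel_engine() fails.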
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 5802692ea604..33673fe7ee0a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -241,6 +241,7 @@ struct create_ext {
struct drm_i915_private *i915;
struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
unsigned int n_placements;
+ unsigned int placement_mask;
unsigned long flags;
};
@@ -337,6 +338,7 @@ static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
for (i = 0; i < args->num_regions; i++)
ext_data->placements[i] = placements[i];
+ ext_data->placement_mask = mask;
return 0;
out_dump:
@@ -411,7 +413,7 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret;
- if (args->flags)
+ if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS)
return -EINVAL;
ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
@@ -427,6 +429,22 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
ext_data.n_placements = 1;
}
+ if (args->flags & I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) {
+ if (ext_data.n_placements == 1)
+ return -EINVAL;
+
+ /*
+ * We always need to be able to spill to system memory if we
+ * can't place in the mappable part of LMEM.
+ */
+ if (!(ext_data.placement_mask & BIT(INTEL_REGION_SMEM)))
+ return -EINVAL;
+ } else {
+ if (ext_data.n_placements > 1 ||
+ ext_data.placements[0]->type != INTEL_MEMORY_SYSTEM)
+ ext_data.flags |= I915_BO_ALLOC_GPU_ONLY;
+ }
+
obj = __i915_gem_object_create_user_ext(i915, args->size,
ext_data.placements,
ext_data.n_placements,
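
For reference, the userspace side of the new flag check above: a sketch of creating an object that must stay CPU-accessible on a small-BAR part, using the create_ext uapi structs from include/uapi/drm/i915_drm.h at this point (the helper name is illustrative). Per the ioctl change, the flag is rejected unless system memory is among the placements:

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int create_cpu_visible_bo(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_i915_gem_memory_class_instance regions[] = {
		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
	};
	struct drm_i915_gem_create_ext_memory_regions ext = {
		.base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
		.num_regions = 2,
		.regions = (uintptr_t)regions,
	};
	struct drm_i915_gem_create_ext create = {
		.size = size,
		.flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
		.extensions = (uintptr_t)&ext,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
		return -errno;

	*handle = create.handle;
	return 0;
}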
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 30fe847c6664..b7b2c14fd9e1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1951,7 +1951,7 @@ eb_find_first_request_added(struct i915_execbuffer *eb)
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/* Stage with GFP_KERNEL allocations before we enter the signaling critical path */
-static void eb_capture_stage(struct i915_execbuffer *eb)
+static int eb_capture_stage(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
unsigned int i = count, j;
@@ -1964,6 +1964,10 @@ static void eb_capture_stage(struct i915_execbuffer *eb)
if (!(flags & EXEC_OBJECT_CAPTURE))
continue;
+ if (i915_gem_context_is_recoverable(eb->gem_context) &&
+ (IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0)))
+ return -EINVAL;
+
for_each_batch_create_order(eb, j) {
struct i915_capture_list *capture;
@@ -1976,6 +1980,8 @@ static void eb_capture_stage(struct i915_execbuffer *eb)
eb->capture_lists[j] = capture;
}
}
+
+ return 0;
}
/* Commit once we're in the critical path */
@@ -2017,8 +2023,9 @@ static void eb_capture_list_clear(struct i915_execbuffer *eb)
#else
-static void eb_capture_stage(struct i915_execbuffer *eb)
+static int eb_capture_stage(struct i915_execbuffer *eb)
{
+ return 0;
}
static void eb_capture_commit(struct i915_execbuffer *eb)
@@ -3410,7 +3417,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
}
ww_acquire_done(&eb.ww.ctx);
- eb_capture_stage(&eb);
+ err = eb_capture_stage(&eb);
+ if (err)
+ goto err_vma;
out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
if (IS_ERR(out_fence)) {
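
A consequence of the eb_capture_stage() check above: on discrete or post-gen12 parts, userspace that wants EXEC_OBJECT_CAPTURE must run on a non-recoverable context. A sketch (helper name illustrative, uapi structs from i915_drm.h):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int make_ctx_unrecoverable(int fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_RECOVERABLE,
		.value = 0,	/* capture buffers now require this */
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}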
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 06b1b188ce5a..ccec4055fde3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -717,6 +717,32 @@ bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
return false;
}
+/**
+ * i915_gem_object_needs_ccs_pages - Check whether the object requires extra
+ * pages when placed in system-memory, in order to save and later restore the
+ * flat-CCS aux state when the object is moved between local-memory and
+ * system-memory
+ * @obj: Pointer to the object
+ *
+ * Return: True if the object needs extra ccs pages. False otherwise.
+ */
+bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
+{
+ bool lmem_placement = false;
+ int i;
+
+ for (i = 0; i < obj->mm.n_placements; i++) {
+ /* Compression is not allowed for objects with smem placement */
+ if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
+ return false;
+ if (!lmem_placement &&
+ obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
+ lmem_placement = true;
+ }
+
+ return lmem_placement;
+}
+
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
@@ -783,10 +809,31 @@ int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
intr, MAX_SCHEDULE_TIMEOUT);
if (!ret)
ret = -ETIME;
+ else if (ret > 0 && i915_gem_object_has_unknown_state(obj))
+ ret = -EIO;
return ret < 0 ? ret : 0;
}
+/**
+ * i915_gem_object_has_unknown_state - Return true if the object backing pages
+ * are in an unknown_state, meaning that userspace must NEVER be allowed to
+ * touch the pages, with either the GPU or CPU.
+ * @obj: The GEM object to check.
+ *
+ * ONLY valid to be called after ensuring that all kernel fences have signalled
+ * (in particular the fence for moving/clearing the object).
+ */
+bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj)
+{
+ /*
+ * The below barrier pairs with the dma_fence_signal() in
+ * __memcpy_work(). We should only sample the unknown_state after all
+ * the kernel fences have signalled.
+ */
+ smp_rmb();
+ return obj->mm.unknown_state;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index e11d82a9f7c3..6f0a3ce35567 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -524,6 +524,7 @@ int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
bool intr);
+bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
@@ -617,6 +618,8 @@ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
enum intel_memory_type type);
+bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj);
+
int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
size_t size, struct intel_memory_region *mr,
struct address_space *mapping,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 2c88bdb8ff7c..5cf36a130061 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -548,6 +548,24 @@ struct drm_i915_gem_object {
bool ttm_shrinkable;
/**
+ * @unknown_state: Indicate that the object is effectively
+ * borked. This is write-once and set if we somehow encounter a
+ * fatal error when moving/clearing the pages, and we are not
+ * able to fallback to memcpy/memset, like on small-BAR systems.
+ * The GPU should also be wedged (or in the process) at this
+ * point.
+ *
+ * Only valid to read this after acquiring the dma-resv lock and
+ * waiting for all DMA_RESV_USAGE_KERNEL fences to be signalled,
+ * or if we otherwise know that the moving fence has signalled,
+ * and we are certain the pages underneath are valid for
+ * immediate access (under normal operation), like just prior to
+ * binding the object or when setting up the CPU fault handler.
+ * See i915_gem_object_has_unknown_state().
+ */
+ bool unknown_state;
+
+ /**
* Priority list of potential placements for this object.
*/
struct intel_memory_region **placements;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 2e16e91a5a56..4eed3dd90ba8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -670,17 +670,10 @@ fail:
static int init_shmem(struct intel_memory_region *mem)
{
- int err;
-
- err = i915_gemfs_init(mem->i915);
- if (err) {
- DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n",
- err);
- }
-
+ i915_gemfs_init(mem->i915);
intel_memory_region_set_name(mem, "system");
- return 0; /* Don't error, we can simply fallback to the kernel mnt */
+ return 0; /* We fall back to the kernel mnt if gemfs init failed. */
}
static int release_shmem(struct intel_memory_region *mem)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 6a6ff98a8746..1030053571a2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -36,7 +36,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
-static int drop_pages(struct drm_i915_gem_object *obj,
+static bool drop_pages(struct drm_i915_gem_object *obj,
unsigned long shrink, bool trylock_vm)
{
unsigned long flags;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 47b5e0e342ab..166d0a4b9e8c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -13,6 +13,8 @@
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_mcr.h"
+#include "gt/intel_gt_regs.h"
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
@@ -834,8 +836,8 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
} else {
resource_size_t lmem_range;
- lmem_range = intel_gt_read_register(&i915->gt0, XEHPSDV_TILE0_ADDR_RANGE) & 0xFFFF;
- lmem_size = lmem_range >> XEHPSDV_TILE_LMEM_RANGE_SHIFT;
+ lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
+ lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
lmem_size *= SZ_1G;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 80ac0db1ae8c..85518b28cd72 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -114,7 +114,7 @@ u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
return i915_gem_fence_size(i915, size, tiling, stride);
}
-/* Check pitch constriants for all chips & tiling formats */
+/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_i915_gem_object *obj,
unsigned int tiling, unsigned int stride)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 8f1bb6a4b7d1..f131dc065f47 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -266,24 +266,6 @@ static const struct i915_refct_sgt_ops tt_rsgt_ops = {
.release = i915_ttm_tt_release
};
-static inline bool
-i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
-{
- bool lmem_placement = false;
- int i;
-
- for (i = 0; i < obj->mm.n_placements; i++) {
- /* Compression is not allowed for the objects with smem placement */
- if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
- return false;
- if (!lmem_placement &&
- obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
- lmem_placement = true;
- }
-
- return lmem_placement;
-}
-
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
@@ -682,7 +664,15 @@ static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
i915_ttm_purge(obj);
}
-static bool i915_ttm_resource_mappable(struct ttm_resource *res)
+/**
+ * i915_ttm_resource_mappable - Return true if the ttm resource is CPU
+ * accessible.
+ * @res: The TTM resource to check.
+ *
+ * This is interesting on small-BAR systems where we may encounter lmem objects
+ * that can't be accessed via the CPU.
+ */
+bool i915_ttm_resource_mappable(struct ttm_resource *res)
{
struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
@@ -694,6 +684,22 @@ static bool i915_ttm_resource_mappable(struct ttm_resource *res)
static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo);
+ bool unknown_state;
+
+ if (!obj)
+ return -EINVAL;
+
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ return -EINVAL;
+
+ assert_object_held(obj);
+
+ unknown_state = i915_gem_object_has_unknown_state(obj);
+ i915_gem_object_put(obj);
+ if (unknown_state)
+ return -EINVAL;
+
if (!i915_ttm_cpu_maps_iomem(mem))
return 0;
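
The kref_get_unless_zero() in i915_ttm_io_mem_reserve() above is the standard "weak reference" idiom: the embedded GEM object may already be on its way to destruction when TTM calls back. A generic sketch of the pattern, assuming a plain kref-refcounted type (not i915 code):

#include <linux/kref.h>

struct my_obj {
	struct kref ref;
};

static struct my_obj *my_obj_tryget(struct my_obj *obj)
{
	/* Fails once the refcount has hit zero, so we never resurrect
	 * an object whose release path is already running. */
	if (!obj || !kref_get_unless_zero(&obj->ref))
		return NULL;
	return obj;
}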
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 73e371aa3850..e4842b4296fc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -92,4 +92,7 @@ static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
/* Once / if we support GGTT, this is also false for cached ttm_tts */
return mem->mem_type != I915_PL_SYSTEM;
}
+
+bool i915_ttm_resource_mappable(struct ttm_resource *res);
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index a10716f4e717..9a7e50534b84 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -33,6 +33,7 @@
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
static bool fail_gpu_migration;
static bool fail_work_allocation;
+static bool ban_memcpy;
void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
bool work_allocation)
@@ -40,6 +41,11 @@ void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
fail_gpu_migration = gpu_migration;
fail_work_allocation = work_allocation;
}
+
+void i915_ttm_migrate_set_ban_memcpy(bool ban)
+{
+ ban_memcpy = ban;
+}
#endif
static enum i915_cache_level
@@ -258,15 +264,23 @@ struct i915_ttm_memcpy_arg {
* from the callback for lockdep reasons.
* @cb: Callback for the accelerated migration fence.
* @arg: The argument for the memcpy functionality.
+ * @i915: The i915 pointer.
+ * @obj: The GEM object.
+ * @memcpy_allowed: If false, instead of processing @arg and falling back to
+ * memcpy or memset, we wedge the device and set the @obj unknown_state, to
+ * prevent further access to the object with the CPU or GPU. On some devices
+ * we might only be permitted to use the blitter engine for such operations.
*/
struct i915_ttm_memcpy_work {
struct dma_fence fence;
struct work_struct work;
- /* The fence lock */
spinlock_t lock;
struct irq_work irq_work;
struct dma_fence_cb cb;
struct i915_ttm_memcpy_arg arg;
+ struct drm_i915_private *i915;
+ struct drm_i915_gem_object *obj;
+ bool memcpy_allowed;
};
static void i915_ttm_move_memcpy(struct i915_ttm_memcpy_arg *arg)
@@ -317,14 +331,42 @@ static void __memcpy_work(struct work_struct *work)
struct i915_ttm_memcpy_work *copy_work =
container_of(work, typeof(*copy_work), work);
struct i915_ttm_memcpy_arg *arg = &copy_work->arg;
- bool cookie = dma_fence_begin_signalling();
+ bool cookie;
+
+ /*
+ * FIXME: We need to take a closer look here. We should be able to plonk
+ * this into the fence critical section.
+ */
+ if (!copy_work->memcpy_allowed) {
+ struct intel_gt *gt;
+ unsigned int id;
+
+ for_each_gt(gt, copy_work->i915, id)
+ intel_gt_set_wedged(gt);
+ }
+
+ cookie = dma_fence_begin_signalling();
+
+ if (copy_work->memcpy_allowed) {
+ i915_ttm_move_memcpy(arg);
+ } else {
+ /*
+ * Prevent further use of the object. Any future GTT binding or
+ * CPU access is not allowed once we signal the fence. Outside
+ * of the fence critical section, we then also then wedge the gpu
+ * to indicate the device is not functional.
+ *
+ * The below dma_fence_signal() is our write-memory-barrier.
+ */
+ copy_work->obj->mm.unknown_state = true;
+ }
- i915_ttm_move_memcpy(arg);
dma_fence_end_signalling(cookie);
dma_fence_signal(&copy_work->fence);
i915_ttm_memcpy_release(arg);
+ i915_gem_object_put(copy_work->obj);
dma_fence_put(&copy_work->fence);
}
@@ -336,6 +378,7 @@ static void __memcpy_irq_work(struct irq_work *irq_work)
dma_fence_signal(&copy_work->fence);
i915_ttm_memcpy_release(arg);
+ i915_gem_object_put(copy_work->obj);
dma_fence_put(&copy_work->fence);
}
@@ -389,6 +432,19 @@ i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work,
return &work->fence;
}
+static bool i915_ttm_memcpy_allowed(struct ttm_buffer_object *bo,
+ struct ttm_resource *dst_mem)
+{
+ if (i915_gem_object_needs_ccs_pages(i915_ttm_to_gem(bo)))
+ return false;
+
+ if (!(i915_ttm_resource_mappable(bo->resource) &&
+ i915_ttm_resource_mappable(dst_mem)))
+ return false;
+
+ return I915_SELFTEST_ONLY(ban_memcpy) ? false : true;
+}
+
static struct dma_fence *
__i915_ttm_move(struct ttm_buffer_object *bo,
const struct ttm_operation_ctx *ctx, bool clear,
@@ -396,6 +452,9 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
struct i915_refct_sgt *dst_rsgt, bool allow_accel,
const struct i915_deps *move_deps)
{
+ const bool memcpy_allowed = i915_ttm_memcpy_allowed(bo, dst_mem);
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct drm_i915_private *i915 = to_i915(bo->base.dev);
struct i915_ttm_memcpy_work *copy_work = NULL;
struct i915_ttm_memcpy_arg _arg, *arg = &_arg;
struct dma_fence *fence = ERR_PTR(-EINVAL);
@@ -423,9 +482,14 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
copy_work = kzalloc(sizeof(*copy_work), GFP_KERNEL);
if (copy_work) {
+ copy_work->i915 = i915;
+ copy_work->memcpy_allowed = memcpy_allowed;
+ copy_work->obj = i915_gem_object_get(obj);
arg = &copy_work->arg;
- i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
- dst_rsgt);
+ if (memcpy_allowed)
+ i915_ttm_memcpy_init(arg, bo, clear, dst_mem,
+ dst_ttm, dst_rsgt);
+
fence = i915_ttm_memcpy_work_arm(copy_work, dep);
} else {
dma_fence_wait(dep, false);
@@ -450,17 +514,23 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
}
/* Error intercept failed or no accelerated migration to start with */
- if (!copy_work)
- i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
- dst_rsgt);
- i915_ttm_move_memcpy(arg);
- i915_ttm_memcpy_release(arg);
+
+ if (memcpy_allowed) {
+ if (!copy_work)
+ i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
+ dst_rsgt);
+ i915_ttm_move_memcpy(arg);
+ i915_ttm_memcpy_release(arg);
+ }
+ if (copy_work)
+ i915_gem_object_put(copy_work->obj);
kfree(copy_work);
- return NULL;
+ return memcpy_allowed ? NULL : ERR_PTR(-EIO);
out:
if (!fence && copy_work) {
i915_ttm_memcpy_release(arg);
+ i915_gem_object_put(copy_work->obj);
kfree(copy_work);
}
@@ -539,8 +609,11 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
}
if (migration_fence) {
- ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict,
- true, dst_mem);
+ if (I915_SELFTEST_ONLY(evict && fail_gpu_migration))
+ ret = -EIO; /* never feed non-migrate fences into ttm */
+ else
+ ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict,
+ true, dst_mem);
if (ret) {
dma_fence_wait(migration_fence, false);
ttm_bo_move_sync_cleanup(bo, dst_mem);
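
__memcpy_work() above keeps its work inside a fence-signalling critical section; the general shape of that lockdep annotation, for reference (dma_fence_begin/end_signalling() are the real dma-buf APIs, the wrapper function is a sketch):

#include <linux/dma-fence.h>

static void do_work_then_signal(struct dma_fence *fence)
{
	bool cookie = dma_fence_begin_signalling();

	/*
	 * Critical section: nothing here may block on memory reclaim or
	 * on locks that could in turn wait for this fence to signal.
	 */

	dma_fence_end_signalling(cookie);
	dma_fence_signal(fence);
}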
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h
index d2e7f149e05c..8a5d5ab0cc34 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h
@@ -22,6 +22,7 @@ int i915_ttm_move_notify(struct ttm_buffer_object *bo);
I915_SELFTEST_DECLARE(void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
bool work_allocation));
+I915_SELFTEST_DECLARE(void i915_ttm_migrate_set_ban_memcpy(bool ban));
int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
struct drm_i915_gem_object *src,
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index ee87874e59dc..46b9a17d6abc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -11,16 +11,11 @@
#include "i915_gemfs.h"
#include "i915_utils.h"
-int i915_gemfs_init(struct drm_i915_private *i915)
+void i915_gemfs_init(struct drm_i915_private *i915)
{
char huge_opt[] = "huge=within_size"; /* r/w */
struct file_system_type *type;
struct vfsmount *gemfs;
- char *opts;
-
- type = get_fs_type("tmpfs");
- if (!type)
- return -ENODEV;
/*
* By creating our own shmemfs mountpoint, we can pass in
@@ -28,30 +23,35 @@ int i915_gemfs_init(struct drm_i915_private *i915)
*
* One example, although it is probably better with a per-file
* control, is selecting huge page allocations ("huge=within_size").
- * However, we only do so to offset the overhead of iommu lookups
- * due to bandwidth issues (slow reads) on Broadwell+.
+ * However, we only do so on platforms which benefit from it, or to
+ * offset the overhead of iommu lookups, where with the latter it is a
+ * net win even on platforms which would otherwise see some performance
+ * regressions, such as the slow-reads issue on Broadwell and Skylake.
*/
- opts = NULL;
- if (i915_vtd_active(i915)) {
- if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
- opts = huge_opt;
- drm_info(&i915->drm,
- "Transparent Hugepage mode '%s'\n",
- opts);
- } else {
- drm_notice(&i915->drm,
- "Transparent Hugepage support is recommended for optimal performance when IOMMU is enabled!\n");
- }
- }
-
- gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, opts);
+ if (GRAPHICS_VER(i915) < 11 && !i915_vtd_active(i915))
+ return;
+
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ goto err;
+
+ type = get_fs_type("tmpfs");
+ if (!type)
+ goto err;
+
+ gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt);
if (IS_ERR(gemfs))
- return PTR_ERR(gemfs);
+ goto err;
i915->mm.gemfs = gemfs;
-
- return 0;
+ drm_info(&i915->drm, "Using Transparent Hugepages\n");
+ return;
+
+err:
+ drm_notice(&i915->drm,
+ "Transparent Hugepage support is recommended for optimal performance%s\n",
+ GRAPHICS_VER(i915) >= 11 ? " on this platform!" :
+ " when IOMMU is enabled!");
}
void i915_gemfs_fini(struct drm_i915_private *i915)
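
Since i915_gemfs_init() is now best-effort, every consumer must tolerate a NULL i915->mm.gemfs. This mirrors the existing consumer pattern in i915_gem_shmem.c (sketch; the shmemfs helpers are the real kernel API):

static struct file *gemfs_file(struct drm_i915_private *i915, loff_t size)
{
	/* Prefer the private mount (huge=within_size) when available,
	 * otherwise fall back to the kernel's default tmpfs mount. */
	if (i915->mm.gemfs)
		return shmem_file_setup_with_mnt(i915->mm.gemfs, "i915",
						 size, VM_NORESERVE);

	return shmem_file_setup("i915", size, VM_NORESERVE);
}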
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h
index 2a1e59af3e4a..5d835e44c4f6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.h
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.h
@@ -9,8 +9,7 @@
struct drm_i915_private;
-int i915_gemfs_init(struct drm_i915_private *i915);
-
+void i915_gemfs_init(struct drm_i915_private *i915);
void i915_gemfs_fini(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index ef15967be51a..72ce2c9f42fd 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1623,6 +1623,7 @@ static int igt_shrink_thp(void *arg)
struct file *file;
unsigned int flags = PIN_USER;
unsigned int n;
+ intel_wakeref_t wf;
bool should_swap;
int err;
@@ -1659,9 +1660,11 @@ static int igt_shrink_thp(void *arg)
goto out_put;
}
+ wf = intel_runtime_pm_get(&i915->runtime_pm); /* active shrink */
+
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
- goto out_put;
+ goto out_wf;
if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
pr_info("failed to allocate THP, finishing test early\n");
@@ -1732,6 +1735,8 @@ static int igt_shrink_thp(void *arg)
out_unpin:
i915_vma_unpin(vma);
+out_wf:
+ intel_runtime_pm_put(&i915->runtime_pm, wf);
out_put:
i915_gem_object_put(obj);
out_vm:
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index ddd0772fd828..3cfc621ef363 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -6,6 +6,7 @@
#include "i915_selftest.h"
#include "gt/intel_context.h"
+#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
@@ -18,10 +19,71 @@
#include "huge_gem_object.h"
#include "mock_context.h"
+#define OW_SIZE 16 /* in bytes */
+#define F_SUBTILE_SIZE 64 /* in bytes */
+#define F_TILE_WIDTH 128 /* in bytes */
+#define F_TILE_HEIGHT 32 /* in pixels */
+#define F_SUBTILE_WIDTH OW_SIZE /* in bytes */
+#define F_SUBTILE_HEIGHT 4 /* in pixels */
+
+static int linear_x_y_to_ftiled_pos(int x, int y, u32 stride, int bpp)
+{
+ int tile_base;
+ int tile_x, tile_y;
+ int swizzle, subtile;
+ int pixel_size = bpp / 8;
+ int pos;
+
+ /*
+ * Subtile remapping for F tile. Note that map[a]==b implies map[b]==a
+ * so we can use the same table to tile and untile.
+ */
+ static const u8 f_subtile_map[] = {
+ 0, 1, 2, 3, 8, 9, 10, 11,
+ 4, 5, 6, 7, 12, 13, 14, 15,
+ 16, 17, 18, 19, 24, 25, 26, 27,
+ 20, 21, 22, 23, 28, 29, 30, 31,
+ 32, 33, 34, 35, 40, 41, 42, 43,
+ 36, 37, 38, 39, 44, 45, 46, 47,
+ 48, 49, 50, 51, 56, 57, 58, 59,
+ 52, 53, 54, 55, 60, 61, 62, 63
+ };
+
+ x *= pixel_size;
+ /*
+ * Where does the 4k tile start (in bytes)? This is the same for Y and
+ * F so we can use the Y-tile algorithm to get to that point.
+ */
+ tile_base =
+ y / F_TILE_HEIGHT * stride * F_TILE_HEIGHT +
+ x / F_TILE_WIDTH * 4096;
+
+ /* Find pixel within tile */
+ tile_x = x % F_TILE_WIDTH;
+ tile_y = y % F_TILE_HEIGHT;
+
+ /* And figure out the subtile within the 4k tile */
+ subtile = tile_y / F_SUBTILE_HEIGHT * 8 + tile_x / F_SUBTILE_WIDTH;
+
+ /* Swizzle the subtile number according to the bspec diagram */
+ swizzle = f_subtile_map[subtile];
+
+ /* Calculate new position */
+ pos = tile_base +
+ swizzle * F_SUBTILE_SIZE +
+ tile_y % F_SUBTILE_HEIGHT * OW_SIZE +
+ tile_x % F_SUBTILE_WIDTH;
+
+ GEM_BUG_ON(!IS_ALIGNED(pos, pixel_size));
+
+ return pos / pixel_size * 4;
+}
+
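A worked example may help sanity-check the mapping above; the numbers are illustrative (32bpp, 512-byte stride) and simply follow the function's arithmetic:

/* x = 17 px, y = 5, stride = 512, bpp = 32 (pixel_size = 4):
 *   x (bytes) = 68
 *   tile_base = (5 / 32) * 512 * 32 + (68 / 128) * 4096 = 0
 *   tile_x = 68, tile_y = 5
 *   subtile = (5 / 4) * 8 + 68 / 16 = 12, f_subtile_map[12] = 12
 *   pos = 0 + 12 * 64 + (5 % 4) * 16 + (68 % 16) = 788
 * so linear_x_y_to_ftiled_pos(17, 5, 512, 32) == 788.
 */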
enum client_tiling {
CLIENT_TILING_LINEAR,
CLIENT_TILING_X,
CLIENT_TILING_Y,
+ CLIENT_TILING_4,
CLIENT_NUM_TILING_TYPES
};
@@ -45,6 +107,36 @@ struct tiled_blits {
u32 height;
};
+static bool supports_x_tiling(const struct drm_i915_private *i915)
+{
+ int gen = GRAPHICS_VER(i915);
+
+ if (gen < 12)
+ return true;
+
+ if (!HAS_LMEM(i915) || IS_DG1(i915))
+ return false;
+
+ return true;
+}
+
+static bool fast_blit_ok(const struct blit_buffer *buf)
+{
+ int gen = GRAPHICS_VER(buf->vma->vm->i915);
+
+ if (gen < 9)
+ return false;
+
+ if (gen < 12)
+ return true;
+
+ /* filter out platforms where fast blit does not support X-tiling */
+ if (buf->tiling == CLIENT_TILING_X && !supports_x_tiling(buf->vma->vm->i915))
+ return false;
+
+ return true;
+}
+
static int prepare_blit(const struct tiled_blits *t,
struct blit_buffer *dst,
struct blit_buffer *src,
@@ -59,51 +151,103 @@ static int prepare_blit(const struct tiled_blits *t,
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
- cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
- if (src->tiling == CLIENT_TILING_Y)
- cmd |= BCS_SRC_Y;
- if (dst->tiling == CLIENT_TILING_Y)
- cmd |= BCS_DST_Y;
- *cs++ = cmd;
-
- cmd = MI_FLUSH_DW;
- if (ver >= 8)
- cmd++;
- *cs++ = cmd;
- *cs++ = 0;
- *cs++ = 0;
- *cs++ = 0;
-
- cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
- if (ver >= 8)
- cmd += 2;
-
- src_pitch = t->width * 4;
- if (src->tiling) {
- cmd |= XY_SRC_COPY_BLT_SRC_TILED;
- src_pitch /= 4;
- }
+ if (fast_blit_ok(dst) && fast_blit_ok(src)) {
+ struct intel_gt *gt = t->ce->engine->gt;
+ u32 src_tiles = 0, dst_tiles = 0;
+ u32 src_4t = 0, dst_4t = 0;
+
+ /* Need to program BLIT_CCTL if it has not been done previously
+ * before using XY_FAST_COPY_BLT
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(BLIT_CCTL(t->ce->engine->mmio_base));
+ *cs++ = (BLIT_CCTL_SRC_MOCS(gt->mocs.uc_index) |
+ BLIT_CCTL_DST_MOCS(gt->mocs.uc_index));
+
+ src_pitch = t->width; /* in dwords */
+ if (src->tiling == CLIENT_TILING_4) {
+ src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
+ src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
+ } else if (src->tiling == CLIENT_TILING_Y) {
+ src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
+ } else if (src->tiling == CLIENT_TILING_X) {
+ src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
+ } else {
+ src_pitch *= 4; /* in bytes */
+ }
- dst_pitch = t->width * 4;
- if (dst->tiling) {
- cmd |= XY_SRC_COPY_BLT_DST_TILED;
- dst_pitch /= 4;
- }
+ dst_pitch = t->width; /* in dwords */
+ if (dst->tiling == CLIENT_TILING_4) {
+ dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
+ dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
+ } else if (dst->tiling == CLIENT_TILING_Y) {
+ dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
+ } else if (dst->tiling == CLIENT_TILING_X) {
+ dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
+ } else {
+ dst_pitch *= 4; /* in bytes */
+ }
- *cs++ = cmd;
- *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
- *cs++ = 0;
- *cs++ = t->height << 16 | t->width;
- *cs++ = lower_32_bits(dst->vma->node.start);
- if (use_64b_reloc)
+ *cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2) |
+ src_tiles | dst_tiles;
+ *cs++ = src_4t | dst_4t | BLT_DEPTH_32 | dst_pitch;
+ *cs++ = 0;
+ *cs++ = t->height << 16 | t->width;
+ *cs++ = lower_32_bits(dst->vma->node.start);
*cs++ = upper_32_bits(dst->vma->node.start);
- *cs++ = 0;
- *cs++ = src_pitch;
- *cs++ = lower_32_bits(src->vma->node.start);
- if (use_64b_reloc)
+ *cs++ = 0;
+ *cs++ = src_pitch;
+ *cs++ = lower_32_bits(src->vma->node.start);
*cs++ = upper_32_bits(src->vma->node.start);
+ } else {
+ if (ver >= 6) {
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
+ cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
+ if (src->tiling == CLIENT_TILING_Y)
+ cmd |= BCS_SRC_Y;
+ if (dst->tiling == CLIENT_TILING_Y)
+ cmd |= BCS_DST_Y;
+ *cs++ = cmd;
+
+ cmd = MI_FLUSH_DW;
+ if (ver >= 8)
+ cmd++;
+ *cs++ = cmd;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ }
+
+ cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
+ if (ver >= 8)
+ cmd += 2;
+
+ src_pitch = t->width * 4;
+ if (src->tiling) {
+ cmd |= XY_SRC_COPY_BLT_SRC_TILED;
+ src_pitch /= 4;
+ }
+
+ dst_pitch = t->width * 4;
+ if (dst->tiling) {
+ cmd |= XY_SRC_COPY_BLT_DST_TILED;
+ dst_pitch /= 4;
+ }
+
+ *cs++ = cmd;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
+ *cs++ = 0;
+ *cs++ = t->height << 16 | t->width;
+ *cs++ = lower_32_bits(dst->vma->node.start);
+ if (use_64b_reloc)
+ *cs++ = upper_32_bits(dst->vma->node.start);
+ *cs++ = 0;
+ *cs++ = src_pitch;
+ *cs++ = lower_32_bits(src->vma->node.start);
+ if (use_64b_reloc)
+ *cs++ = upper_32_bits(src->vma->node.start);
+ }
*cs++ = MI_BATCH_BUFFER_END;
@@ -181,7 +325,13 @@ static int tiled_blits_create_buffers(struct tiled_blits *t,
t->buffers[i].vma = vma;
t->buffers[i].tiling =
- i915_prandom_u32_max_state(CLIENT_TILING_Y + 1, prng);
+ i915_prandom_u32_max_state(CLIENT_NUM_TILING_TYPES, prng);
+
+ /* Platforms support either TileY or Tile4, not both */
+ if (HAS_4TILE(i915) && t->buffers[i].tiling == CLIENT_TILING_Y)
+ t->buffers[i].tiling = CLIENT_TILING_4;
+ else if (!HAS_4TILE(i915) && t->buffers[i].tiling == CLIENT_TILING_4)
+ t->buffers[i].tiling = CLIENT_TILING_Y;
}
return 0;
@@ -206,7 +356,8 @@ static u64 swizzle_bit(unsigned int bit, u64 offset)
static u64 tiled_offset(const struct intel_gt *gt,
u64 v,
unsigned int stride,
- enum client_tiling tiling)
+ enum client_tiling tiling,
+ int x_pos, int y_pos)
{
unsigned int swizzle;
u64 x, y;
@@ -216,7 +367,12 @@ static u64 tiled_offset(const struct intel_gt *gt,
y = div64_u64_rem(v, stride, &x);
- if (tiling == CLIENT_TILING_X) {
+ if (tiling == CLIENT_TILING_4) {
+ v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);
+
+ /* no swizzling for f-tiling */
+ swizzle = I915_BIT_6_SWIZZLE_NONE;
+ } else if (tiling == CLIENT_TILING_X) {
v = div64_u64_rem(y, 8, &y) * stride * 8;
v += y * 512;
v += div64_u64_rem(x, 512, &x) << 12;
@@ -259,6 +415,7 @@ static const char *repr_tiling(enum client_tiling tiling)
case CLIENT_TILING_LINEAR: return "linear";
case CLIENT_TILING_X: return "X";
case CLIENT_TILING_Y: return "Y";
+ case CLIENT_TILING_4: return "F";
default: return "unknown";
}
}
@@ -284,7 +441,7 @@ static int verify_buffer(const struct tiled_blits *t,
} else {
u64 v = tiled_offset(buf->vma->vm->gt,
p * 4, t->width * 4,
- buf->tiling);
+ buf->tiling, x, y);
if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p)
ret = -EINVAL;
@@ -504,6 +661,9 @@ static int tiled_blits_bounce(struct tiled_blits *t, struct rnd_state *prng)
if (err)
return err;
+ /* Simulating GTT eviction of the same buffer / layout */
+ t->buffers[2].tiling = t->buffers[0].tiling;
+
/* Reposition so that we overlap the old addresses, and slightly off */
err = tiled_blit(t,
&t->buffers[2], t->hole + t->align,
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 93a67422ca3b..c6ad67b90e8a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -212,7 +212,7 @@ static int __live_parallel_switch1(void *data)
i915_request_add(rq);
}
- if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ if (i915_request_wait(rq, 0, HZ) < 0)
err = -ETIME;
i915_request_put(rq);
if (err)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index 801af51aff62..fe6c37fd7859 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -9,6 +9,7 @@
#include "i915_deps.h"
+#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
@@ -109,7 +110,8 @@ static int igt_same_create_migrate(void *arg)
static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
struct drm_i915_gem_object *obj,
- struct i915_vma *vma)
+ struct i915_vma *vma,
+ bool silent_migrate)
{
int err;
@@ -138,7 +140,8 @@ static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
if (i915_gem_object_is_lmem(obj)) {
err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
if (err) {
- pr_err("Object failed migration to smem\n");
+ if (!silent_migrate)
+ pr_err("Object failed migration to smem\n");
if (err)
return err;
}
@@ -156,7 +159,8 @@ static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
} else {
err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM_0);
if (err) {
- pr_err("Object failed migration to lmem\n");
+ if (!silent_migrate)
+ pr_err("Object failed migration to lmem\n");
if (err)
return err;
}
@@ -179,7 +183,8 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
struct i915_address_space *vm,
struct i915_deps *deps,
struct igt_spinner *spin,
- struct dma_fence *spin_fence)
+ struct dma_fence *spin_fence,
+ bool borked_migrate)
{
struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
@@ -242,7 +247,8 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
*/
for (i = 1; i <= 5; ++i) {
for_i915_gem_ww(&ww, err, true)
- err = lmem_pages_migrate_one(&ww, obj, vma);
+ err = lmem_pages_migrate_one(&ww, obj, vma,
+ borked_migrate);
if (err)
goto out_put;
}
@@ -283,23 +289,70 @@ out_put:
static int igt_lmem_pages_failsafe_migrate(void *arg)
{
- int fail_gpu, fail_alloc, ret;
+ int fail_gpu, fail_alloc, ban_memcpy, ret;
struct intel_gt *gt = arg;
for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) {
- pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
- fail_gpu, fail_alloc);
- i915_ttm_migrate_set_failure_modes(fail_gpu,
- fail_alloc);
- ret = __igt_lmem_pages_migrate(gt, NULL, NULL, NULL, NULL);
- if (ret)
- goto out_err;
+ for (ban_memcpy = 0; ban_memcpy < 2; ++ban_memcpy) {
+ pr_info("Simulated failure modes: gpu: %d, alloc:%d, ban_memcpy: %d\n",
+ fail_gpu, fail_alloc, ban_memcpy);
+ i915_ttm_migrate_set_ban_memcpy(ban_memcpy);
+ i915_ttm_migrate_set_failure_modes(fail_gpu,
+ fail_alloc);
+ ret = __igt_lmem_pages_migrate(gt, NULL, NULL,
+ NULL, NULL,
+ ban_memcpy &&
+ fail_gpu);
+
+ if (ban_memcpy && fail_gpu) {
+ struct intel_gt *__gt;
+ unsigned int id;
+
+ if (ret != -EIO) {
+ pr_err("expected -EIO, got (%d)\n", ret);
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ }
+
+ for_each_gt(__gt, gt->i915, id) {
+ intel_wakeref_t wakeref;
+ bool wedged;
+
+ mutex_lock(&__gt->reset.mutex);
+ wedged = test_bit(I915_WEDGED, &__gt->reset.flags);
+ mutex_unlock(&__gt->reset.mutex);
+
+ if (fail_gpu && !fail_alloc) {
+ if (!wedged) {
+ pr_err("gt(%u) not wedged\n", id);
+ ret = -EINVAL;
+ continue;
+ }
+ } else if (wedged) {
+ pr_err("gt(%u) incorrectly wedged\n", id);
+ ret = -EINVAL;
+ } else {
+ continue;
+ }
+
+ wakeref = intel_runtime_pm_get(__gt->uncore->rpm);
+ igt_global_reset_lock(__gt);
+ intel_gt_reset(__gt, ALL_ENGINES, NULL);
+ igt_global_reset_unlock(__gt);
+ intel_runtime_pm_put(__gt->uncore->rpm, wakeref);
+ }
+ if (ret)
+ goto out_err;
+ }
+ }
}
}
out_err:
i915_ttm_migrate_set_failure_modes(false, false);
+ i915_ttm_migrate_set_ban_memcpy(false);
return ret;
}
@@ -370,7 +423,7 @@ static int igt_async_migrate(struct intel_gt *gt)
goto out_ce;
err = __igt_lmem_pages_migrate(gt, &ppgtt->vm, &deps, &spin,
- spin_fence);
+ spin_fence, false);
i915_deps_fini(&deps);
dma_fence_put(spin_fence);
if (err)
@@ -394,23 +447,67 @@ out_spin:
#define ASYNC_FAIL_ALLOC 1
static int igt_lmem_async_migrate(void *arg)
{
- int fail_gpu, fail_alloc, ret;
+ int fail_gpu, fail_alloc, ban_memcpy, ret;
struct intel_gt *gt = arg;
for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
for (fail_alloc = 0; fail_alloc < ASYNC_FAIL_ALLOC; ++fail_alloc) {
- pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
- fail_gpu, fail_alloc);
- i915_ttm_migrate_set_failure_modes(fail_gpu,
- fail_alloc);
- ret = igt_async_migrate(gt);
- if (ret)
- goto out_err;
+ for (ban_memcpy = 0; ban_memcpy < 2; ++ban_memcpy) {
+ pr_info("Simulated failure modes: gpu: %d, alloc: %d, ban_memcpy: %d\n",
+ fail_gpu, fail_alloc, ban_memcpy);
+ i915_ttm_migrate_set_ban_memcpy(ban_memcpy);
+ i915_ttm_migrate_set_failure_modes(fail_gpu,
+ fail_alloc);
+ ret = igt_async_migrate(gt);
+
+ if (fail_gpu && ban_memcpy) {
+ struct intel_gt *__gt;
+ unsigned int id;
+
+ if (ret != -EIO) {
+ pr_err("expected -EIO, got (%d)\n", ret);
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ }
+
+ for_each_gt(__gt, gt->i915, id) {
+ intel_wakeref_t wakeref;
+ bool wedged;
+
+ mutex_lock(&__gt->reset.mutex);
+ wedged = test_bit(I915_WEDGED, &__gt->reset.flags);
+ mutex_unlock(&__gt->reset.mutex);
+
+ if (fail_gpu && !fail_alloc) {
+ if (!wedged) {
+ pr_err("gt(%u) not wedged\n", id);
+ ret = -EINVAL;
+ continue;
+ }
+ } else if (wedged) {
+ pr_err("gt(%u) incorrectly wedged\n", id);
+ ret = -EINVAL;
+ } else {
+ continue;
+ }
+
+ wakeref = intel_runtime_pm_get(__gt->uncore->rpm);
+ igt_global_reset_lock(__gt);
+ intel_gt_reset(__gt, ALL_ENGINES, NULL);
+ igt_global_reset_unlock(__gt);
+ intel_runtime_pm_put(__gt->uncore->rpm, wakeref);
+ }
+ }
+ if (ret)
+ goto out_err;
+ }
}
}
out_err:
i915_ttm_migrate_set_failure_modes(false, false);
+ i915_ttm_migrate_set_ban_memcpy(false);
return ret;
}
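
The wedged-check-and-recover block is duplicated almost verbatim in igt_lmem_pages_failsafe_migrate(), igt_lmem_async_migrate() and (below) __igt_mmap_migrate(); a possible shared helper, purely a sketch and not part of this patch:

static int igt_check_wedged_and_recover(struct drm_i915_private *i915,
					bool expect_wedged)
{
	struct intel_gt *gt;
	unsigned int id;
	int err = 0;

	for_each_gt(gt, i915, id) {
		intel_wakeref_t wakeref;
		bool wedged;

		mutex_lock(&gt->reset.mutex);
		wedged = test_bit(I915_WEDGED, &gt->reset.flags);
		mutex_unlock(&gt->reset.mutex);

		if (wedged != expect_wedged) {
			pr_err("gt(%u) %s wedged\n", id,
			       wedged ? "incorrectly" : "not");
			err = -EINVAL;
		}

		if (!wedged)
			continue;

		/* Unwedge so the next test iteration starts clean. */
		wakeref = intel_runtime_pm_get(gt->uncore->rpm);
		igt_global_reset_lock(gt);
		intel_gt_reset(gt, ALL_ENGINES, NULL);
		igt_global_reset_unlock(gt);
		intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	}

	return err;
}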
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 5bc93a1ce3e3..3ced9948a331 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -10,6 +10,7 @@
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
+#include "gem/i915_gem_ttm_move.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
@@ -21,6 +22,7 @@
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
+#include "selftests/igt_reset.h"
#include "selftests/igt_mmap.h"
struct tile {
@@ -979,6 +981,9 @@ static int igt_mmap(void *arg)
};
int i;
+ if (mr->private)
+ continue;
+
for (i = 0; i < ARRAY_SIZE(sizes); i++) {
struct drm_i915_gem_object *obj;
int err;
@@ -1160,6 +1165,7 @@ out_unmap:
#define IGT_MMAP_MIGRATE_FILL (1 << 1)
#define IGT_MMAP_MIGRATE_EVICTABLE (1 << 2)
#define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3)
+#define IGT_MMAP_MIGRATE_FAIL_GPU (1 << 4)
static int __igt_mmap_migrate(struct intel_memory_region **placements,
int n_placements,
struct intel_memory_region *expected_mr,
@@ -1221,8 +1227,10 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
expand32(POISON_INUSE), &rq);
i915_gem_object_unpin_pages(obj);
if (rq) {
- dma_resv_add_fence(obj->base.resv, &rq->fence,
- DMA_RESV_USAGE_KERNEL);
+ err = dma_resv_reserve_fences(obj->base.resv, 1);
+ if (!err)
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_KERNEL);
i915_request_put(rq);
}
i915_gem_object_unlock(obj);
@@ -1232,13 +1240,62 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
igt_make_evictable(&objects);
+ if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
+ err = i915_gem_object_lock(obj, NULL);
+ if (err)
+ goto out_put;
+
+ /*
+ * Ensure we only simulate the gpu failure when faulting the
+ * pages.
+ */
+ err = i915_gem_object_wait_moving_fence(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto out_put;
+ i915_ttm_migrate_set_failure_modes(true, false);
+ }
+
err = ___igt_mmap_migrate(i915, obj, addr,
flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
+
if (!err && obj->mm.region != expected_mr) {
pr_err("%s region mismatch %s\n", __func__, expected_mr->name);
err = -EINVAL;
}
+ if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
+ struct intel_gt *gt;
+ unsigned int id;
+
+ i915_ttm_migrate_set_failure_modes(false, false);
+
+ for_each_gt(gt, i915, id) {
+ intel_wakeref_t wakeref;
+ bool wedged;
+
+ mutex_lock(&gt->reset.mutex);
+ wedged = test_bit(I915_WEDGED, &gt->reset.flags);
+ mutex_unlock(&gt->reset.mutex);
+ if (!wedged) {
+ pr_err("gt(%u) not wedged\n", id);
+ err = -EINVAL;
+ continue;
+ }
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ igt_global_reset_lock(gt);
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
+ igt_global_reset_unlock(gt);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ }
+
+ if (!i915_gem_object_has_unknown_state(obj)) {
+ pr_err("object missing unknown_state\n");
+ err = -EINVAL;
+ }
+ }
+
out_put:
i915_gem_object_put(obj);
igt_close_objects(i915, &objects);
@@ -1319,6 +1376,23 @@ static int igt_mmap_migrate(void *arg)
IGT_MMAP_MIGRATE_TOPDOWN |
IGT_MMAP_MIGRATE_FILL |
IGT_MMAP_MIGRATE_UNFAULTABLE);
+ if (err)
+ goto out_io_size;
+
+ /*
+ * Allocate in the non-mappable portion, but force migrating to
+ * the mappable portion on fault (LMEM -> LMEM). We then also
+ * simulate a gpu error when moving the pages on fault, which
+ * should result in wedging the gpu and returning SIGBUS in the
+ * fault handler, since we can't fall back to memcpy.
+ */
+ err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
+ IGT_MMAP_MIGRATE_TOPDOWN |
+ IGT_MMAP_MIGRATE_FILL |
+ IGT_MMAP_MIGRATE_EVICTABLE |
+ IGT_MMAP_MIGRATE_FAIL_GPU |
+ IGT_MMAP_MIGRATE_UNFAULTABLE);
out_io_size:
mr->io_size = saved_io_size;
i915_ttm_buddy_man_force_visible_size(man,
@@ -1435,6 +1509,9 @@ static int igt_mmap_access(void *arg)
struct drm_i915_gem_object *obj;
int err;
+ if (mr->private)
+ continue;
+
obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
if (obj == ERR_PTR(-ENODEV))
continue;
@@ -1580,6 +1657,9 @@ static int igt_mmap_gpu(void *arg)
struct drm_i915_gem_object *obj;
int err;
+ if (mr->private)
+ continue;
+
obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
if (obj == ERR_PTR(-ENODEV))
continue;
@@ -1727,6 +1807,9 @@ static int igt_mmap_revoke(void *arg)
struct drm_i915_gem_object *obj;
int err;
+ if (mr->private)
+ continue;
+
obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
if (obj == ERR_PTR(-ENODEV))
continue;
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 3e13960615bd..98645797962f 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -197,8 +197,10 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
flags |= PIPE_CONTROL_CS_STALL;
- if (engine->class == COMPUTE_CLASS)
- flags &= ~PIPE_CONTROL_3D_FLAGS;
+ if (!HAS_3D_PIPELINE(engine->i915))
+ flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+ else if (engine->class == COMPUTE_CLASS)
+ flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -227,8 +229,10 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
flags |= PIPE_CONTROL_CS_STALL;
- if (engine->class == COMPUTE_CLASS)
- flags &= ~PIPE_CONTROL_3D_FLAGS;
+ if (!HAS_3D_PIPELINE(engine->i915))
+ flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+ else if (engine->class == COMPUTE_CLASS)
+ flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
if (!HAS_FLAT_CCS(rq->engine->i915))
count = 8 + 4;
@@ -272,7 +276,8 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
if (!HAS_FLAT_CCS(rq->engine->i915) &&
(rq->engine->class == VIDEO_DECODE_CLASS ||
rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
- aux_inv = rq->engine->mask & ~BIT(BCS0);
+ aux_inv = rq->engine->mask &
+ ~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0);
if (aux_inv)
cmd += 4;
}
@@ -716,8 +721,10 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
/* Wa_1409600907 */
flags |= PIPE_CONTROL_DEPTH_STALL;
- if (rq->engine->class == COMPUTE_CLASS)
- flags &= ~PIPE_CONTROL_3D_FLAGS;
+ if (!HAS_3D_PIPELINE(rq->engine->i915))
+ flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+ else if (rq->engine->class == COMPUTE_CLASS)
+ flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
cs = gen12_emit_ggtt_write_rcs(cs,
rq->fence.seqno,
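
The aux_inv change above depends on the copy-engine ids being contiguous; with the enum values introduced further below (RCS0 = 0, BCS0..BCS8 = 1..9), the mask works out as:

/* GENMASK(_BCS(I915_MAX_BCS - 1), BCS0) == GENMASK(9, 1) == 0x3fe,
 * so aux_inv now excludes all nine copy engines, where the old code
 * only cleared BIT(BCS0). */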
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 9dc9dccf7b09..ecc990ec1b95 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -399,7 +399,8 @@ static void insert_breadcrumb(struct i915_request *rq)
* the request as it may have completed and raised the interrupt as
* we were attaching it into the lists.
*/
- irq_work_queue(&b->irq_work);
+ if (!b->irq_armed || __i915_request_is_complete(rq))
+ irq_work_queue(&b->irq_work);
}
bool i915_request_enable_breadcrumb(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 4070cb5711d8..654a092ed3d6 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -601,6 +601,30 @@ u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
return avg;
}
+bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
+{
+ bool ret = intel_context_set_banned(ce);
+
+ trace_intel_context_ban(ce);
+
+ if (ce->ops->revoke)
+ ce->ops->revoke(ce, rq,
+ INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS);
+
+ return ret;
+}
+
+bool intel_context_exit_nonpersistent(struct intel_context *ce,
+ struct i915_request *rq)
+{
+ bool ret = intel_context_set_exiting(ce);
+
+ if (ce->ops->revoke)
+ ce->ops->revoke(ce, rq, ce->engine->props.preempt_timeout_ms);
+
+ return ret;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif
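
Caller-side summary of the two new out-of-line helpers above (restating the code, no new API): a ban revokes with the fixed INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS (1 ms), whereas a non-persistent exit revokes with the engine's normal preempt_timeout_ms, giving a closing-but-unbanned context its usual grace period:

/* ban:              ce->ops->revoke(ce, rq, 1 /* ms */);
 * nonpersistent:    ce->ops->revoke(ce, rq, engine->props.preempt_timeout_ms);
 */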
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index b7d3214d2cdd..8e2d70630c49 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -25,6 +25,8 @@
##__VA_ARGS__); \
} while (0)
+#define INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS (1)
+
struct i915_gem_ww_ctx;
void intel_context_init(struct intel_context *ce,
@@ -309,18 +311,27 @@ static inline bool intel_context_set_banned(struct intel_context *ce)
return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}
-static inline bool intel_context_ban(struct intel_context *ce,
- struct i915_request *rq)
+bool intel_context_ban(struct intel_context *ce, struct i915_request *rq);
+
+static inline bool intel_context_is_schedulable(const struct intel_context *ce)
{
- bool ret = intel_context_set_banned(ce);
+ return !test_bit(CONTEXT_EXITING, &ce->flags) &&
+ !test_bit(CONTEXT_BANNED, &ce->flags);
+}
- trace_intel_context_ban(ce);
- if (ce->ops->ban)
- ce->ops->ban(ce, rq);
+static inline bool intel_context_is_exiting(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_EXITING, &ce->flags);
+}
- return ret;
+static inline bool intel_context_set_exiting(struct intel_context *ce)
+{
+ return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
}
+bool intel_context_exit_nonpersistent(struct intel_context *ce,
+ struct i915_request *rq);
+
static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 44e7339e7a4a..04eacae1aca5 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -40,7 +40,8 @@ struct intel_context_ops {
int (*alloc)(struct intel_context *ce);
- void (*ban)(struct intel_context *ce, struct i915_request *rq);
+ void (*revoke)(struct intel_context *ce, struct i915_request *rq,
+ unsigned int preempt_timeout_ms);
int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
int (*pin)(struct intel_context *ce, void *vaddr);
@@ -122,6 +123,7 @@ struct intel_context {
#define CONTEXT_GUC_INIT 10
#define CONTEXT_PERMA_PIN 11
#define CONTEXT_IS_PARKING 12
+#define CONTEXT_EXITING 13
struct {
u64 timeout_us;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 5b6ce10cb158..37fa813af766 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -21,8 +21,9 @@
#include "intel_engine_user.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
-#include "intel_gt_requests.h"
+#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
#include "intel_lrc.h"
#include "intel_lrc_reg.h"
#include "intel_reset.h"
@@ -71,6 +72,62 @@ static const struct engine_info intel_engines[] = {
{ .graphics_ver = 6, .base = BLT_RING_BASE }
},
},
+ [BCS1] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 1,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS1_RING_BASE }
+ },
+ },
+ [BCS2] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 2,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS2_RING_BASE }
+ },
+ },
+ [BCS3] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 3,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS3_RING_BASE }
+ },
+ },
+ [BCS4] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 4,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS4_RING_BASE }
+ },
+ },
+ [BCS5] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 5,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS5_RING_BASE }
+ },
+ },
+ [BCS6] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 6,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS6_RING_BASE }
+ },
+ },
+ [BCS7] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 7,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS7_RING_BASE }
+ },
+ },
+ [BCS8] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 8,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS8_RING_BASE }
+ },
+ },
[VCS0] = {
.class = VIDEO_DECODE_CLASS,
.instance = 0,
@@ -334,6 +391,14 @@ static u32 get_reset_domain(u8 ver, enum intel_engine_id id)
static const u32 engine_reset_domains[] = {
[RCS0] = GEN11_GRDOM_RENDER,
[BCS0] = GEN11_GRDOM_BLT,
+ [BCS1] = XEHPC_GRDOM_BLT1,
+ [BCS2] = XEHPC_GRDOM_BLT2,
+ [BCS3] = XEHPC_GRDOM_BLT3,
+ [BCS4] = XEHPC_GRDOM_BLT4,
+ [BCS5] = XEHPC_GRDOM_BLT5,
+ [BCS6] = XEHPC_GRDOM_BLT6,
+ [BCS7] = XEHPC_GRDOM_BLT7,
+ [BCS8] = XEHPC_GRDOM_BLT8,
[VCS0] = GEN11_GRDOM_MEDIA,
[VCS1] = GEN11_GRDOM_MEDIA2,
[VCS2] = GEN11_GRDOM_MEDIA3,
@@ -610,8 +675,8 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
return;
- ccs_mask = intel_slicemask_from_dssmask(intel_sseu_get_compute_subslices(&info->sseu),
- ss_per_ccs);
+ ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
+ ss_per_ccs);
/*
* If all DSS in a quadrant are fused off, the corresponding CCS
* engine is not available for use.
@@ -622,6 +687,34 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
}
}
+static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_gt_info *info = &gt->info;
+ unsigned long meml3_mask;
+ unsigned long quad;
+
+ meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
+ meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
+
+ /*
+ * Link Copy engines may be fused off according to meml3_mask. Each
+ * bit is a quad that houses two Link Copy and two Sub Copy engines.
+ */
+ for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
+ unsigned int instance = quad * 2 + 1;
+ intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
+ _BCS(instance));
+
+ if (mask & info->engine_mask) {
+ drm_dbg(&i915->drm, "bcs%u fused off\n", instance);
+ drm_dbg(&i915->drm, "bcs%u fused off\n", instance + 1);
+
+ info->engine_mask &= ~mask;
+ }
+ }
+}
+
/*
* Determine which engines are fused off in our particular hardware.
* Note that we have a catch-22 situation where we need to be able to access
@@ -704,6 +797,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
engine_mask_apply_compute_fuses(gt);
+ engine_mask_apply_copy_fuses(gt);
return info->engine_mask;
}
@@ -1418,20 +1512,11 @@ void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
__gpm_wait_for_fw_complete(engine->gt, fw_pending);
}
-static u32
-read_subslice_reg(const struct intel_engine_cs *engine,
- int slice, int subslice, i915_reg_t reg)
-{
- return intel_uncore_read_with_mcr_steering(engine->uncore, reg,
- slice, subslice);
-}
-
/* NB: please notice the memset */
void intel_engine_get_instdone(const struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *i915 = engine->i915;
- const struct sseu_dev_info *sseu = &engine->gt->info.sseu;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
@@ -1456,31 +1541,23 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
}
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) {
- instdone->sampler[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- GEN7_SAMPLER_INSTDONE);
- instdone->row[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- GEN7_ROW_INSTDONE);
- }
- } else {
- for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
- instdone->sampler[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- GEN7_SAMPLER_INSTDONE);
- instdone->row[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- GEN7_ROW_INSTDONE);
- }
+ for_each_ss_steering(iter, engine->gt, slice, subslice) {
+ instdone->sampler[slice][subslice] =
+ intel_gt_mcr_read(engine->gt,
+ GEN7_SAMPLER_INSTDONE,
+ slice, subslice);
+ instdone->row[slice][subslice] =
+ intel_gt_mcr_read(engine->gt,
+ GEN7_ROW_INSTDONE,
+ slice, subslice);
}
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
- for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice)
+ for_each_ss_steering(iter, engine->gt, slice, subslice)
instdone->geom_svg[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- XEHPG_INSTDONE_GEOM_SVG);
+ intel_gt_mcr_read(engine->gt,
+ XEHPG_INSTDONE_GEOM_SVG,
+ slice, subslice);
}
} else if (GRAPHICS_VER(i915) >= 7) {
instdone->instdone =
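
To illustrate engine_mask_apply_copy_fuses() above: each cleared meml3 bit removes one link-copy pair. For quad 1 (illustrative value):

/* quad = 1  =>  instance = quad * 2 + 1 = 3
 * mask = GENMASK(_BCS(4), _BCS(3))  =>  BCS3 and BCS4
 * both engines in that quad are then dropped from info->engine_mask. */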
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index 75a0c55c5aa5..889f0df3940b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -8,6 +8,7 @@
#include "i915_reg_defs.h"
+#define RING_EXCC(base) _MMIO((base) + 0x28)
#define RING_TAIL(base) _MMIO((base) + 0x30)
#define TAIL_ADDR 0x001FFFF8
#define RING_HEAD(base) _MMIO((base) + 0x34)
@@ -133,6 +134,8 @@
(REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (dst) << 1) | \
REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (src) << 1))
+#define RING_CSCMDOP(base) _MMIO((base) + 0x20c)
+
/*
* CMD_CCTL read/write fields take a MOCS value and _not_ a table index.
* The lsb of each can be considered a separate enabling bit for encryption.
@@ -149,6 +152,7 @@
REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1))
#define RING_PREDICATE_RESULT(base) _MMIO((base) + 0x3b8) /* gen12+ */
+
#define MI_PREDICATE_RESULT_2(base) _MMIO((base) + 0x3bc)
#define LOWER_SLICE_ENABLED (1 << 0)
#define LOWER_SLICE_DISABLED (0 << 0)
@@ -172,6 +176,7 @@
#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT REG_BIT(2)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE REG_BIT(8)
+#define RING_CTX_SR_CTL(base) _MMIO((base) + 0x244)
#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
#define GEN8_RING_PDP_UDW(base, n) _MMIO((base) + 0x270 + (n) * 8 + 4)
#define GEN8_RING_PDP_LDW(base, n) _MMIO((base) + 0x270 + (n) * 8)
@@ -196,6 +201,7 @@
#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */
#define RING_PREDICATE_RESULT(base) _MMIO((base) + 0x3b8)
#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
+#define RING_FORCE_TO_NONPRIV_DENY REG_BIT(30)
#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2)
#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */
#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28)
@@ -208,7 +214,9 @@
#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0)
#define RING_FORCE_TO_NONPRIV_RANGE_MASK (3 << 0)
#define RING_FORCE_TO_NONPRIV_MASK_VALID \
- (RING_FORCE_TO_NONPRIV_RANGE_MASK | RING_FORCE_TO_NONPRIV_ACCESS_MASK)
+ (RING_FORCE_TO_NONPRIV_RANGE_MASK | \
+ RING_FORCE_TO_NONPRIV_ACCESS_MASK | \
+ RING_FORCE_TO_NONPRIV_DENY)
#define RING_MAX_NONPRIV_SLOTS 12
#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
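A minimal sketch of programming one whitelist slot with the extended valid mask above; reg, base, slot and uncore are placeholders, and the write path is assumed from the driver's workaround-list style rather than quoted from it:

	/* Illustrative: expose a 64-register range for userspace reads. */
	u32 val = i915_mmio_reg_offset(reg) |	/* must fit ADDRESS_MASK */
		  RING_FORCE_TO_NONPRIV_ACCESS_RD |
		  RING_FORCE_TO_NONPRIV_RANGE_64;

	intel_uncore_write(uncore, RING_FORCE_TO_NONPRIV(base, slot), val);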
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 298f2cc7a879..633a7e5dba3b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -35,7 +35,7 @@
#define OTHER_CLASS 4
#define COMPUTE_CLASS 5
#define MAX_ENGINE_CLASS 5
-#define MAX_ENGINE_INSTANCE 7
+#define MAX_ENGINE_INSTANCE 8
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8
@@ -99,6 +99,7 @@ struct i915_ctx_workarounds {
#define I915_MAX_SFC (I915_MAX_VCS / 2)
#define I915_MAX_CCS 4
#define I915_MAX_RCS 1
+#define I915_MAX_BCS 9
/*
* Engine IDs definitions.
@@ -107,6 +108,15 @@ struct i915_ctx_workarounds {
enum intel_engine_id {
RCS0 = 0,
BCS0,
+ BCS1,
+ BCS2,
+ BCS3,
+ BCS4,
+ BCS5,
+ BCS6,
+ BCS7,
+ BCS8,
+#define _BCS(n) (BCS0 + (n))
VCS0,
VCS1,
VCS2,
@@ -637,26 +647,4 @@ intel_engine_uses_wa_hold_ccs_switchout(struct intel_engine_cs *engine)
return engine->flags & I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
}
-#define instdone_has_slice(dev_priv___, sseu___, slice___) \
- ((GRAPHICS_VER(dev_priv___) == 7 ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
-
-#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
- (GRAPHICS_VER(dev_priv__) == 7 ? (1 & BIT(subslice__)) : \
- intel_sseu_has_subslice(sseu__, 0, subslice__))
-
-#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
- for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \
- (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \
- (slice_) += ((subslice_) == 0)) \
- for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
- (instdone_has_subslice(dev_priv_, sseu_, slice_, \
- subslice_)))
-
-#define for_each_instdone_gslice_dss_xehp(dev_priv_, sseu_, iter_, gslice_, dss_) \
- for ((iter_) = 0, (gslice_) = 0, (dss_) = 0; \
- (iter_) < GEN_SS_MASK_SIZE; \
- (iter_)++, (gslice_) = (iter_) / GEN_DSS_PER_GSLICE, \
- (dss_) = (iter_) % GEN_DSS_PER_GSLICE) \
- for_each_if(intel_sseu_has_subslice((sseu_), 0, (iter_)))
-
#endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 0627fa10d2dc..4b909cb88cdf 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -480,9 +480,9 @@ __execlists_schedule_in(struct i915_request *rq)
if (unlikely(intel_context_is_closed(ce) &&
!intel_engine_has_heartbeat(engine)))
- intel_context_set_banned(ce);
+ intel_context_set_exiting(ce);
- if (unlikely(intel_context_is_banned(ce) || bad_request(rq)))
+ if (unlikely(!intel_context_is_schedulable(ce) || bad_request(rq)))
reset_active(rq, engine);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -1243,7 +1243,7 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
- return 1;
+ return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS;
return READ_ONCE(engine->props.preempt_timeout_ms);
}
@@ -1360,7 +1360,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* submission. If we don't cancel the timer now,
* we will see that the timer has expired and
* reschedule the tasklet; continually until the
- * next context switch or other preeemption event.
+ * next context switch or other preemption event.
*
* Since we have decided to reschedule based on
* consumption of this timeslice, if we submit the
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index e6b2eb122ad7..15a915bb4088 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -3,16 +3,18 @@
* Copyright © 2020 Intel Corporation
*/
-#include <linux/types.h>
#include <asm/set_memory.h>
#include <asm/smp.h>
+#include <linux/types.h>
+#include <linux/stop_machine.h>
#include <drm/i915_drm.h>
+#include <drm/intel-gtt.h>
#include "gem/i915_gem_lmem.h"
+#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
-#include "intel_gt_gmch.h"
#include "intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -22,6 +24,13 @@
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
+static inline bool suspend_retains_ptes(struct i915_address_space *vm)
+{
+ return GRAPHICS_VER(vm->i915) >= 8 &&
+ !HAS_LMEM(vm->i915) &&
+ vm->is_ggtt;
+}
+
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
@@ -93,6 +102,23 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
return 0;
}
+/*
+ * Return the value of the last GGTT PTE cast to a u64, if the
+ * system is supposed to retain PTEs across resume; 0 otherwise.
+ */
+static u64 read_last_pte(struct i915_address_space *vm)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *ptep;
+
+ if (!suspend_retains_ptes(vm))
+ return 0;
+
+ GEM_BUG_ON(GRAPHICS_VER(vm->i915) < 8);
+ ptep = (typeof(ptep))ggtt->gsm + (ggtt_total_entries(ggtt) - 1);
+ return readq(ptep);
+}
+
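A minimal sketch of how the two helpers above cooperate across suspend/resume, assuming the probed_pte/pte_lost fields behave as wired up later in this patch (the helper name is illustrative):

	static bool ggtt_ptes_survived(struct i915_ggtt *ggtt)
	{
		struct i915_address_space *vm = &ggtt->vm;

		/* Trust retained PTEs only if the snapshot still matches. */
		return suspend_retains_ptes(vm) &&
		       !ggtt->pte_lost &&
		       ggtt->probed_pte == read_last_pte(vm);
	}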
/**
* i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
* @vm: The VM to suspend the mappings for
@@ -156,7 +182,10 @@ retry:
i915_gem_object_unlock(obj);
}
- vm->clear_range(vm, 0, vm->total);
+ if (!suspend_retains_ptes(vm))
+ vm->clear_range(vm, 0, vm->total);
+ else
+ i915_vm_to_ggtt(vm)->probed_pte = read_last_pte(vm);
vm->skip_pte_rewrite = save_skip_rewrite;
@@ -181,7 +210,7 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
spin_unlock_irq(&uncore->lock);
}
-void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
@@ -218,11 +247,232 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
return pte;
}
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
+
+ ggtt->invalidate(ggtt);
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *gte;
+ gen8_pte_t __iomem *end;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
+ gte = (gen8_pte_t __iomem *)ggtt->gsm;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
+ gen8_set_pte(gte++, pte_encode | addr);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ gen8_set_pte(gte++, vm->scratch[0]->encode);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void gen6_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ iowrite32(vm->pte_encode(addr, level, flags), pte);
+
+ ggtt->invalidate(ggtt);
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level.
+ * The object will be accessible to the GPU via commands whose operands
+ * reference offsets within the global GTT as well as accessible by the GPU
+ * through the GMADR mapped BAR (i915->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *gte;
+ gen6_pte_t __iomem *end;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ gte = (gen6_pte_t __iomem *)ggtt->gsm;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
+ iowrite32(vm->pte_encode(addr, level, flags), gte++);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ iowrite32(vm->scratch[0]->encode, gte++);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void nop_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
+ gen8_pte_t __iomem *gtt_base =
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ for (i = 0; i < num_entries; i++)
+ gen8_set_pte(&gtt_base[i], scratch_pte);
+}
+
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+ /*
+ * Make sure the internal GAM fifo has been cleared of all GTT
+ * writes before exiting stop_machine(). This guarantees that
+ * any aperture accesses waiting to start in another process
+ * cannot back up behind the GTT writes causing a hang.
+ * The register can be any arbitrary GAM register.
+ */
+ intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+ struct i915_address_space *vm;
+ dma_addr_t addr;
+ u64 offset;
+ enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+ struct insert_page *arg = _arg;
+
+ gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_page arg = { vm, addr, offset, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+ struct i915_address_space *vm;
+ struct i915_vma_resource *vma_res;
+ enum i915_cache_level level;
+ u32 flags;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+ struct insert_entries *arg = _arg;
+
+ gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct insert_entries arg = { vm, vma_res, level, flags };
+
+ stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ gen6_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = vm->scratch[0]->encode;
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+}
+
void intel_ggtt_bind_vma(struct i915_address_space *vm,
- struct i915_vm_pt_stash *stash,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
- u32 flags)
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
u32 pte_flags;
@@ -243,7 +493,7 @@ void intel_ggtt_bind_vma(struct i915_address_space *vm,
}
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res)
+ struct i915_vma_resource *vma_res)
{
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
@@ -299,6 +549,8 @@ static int init_ggtt(struct i915_ggtt *ggtt)
struct drm_mm_node *entry;
int ret;
+ ggtt->pte_lost = true;
+
/*
* GuC requires all resources that we're sharing with it to be placed in
* non-WOPCM memory. If GuC is not present or not in use we still need a
@@ -560,12 +812,326 @@ void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
dma_resv_fini(&ggtt->vm._resv);
}
-struct resource intel_pci_resource(struct pci_dev *pdev, int bar)
+static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
+
+static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+ bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+ bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+ if (bdw_gmch_ctl)
+ bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+#ifdef CONFIG_X86_32
+ /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
+ if (bdw_gmch_ctl > 4)
+ bdw_gmch_ctl = 4;
+#endif
+
+ return bdw_gmch_ctl << 20;
+}
+
+static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+ if (gmch_ctrl)
+ return 1 << (20 + gmch_ctrl);
+
+ return 0;
+}
+
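A worked example of the GGMS decode above, derived from the field encodings in this patch rather than a datasheet:

	/*
	 * e.g. GGMS field = 2:
	 *   gen6: size = 2 << 20          -> 2 MiB of PTEs
	 *         2 MiB / 4 B * 4 KiB pages = 2 GiB of GGTT space
	 *   gen8: size = (1 << 2) << 20   -> 4 MiB of PTEs
	 *         4 MiB / 8 B * 4 KiB pages = 2 GiB of GGTT space
	 *   chv:  size = 1 << (20 + 2)    -> 4 MiB of PTEs, as on gen8
	 */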
+static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
+{
+ /*
+ * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
+ * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
+ */
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
+ return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
+}
+
+static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
+{
+ return gen6_gttmmadr_size(i915) / 2;
+}
+
+static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ phys_addr_t phys_addr;
+ u32 pte_flags;
+ int ret;
+
+ GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
+
+ /*
+ * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
+ * will be dropped. For WC mappings in general we have 64 byte burst
+ * writes when the WC buffer is flushed, so we can't use it, but have to
+ * resort to an uncached mapping. The WC issue is easily caught by the
+ * readback check when writing GTT PTE entries.
+ */
+ if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
+ ggtt->gsm = ioremap(phys_addr, size);
+ else
+ ggtt->gsm = ioremap_wc(phys_addr, size);
+ if (!ggtt->gsm) {
+ drm_err(&i915->drm, "Failed to map the ggtt page table\n");
+ return -ENOMEM;
+ }
+
+ kref_init(&ggtt->vm.resv_ref);
+ ret = setup_scratch_page(&ggtt->vm);
+ if (ret) {
+ drm_err(&i915->drm, "Scratch setup failed\n");
+ /* iounmap will also get called at remove, but meh */
+ iounmap(ggtt->gsm);
+ return ret;
+ }
+
+ pte_flags = 0;
+ if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
+ pte_flags |= PTE_LM;
+
+ ggtt->vm.scratch[0]->encode =
+ ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
+ I915_CACHE_NONE, pte_flags);
+
+ return 0;
+}
+
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ iounmap(ggtt->gsm);
+ free_scratch(vm);
+}
+
+static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
}
+static int gen8_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int size;
+ u16 snb_gmch_ctl;
+
+ if (!HAS_LMEM(i915)) {
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ }
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ if (IS_CHERRYVIEW(i915))
+ size = chv_get_total_gtt_size(snb_gmch_ctl);
+ else
+ size = gen8_get_total_gtt_size(snb_gmch_ctl);
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+ ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
+
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
+ if (intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
+
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+
+ /*
+ * Serialize GTT updates with aperture access on BXT if VT-d is on,
+ * and always on CHV.
+ */
+ if (intel_vm_no_concurrent_access_wa(i915)) {
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+
+ /*
+ * Calling stop_machine() version of GGTT update function
+ * at error capture/reset path will raise lockdep warning.
+ * Allow calling gen8_ggtt_insert_* directly at reset path
+ * which is safe from parallel GGTT updates.
+ */
+ ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;
+
+ ggtt->vm.bind_async_flags =
+ I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ }
+
+ ggtt->invalidate = gen8_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+
+ setup_private_pat(ggtt->vm.gt->uncore);
+
+ return ggtt_probe_common(ggtt, size);
+}
+
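A hedged sketch of why raw_insert_page/raw_insert_entries are wired up above; the reset-path caller is illustrative, not the driver's literal call site:

	static void reset_path_map_page(struct i915_ggtt *ggtt,
					dma_addr_t addr, u64 offset)
	{
		/* Reset excludes parallel GGTT updates, so skip stop_machine(). */
		ggtt->vm.raw_insert_page(&ggtt->vm, addr, offset,
					 I915_CACHE_NONE, 0);
	}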
+static u64 snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ pte |= GEN7_PTE_CACHE_L3_LLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (!(flags & PTE_READ_ONLY))
+ pte |= BYT_PTE_WRITEABLE;
+
+ if (level != I915_CACHE_NONE)
+ pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+ return pte;
+}
+
+static u64 hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (level != I915_CACHE_NONE)
+ pte |= HSW_WB_LLC_AGE3;
+
+ return pte;
+}
+
+static u64 iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ break;
+ case I915_CACHE_WT:
+ pte |= HSW_WT_ELLC_LLC_AGE3;
+ break;
+ default:
+ pte |= HSW_WB_ELLC_LLC_AGE3;
+ break;
+ }
+
+ return pte;
+}
+
+static int gen6_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int size;
+ u16 snb_gmch_ctl;
+
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+
+ /*
+ * 64/512MB is the current min/max we actually know of, but this is
+ * just a coarse sanity check.
+ */
+ if (ggtt->mappable_end < (64 << 20) ||
+ ggtt->mappable_end > (512 << 20)) {
+ drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
+ &ggtt->mappable_end);
+ return -ENXIO;
+ }
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+ size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+
+ ggtt->vm.clear_range = nop_clear_range;
+ if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+
+ ggtt->invalidate = gen6_ggtt_invalidate;
+
+ if (HAS_EDRAM(i915))
+ ggtt->vm.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(i915))
+ ggtt->vm.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(i915))
+ ggtt->vm.pte_encode = byt_pte_encode;
+ else if (GRAPHICS_VER(i915) >= 7)
+ ggtt->vm.pte_encode = ivb_pte_encode;
+ else
+ ggtt->vm.pte_encode = snb_pte_encode;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ return ggtt_probe_common(ggtt, size);
+}
+
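Once one of the encoders above is selected, every PTE write funnels through the same vfunc; a minimal usage sketch, with addr and pte_slot as placeholders:

	/* Illustrative: encode and write a single uncached PTE. */
	iowrite32(ggtt->vm.pte_encode(addr, I915_CACHE_NONE, 0), pte_slot);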
static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
@@ -576,12 +1142,13 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
ggtt->vm.dma = i915->drm.dev;
dma_resv_init(&ggtt->vm._resv);
- if (GRAPHICS_VER(i915) <= 5)
- ret = intel_gt_gmch_gen5_probe(ggtt);
- else if (GRAPHICS_VER(i915) < 8)
- ret = intel_gt_gmch_gen6_probe(ggtt);
+ if (GRAPHICS_VER(i915) >= 8)
+ ret = gen8_gmch_probe(ggtt);
+ else if (GRAPHICS_VER(i915) >= 6)
+ ret = gen6_gmch_probe(ggtt);
else
- ret = intel_gt_gmch_gen8_probe(ggtt);
+ ret = intel_ggtt_gmch_probe(ggtt);
+
if (ret) {
dma_resv_fini(&ggtt->vm._resv);
return ret;
@@ -635,7 +1202,10 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
- return intel_gt_gmch_gen5_enable_hw(i915);
+ if (GRAPHICS_VER(i915) < 6)
+ return intel_ggtt_gmch_enable_hw(i915);
+
+ return 0;
}
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
@@ -675,11 +1245,20 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
struct i915_vma *vma;
bool write_domain_objs = false;
+ bool retained_ptes;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
- /* First fill our portion of the GTT with scratch pages */
- vm->clear_range(vm, 0, vm->total);
+ /*
+ * First fill our portion of the GTT with scratch pages if
+ * they were not retained across suspend.
+ */
+ retained_ptes = suspend_retains_ptes(vm) &&
+ !i915_vm_to_ggtt(vm)->pte_lost &&
+ !GEM_WARN_ON(i915_vm_to_ggtt(vm)->probed_pte != read_last_pte(vm));
+
+ if (!retained_ptes)
+ vm->clear_range(vm, 0, vm->total);
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &vm->bound_list, vm_link) {
@@ -688,9 +1267,10 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- vma->ops->bind_vma(vm, NULL, vma->resource,
- obj ? obj->cache_level : 0,
- was_bound);
+ if (!retained_ptes)
+ vma->ops->bind_vma(vm, NULL, vma->resource,
+ obj ? obj->cache_level : 0,
+ was_bound);
if (obj) { /* only used during resume => exclusive access */
write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
@@ -718,3 +1298,8 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
intel_ggtt_restore_fences(ggtt);
}
+
+void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val)
+{
+ to_gt(i915)->ggtt->pte_lost = val;
+}
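How the flag is meant to be consumed is sketched below; the split between the two call sites is assumed from the retained-PTE logic above, not quoted from this patch:

	/* Illustrative: PTEs may survive S3, but never a hibernation image. */
	i915_ggtt_mark_pte_lost(i915, false);	/* resume path (assumed) */
	i915_ggtt_mark_pte_lost(i915, true);	/* hibernate restore (assumed) */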
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
new file mode 100644
index 000000000000..4e2163a1aa46
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "intel_ggtt_gmch.h"
+
+#include <drm/intel-gtt.h>
+#include <drm/i915_drm.h>
+
+#include <linux/agp_backend.h>
+
+#include "i915_drv.h"
+#include "i915_utils.h"
+#include "intel_gtt.h"
+#include "intel_gt_regs.h"
+#include "intel_gt.h"
+
+static void gmch_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+}
+
+static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
+ flags);
+}
+
+static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ intel_gmch_gtt_flush();
+}
+
+static void gmch_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ intel_gmch_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
+}
+
+static void gmch_ggtt_remove(struct i915_address_space *vm)
+{
+ intel_gmch_remove();
+}
+
+/*
+ * Certain Gen5 chipsets require idling the GPU before unmapping anything from
+ * the GTT when VT-d is enabled.
+ */
+static bool needs_idle_maps(struct drm_i915_private *i915)
+{
+ /*
+ * Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if (!i915_vtd_active(i915))
+ return false;
+
+ if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
+ return true;
+
+ return false;
+}
+
+int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ phys_addr_t gmadr_base;
+ int ret;
+
+ ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
+ if (!ret) {
+ drm_err(&i915->drm, "failed to set up gmch\n");
+ return -EIO;
+ }
+
+ intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
+
+ ggtt->gmadr =
+ (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+
+ if (needs_idle_maps(i915)) {
+ drm_notice(&i915->drm,
+ "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
+ ggtt->do_idle_maps = true;
+ }
+
+ ggtt->vm.insert_page = gmch_ggtt_insert_page;
+ ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
+ ggtt->vm.clear_range = gmch_ggtt_clear_range;
+ ggtt->vm.cleanup = gmch_ggtt_remove;
+
+ ggtt->invalidate = gmch_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ if (unlikely(ggtt->do_idle_maps))
+ drm_notice(&i915->drm,
+ "Applying Ironlake quirks for intel_iommu\n");
+
+ return 0;
+}
+
+int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915)
+{
+ if (!intel_gmch_enable_gtt())
+ return -EIO;
+
+ return 0;
+}
+
+void intel_ggtt_gmch_flush(void)
+{
+ intel_gmch_gtt_flush();
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.h b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.h
new file mode 100644
index 000000000000..370bf321b4e2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GGTT_GMCH_H__
+#define __INTEL_GGTT_GMCH_H__
+
+#include "intel_gtt.h"
+
+/* For x86 platforms */
+#if IS_ENABLED(CONFIG_X86)
+
+void intel_ggtt_gmch_flush(void);
+int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915);
+int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt);
+
+/* Stubs for non-x86 platforms */
+#else
+
+static inline void intel_ggtt_gmch_flush(void) { }
+static inline int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915) { return -ENODEV; }
+static inline int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt) { return -ENODEV; }
+
+#endif
+
+#endif /* __INTEL_GGTT_GMCH_H__ */
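The stub half of the header means callers need no CONFIG_X86 guards of their own; a minimal sketch mirroring the probe dispatch elsewhere in this diff:

	/* Illustrative: on !x86 the stub simply reports -ENODEV. */
	if (GRAPHICS_VER(i915) < 6)
		ret = intel_ggtt_gmch_probe(ggtt);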
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 556bca3be804..d4e9702d3c8e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -236,6 +236,28 @@
#define XY_FAST_COLOR_BLT_DW 16
#define XY_FAST_COLOR_BLT_MOCS_MASK GENMASK(27, 21)
#define XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT 31
+
+#define XY_FAST_COPY_BLT_D0_SRC_TILING_MASK REG_GENMASK(21, 20)
+#define XY_FAST_COPY_BLT_D0_DST_TILING_MASK REG_GENMASK(14, 13)
+#define XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(mode) \
+ REG_FIELD_PREP(XY_FAST_COPY_BLT_D0_SRC_TILING_MASK, mode)
+#define XY_FAST_COPY_BLT_D0_DST_TILE_MODE(mode) \
+ REG_FIELD_PREP(XY_FAST_COPY_BLT_D0_DST_TILING_MASK, mode)
+#define LINEAR 0
+#define TILE_X 0x1
+#define XMAJOR 0x1
+#define YMAJOR 0x2
+#define TILE_64 0x3
+#define XY_FAST_COPY_BLT_D1_SRC_TILE4 REG_BIT(31)
+#define XY_FAST_COPY_BLT_D1_DST_TILE4 REG_BIT(30)
+#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 0)
+#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 8)
+/* Note: MOCS value = (index << 1) */
+#define BLIT_CCTL_SRC_MOCS(idx) \
+ REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (idx) << 1)
+#define BLIT_CCTL_DST_MOCS(idx) \
+ REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (idx) << 1)
+
#define SRC_COPY_BLT_CMD (2 << 29 | 0x43 << 22)
#define GEN9_XY_FAST_COPY_BLT_CMD (2 << 29 | 0x42 << 22)
#define XY_SRC_COPY_BLT_CMD (2 << 29 | 0x53 << 22)
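A hedged sketch of the new tiling fields in use; the command length/format bits are omitted, and the pairing of YMAJOR with the D1 TILE4 bit for Tile-4 surfaces is an assumption:

	/* Illustrative: dword 0/1 tiling bits for an XY_FAST_COPY_BLT. */
	u32 dw0 = GEN9_XY_FAST_COPY_BLT_CMD |
		  XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(LINEAR) |
		  XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
	u32 dw1 = XY_FAST_COPY_BLT_D1_DST_TILE4;	/* dst is Tile-4 (assumed) */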
@@ -288,8 +310,11 @@
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
-/* 3D-related flags can't be set on compute engine */
-#define PIPE_CONTROL_3D_FLAGS (\
+/*
+ * 3D-related flags that can't be set on _engines_ that lack access to the 3D
+ * pipeline (i.e., CCS engines).
+ */
+#define PIPE_CONTROL_3D_ENGINE_FLAGS (\
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | \
PIPE_CONTROL_DEPTH_CACHE_FLUSH | \
PIPE_CONTROL_TILE_CACHE_FLUSH | \
@@ -300,6 +325,14 @@
PIPE_CONTROL_VF_CACHE_INVALIDATE | \
PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET)
+/* 3D-related flags that can't be set on _platforms_ that lack a 3D pipeline */
+#define PIPE_CONTROL_3D_ARCH_FLAGS ( \
+ PIPE_CONTROL_3D_ENGINE_FLAGS | \
+ PIPE_CONTROL_INDIRECT_STATE_DISABLE | \
+ PIPE_CONTROL_FLUSH_ENABLE | \
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | \
+ PIPE_CONTROL_DC_FLUSH_ENABLE)
+
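The split macro pair above exists for masking; a hedged sketch of an emitter using it, where HAS_3D_PIPELINE is assumed to be the platform-level check paired with these masks:

	/* Illustrative: drop flags the engine or platform cannot accept. */
	if (engine->class == COMPUTE_CLASS)
		flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
	if (!HAS_3D_PIPELINE(engine->i915))
		flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;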
#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1)
#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2))
/* Opcodes for MI_MATH_INSTR */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 531af6ad7007..68c2b0d8f187 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_managed.h>
+#include <drm/intel-gtt.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
@@ -12,11 +13,12 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_regs.h"
+#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
-#include "intel_gt_gmch.h"
+#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
@@ -102,78 +104,13 @@ int intel_gt_assign_ggtt(struct intel_gt *gt)
return gt->ggtt ? 0 : -ENOMEM;
}
-static const char * const intel_steering_types[] = {
- "L3BANK",
- "MSLICE",
- "LNCF",
-};
-
-static const struct intel_mmio_range icl_l3bank_steering_table[] = {
- { 0x00B100, 0x00B3FF },
- {},
-};
-
-static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
- { 0x004000, 0x004AFF },
- { 0x00C800, 0x00CFFF },
- { 0x00DD00, 0x00DDFF },
- { 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
- {},
-};
-
-static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
- { 0x00B000, 0x00B0FF },
- { 0x00D800, 0x00D8FF },
- {},
-};
-
-static const struct intel_mmio_range dg2_lncf_steering_table[] = {
- { 0x00B000, 0x00B0FF },
- { 0x00D880, 0x00D8FF },
- {},
-};
-
-static u16 slicemask(struct intel_gt *gt, int count)
-{
- u64 dss_mask = intel_sseu_get_subslices(&gt->info.sseu, 0);
-
- return intel_slicemask_from_dssmask(dss_mask, count);
-}
-
int intel_gt_init_mmio(struct intel_gt *gt)
{
- struct drm_i915_private *i915 = gt->i915;
-
intel_gt_init_clock_frequency(gt);
intel_uc_init_mmio(&gt->uc);
intel_sseu_info_init(gt);
-
- /*
- * An mslice is unavailable only if both the meml3 for the slice is
- * disabled *and* all of the DSS in the slice (quadrant) are disabled.
- */
- if (HAS_MSLICES(i915))
- gt->info.mslice_mask =
- slicemask(gt, GEN_DSS_PER_MSLICE) |
- (intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
- GEN12_MEML3_EN_MASK);
-
- if (IS_DG2(i915)) {
- gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
- gt->steering_table[LNCF] = dg2_lncf_steering_table;
- } else if (IS_XEHPSDV(i915)) {
- gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
- gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
- } else if (GRAPHICS_VER(i915) >= 11 &&
- GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
- gt->steering_table[L3BANK] = icl_l3bank_steering_table;
- gt->info.l3bank_mask =
- ~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
- GEN10_L3BANK_MASK;
- } else if (HAS_MSLICES(i915)) {
- MISSING_CASE(INTEL_INFO(i915)->platform);
- }
+ intel_gt_mcr_init(gt);
return intel_engines_init_mmio(gt);
}
@@ -451,7 +388,7 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
{
wmb();
if (GRAPHICS_VER(gt->i915) < 6)
- intel_gt_gmch_gen5_chipset_flush(gt);
+ intel_ggtt_gmch_flush();
}
void intel_gt_driver_register(struct intel_gt *gt)
@@ -835,200 +772,6 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
}
}
-/**
- * intel_gt_reg_needs_read_steering - determine whether a register read
- * requires explicit steering
- * @gt: GT structure
- * @reg: the register to check steering requirements for
- * @type: type of multicast steering to check
- *
- * Determines whether @reg needs explicit steering of a specific type for
- * reads.
- *
- * Returns false if @reg does not belong to a register range of the given
- * steering type, or if the default (subslice-based) steering IDs are suitable
- * for @type steering too.
- */
-static bool intel_gt_reg_needs_read_steering(struct intel_gt *gt,
- i915_reg_t reg,
- enum intel_steering_type type)
-{
- const u32 offset = i915_mmio_reg_offset(reg);
- const struct intel_mmio_range *entry;
-
- if (likely(!intel_gt_needs_read_steering(gt, type)))
- return false;
-
- for (entry = gt->steering_table[type]; entry->end; entry++) {
- if (offset >= entry->start && offset <= entry->end)
- return true;
- }
-
- return false;
-}
-
-/**
- * intel_gt_get_valid_steering - determines valid IDs for a class of MCR steering
- * @gt: GT structure
- * @type: multicast register type
- * @sliceid: Slice ID returned
- * @subsliceid: Subslice ID returned
- *
- * Determines sliceid and subsliceid values that will steer reads
- * of a specific multicast register class to a valid value.
- */
-static void intel_gt_get_valid_steering(struct intel_gt *gt,
- enum intel_steering_type type,
- u8 *sliceid, u8 *subsliceid)
-{
- switch (type) {
- case L3BANK:
- GEM_DEBUG_WARN_ON(!gt->info.l3bank_mask); /* should be impossible! */
-
- *sliceid = 0; /* unused */
- *subsliceid = __ffs(gt->info.l3bank_mask);
- break;
- case MSLICE:
- GEM_DEBUG_WARN_ON(!gt->info.mslice_mask); /* should be impossible! */
-
- *sliceid = __ffs(gt->info.mslice_mask);
- *subsliceid = 0; /* unused */
- break;
- case LNCF:
- GEM_DEBUG_WARN_ON(!gt->info.mslice_mask); /* should be impossible! */
-
- /*
- * An LNCF is always present if its mslice is present, so we
- * can safely just steer to LNCF 0 in all cases.
- */
- *sliceid = __ffs(gt->info.mslice_mask) << 1;
- *subsliceid = 0; /* unused */
- break;
- default:
- MISSING_CASE(type);
- *sliceid = 0;
- *subsliceid = 0;
- }
-}
-
-/**
- * intel_gt_read_register_fw - reads a GT register with support for multicast
- * @gt: GT structure
- * @reg: register to read
- *
- * This function will read a GT register. If the register is a multicast
- * register, the read will be steered to a valid instance (i.e., one that
- * isn't fused off or powered down by power gating).
- *
- * Returns the value from a valid instance of @reg.
- */
-u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg)
-{
- int type;
- u8 sliceid, subsliceid;
-
- for (type = 0; type < NUM_STEERING_TYPES; type++) {
- if (intel_gt_reg_needs_read_steering(gt, reg, type)) {
- intel_gt_get_valid_steering(gt, type, &sliceid,
- &subsliceid);
- return intel_uncore_read_with_mcr_steering_fw(gt->uncore,
- reg,
- sliceid,
- subsliceid);
- }
- }
-
- return intel_uncore_read_fw(gt->uncore, reg);
-}
-
-/**
- * intel_gt_get_valid_steering_for_reg - get a valid steering for a register
- * @gt: GT structure
- * @reg: register for which the steering is required
- * @sliceid: return variable for slice steering
- * @subsliceid: return variable for subslice steering
- *
- * This function returns a slice/subslice pair that is guaranteed to work for
- * read steering of the given register. Note that a value will be returned even
- * if the register is not replicated and therefore does not actually require
- * steering.
- */
-void intel_gt_get_valid_steering_for_reg(struct intel_gt *gt, i915_reg_t reg,
- u8 *sliceid, u8 *subsliceid)
-{
- int type;
-
- for (type = 0; type < NUM_STEERING_TYPES; type++) {
- if (intel_gt_reg_needs_read_steering(gt, reg, type)) {
- intel_gt_get_valid_steering(gt, type, sliceid,
- subsliceid);
- return;
- }
- }
-
- *sliceid = gt->default_steering.groupid;
- *subsliceid = gt->default_steering.instanceid;
-}
-
-u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg)
-{
- int type;
- u8 sliceid, subsliceid;
-
- for (type = 0; type < NUM_STEERING_TYPES; type++) {
- if (intel_gt_reg_needs_read_steering(gt, reg, type)) {
- intel_gt_get_valid_steering(gt, type, &sliceid,
- &subsliceid);
- return intel_uncore_read_with_mcr_steering(gt->uncore,
- reg,
- sliceid,
- subsliceid);
- }
- }
-
- return intel_uncore_read(gt->uncore, reg);
-}
-
-static void report_steering_type(struct drm_printer *p,
- struct intel_gt *gt,
- enum intel_steering_type type,
- bool dump_table)
-{
- const struct intel_mmio_range *entry;
- u8 slice, subslice;
-
- BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES);
-
- if (!gt->steering_table[type]) {
- drm_printf(p, "%s steering: uses default steering\n",
- intel_steering_types[type]);
- return;
- }
-
- intel_gt_get_valid_steering(gt, type, &slice, &subslice);
- drm_printf(p, "%s steering: sliceid=0x%x, subsliceid=0x%x\n",
- intel_steering_types[type], slice, subslice);
-
- if (!dump_table)
- return;
-
- for (entry = gt->steering_table[type]; entry->end; entry++)
- drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end);
-}
-
-void intel_gt_report_steering(struct drm_printer *p, struct intel_gt *gt,
- bool dump_table)
-{
- drm_printf(p, "Default steering: sliceid=0x%x, subsliceid=0x%x\n",
- gt->default_steering.groupid,
- gt->default_steering.instanceid);
-
- if (HAS_MSLICES(gt->i915)) {
- report_steering_type(p, gt, MSLICE, dump_table);
- report_steering_type(p, gt, LNCF, dump_table);
- }
-}
-
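The helpers removed here reappear in the new intel_gt_mcr.c (see intel_gt_mcr_init and intel_gt_mcr_report_steering elsewhere in this diff); a minimal sketch of the replacement call shapes:

	/* Illustrative: steered read and debugfs dump via the MCR helpers. */
	u32 val = intel_gt_mcr_read(gt, GEN7_ROW_INSTDONE, group, instance);

	intel_gt_mcr_report_steering(&p, gt, true);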
static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
{
int ret;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 44c6cb63ccbc..82d6f248d876 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -13,13 +13,6 @@
struct drm_i915_private;
struct drm_printer;
-struct insert_entries {
- struct i915_address_space *vm;
- struct i915_vma_resource *vma_res;
- enum i915_cache_level level;
- u32 flags;
-};
-
#define GT_TRACE(gt, fmt, ...) do { \
const struct intel_gt *gt__ __maybe_unused = (gt); \
GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
@@ -93,21 +86,6 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
}
-static inline bool intel_gt_needs_read_steering(struct intel_gt *gt,
- enum intel_steering_type type)
-{
- return gt->steering_table[type];
-}
-
-void intel_gt_get_valid_steering_for_reg(struct intel_gt *gt, i915_reg_t reg,
- u8 *sliceid, u8 *subsliceid);
-
-u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg);
-u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg);
-
-void intel_gt_report_steering(struct drm_printer *p, struct intel_gt *gt,
- bool dump_table);
-
int intel_gt_probe_all(struct drm_i915_private *i915);
int intel_gt_tiles_init(struct drm_i915_private *i915);
void intel_gt_release_all(struct drm_i915_private *i915);
@@ -125,6 +103,4 @@ void intel_gt_watchdog_work(struct work_struct *work);
void intel_gt_invalidate_tlbs(struct intel_gt *gt);
-struct resource intel_pci_resource(struct pci_dev *pdev, int bar);
-
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
index d886fdc2c694..dd53641f3637 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -9,6 +9,7 @@
#include "intel_gt.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_engines_debugfs.h"
+#include "intel_gt_mcr.h"
#include "intel_gt_pm_debugfs.h"
#include "intel_sseu_debugfs.h"
#include "pxp/intel_pxp_debugfs.h"
@@ -64,7 +65,7 @@ static int steering_show(struct seq_file *m, void *data)
struct drm_printer p = drm_seq_file_printer(m);
struct intel_gt *gt = m->private;
- intel_gt_report_steering(&p, gt, true);
+ intel_gt_mcr_report_steering(&p, gt, true);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_gmch.c b/drivers/gpu/drm/i915/gt/intel_gt_gmch.c
deleted file mode 100644
index 18e488672d1b..000000000000
--- a/drivers/gpu/drm/i915/gt/intel_gt_gmch.c
+++ /dev/null
@@ -1,654 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include <drm/intel-gtt.h>
-#include <drm/i915_drm.h>
-
-#include <linux/agp_backend.h>
-#include <linux/stop_machine.h>
-
-#include "i915_drv.h"
-#include "intel_gt_gmch.h"
-#include "intel_gt_regs.h"
-#include "intel_gt.h"
-#include "i915_utils.h"
-
-#include "gen8_ppgtt.h"
-
-struct insert_page {
- struct i915_address_space *vm;
- dma_addr_t addr;
- u64 offset;
- enum i915_cache_level level;
-};
-
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
- writeq(pte, addr);
-}
-
-static void nop_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
-}
-
-static u64 snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- pte |= GEN7_PTE_CACHE_L3_LLC;
- break;
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- if (!(flags & PTE_READ_ONLY))
- pte |= BYT_PTE_WRITEABLE;
-
- if (level != I915_CACHE_NONE)
- pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
-
- return pte;
-}
-
-static u64 hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- if (level != I915_CACHE_NONE)
- pte |= HSW_WB_LLC_AGE3;
-
- return pte;
-}
-
-static u64 iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- switch (level) {
- case I915_CACHE_NONE:
- break;
- case I915_CACHE_WT:
- pte |= HSW_WT_ELLC_LLC_AGE3;
- break;
- default:
- pte |= HSW_WB_ELLC_LLC_AGE3;
- break;
- }
-
- return pte;
-}
-
-static void gen5_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-}
-
-static void gen6_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *pte =
- (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- iowrite32(vm->pte_encode(addr, level, flags), pte);
-
- ggtt->invalidate(ggtt);
-}
-
-static void gen8_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *pte =
- (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
-
- ggtt->invalidate(ggtt);
-}
-
-static void gen5_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
- flags);
-}
-
-/*
- * Binds an object into the global gtt with the specified cache level.
- * The object will be accessible to the GPU via commands whose operands
- * reference offsets within the global GTT as well as accessible by the GPU
- * through the GMADR mapped BAR (i915->mm.gtt->gtt).
- */
-static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *gte;
- gen6_pte_t __iomem *end;
- struct sgt_iter iter;
- dma_addr_t addr;
-
- gte = (gen6_pte_t __iomem *)ggtt->gsm;
- gte += vma_res->start / I915_GTT_PAGE_SIZE;
- end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
-
- for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
- iowrite32(vm->pte_encode(addr, level, flags), gte++);
- GEM_BUG_ON(gte > end);
-
- /* Fill the allocated but "unused" space beyond the end of the buffer */
- while (gte < end)
- iowrite32(vm->scratch[0]->encode, gte++);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(ggtt);
-}
-
-static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
- u32 flags)
-{
- const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *gte;
- gen8_pte_t __iomem *end;
- struct sgt_iter iter;
- dma_addr_t addr;
-
- /*
- * Note that we ignore PTE_READ_ONLY here. The caller must be careful
- * not to allow the user to override access to a read only page.
- */
-
- gte = (gen8_pte_t __iomem *)ggtt->gsm;
- gte += vma_res->start / I915_GTT_PAGE_SIZE;
- end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
-
- for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
- gen8_set_pte(gte++, pte_encode | addr);
- GEM_BUG_ON(gte > end);
-
- /* Fill the allocated but "unused" space beyond the end of the buffer */
- while (gte < end)
- gen8_set_pte(gte++, vm->scratch[0]->encode);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(ggtt);
-}
-
-static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
-{
- /*
- * Make sure the internal GAM fifo has been cleared of all GTT
- * writes before exiting stop_machine(). This guarantees that
- * any aperture accesses waiting to start in another process
- * cannot back up behind the GTT writes causing a hang.
- * The register can be any arbitrary GAM register.
- */
- intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
-}
-
-static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
-{
- struct insert_page *arg = _arg;
-
- gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
-{
- struct insert_page arg = { vm, addr, offset, level };
-
- stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
-}
-
-static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
-{
- struct insert_entries *arg = _arg;
-
- gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
- u32 flags)
-{
- struct insert_entries arg = { vm, vma_res, level, flags };
-
- stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
-}
-
-void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt)
-{
- intel_gtt_chipset_flush();
-}
-
-static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
- intel_gtt_chipset_flush();
-}
-
-static void gen5_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
-}
-
-static void gen6_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- scratch_pte = vm->scratch[0]->encode;
- for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
-}
-
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
- gen8_pte_t __iomem *gtt_base =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- for (i = 0; i < num_entries; i++)
- gen8_set_pte(&gtt_base[i], scratch_pte);
-}
-
-static void gen5_gmch_remove(struct i915_address_space *vm)
-{
- intel_gmch_remove();
-}
-
-static void gen6_gmch_remove(struct i915_address_space *vm)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-
- iounmap(ggtt->gsm);
- free_scratch(vm);
-}
-
-/*
- * Certain Gen5 chipsets require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static bool needs_idle_maps(struct drm_i915_private *i915)
-{
- /*
- * Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
- */
- if (!i915_vtd_active(i915))
- return false;
-
- if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
- return true;
-
- if (GRAPHICS_VER(i915) == 12)
- return true; /* XXX DMAR fault reason 7 */
-
- return false;
-}
-
-static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
-{
- /*
- * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
- * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
- */
- GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
- return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
-}
-
-static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
-{
- snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
- return snb_gmch_ctl << 20;
-}
-
-static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
-{
- bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
- bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
- if (bdw_gmch_ctl)
- bdw_gmch_ctl = 1 << bdw_gmch_ctl;
-
-#ifdef CONFIG_X86_32
- /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
- if (bdw_gmch_ctl > 4)
- bdw_gmch_ctl = 4;
-#endif
-
- return bdw_gmch_ctl << 20;
-}
-
-static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
-{
- return gen6_gttmmadr_size(i915) / 2;
-}
-
-static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- phys_addr_t phys_addr;
- u32 pte_flags;
- int ret;
-
- GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
- phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
-
- /*
- * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
- * will be dropped. For WC mappings in general we have 64 byte burst
- * writes when the WC buffer is flushed, so we can't use it, but have to
- * resort to an uncached mapping. The WC issue is easily caught by the
- * readback check when writing GTT PTE entries.
- */
- if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
- ggtt->gsm = ioremap(phys_addr, size);
- else
- ggtt->gsm = ioremap_wc(phys_addr, size);
- if (!ggtt->gsm) {
- drm_err(&i915->drm, "Failed to map the ggtt page table\n");
- return -ENOMEM;
- }
-
- kref_init(&ggtt->vm.resv_ref);
- ret = setup_scratch_page(&ggtt->vm);
- if (ret) {
- drm_err(&i915->drm, "Scratch setup failed\n");
- /* iounmap will also get called at remove, but meh */
- iounmap(ggtt->gsm);
- return ret;
- }
-
- pte_flags = 0;
- if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
- pte_flags |= PTE_LM;
-
- ggtt->vm.scratch[0]->encode =
- ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
- I915_CACHE_NONE, pte_flags);
-
- return 0;
-}
-
-int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- phys_addr_t gmadr_base;
- int ret;
-
- ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
- if (!ret) {
- drm_err(&i915->drm, "failed to set up gmch\n");
- return -EIO;
- }
-
- intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
-
- ggtt->gmadr =
- (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
-
- ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
-
- if (needs_idle_maps(i915)) {
- drm_notice(&i915->drm,
- "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
- ggtt->do_idle_maps = true;
- }
-
- ggtt->vm.insert_page = gen5_ggtt_insert_page;
- ggtt->vm.insert_entries = gen5_ggtt_insert_entries;
- ggtt->vm.clear_range = gen5_ggtt_clear_range;
- ggtt->vm.cleanup = gen5_gmch_remove;
-
- ggtt->invalidate = gmch_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
-
- if (unlikely(ggtt->do_idle_maps))
- drm_notice(&i915->drm,
- "Applying Ironlake quirks for intel_iommu\n");
-
- return 0;
-}
-
-int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- unsigned int size;
- u16 snb_gmch_ctl;
-
- ggtt->gmadr = intel_pci_resource(pdev, 2);
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
-
- /*
- * 64/512MB is the current min/max we actually know of, but this is
- * just a coarse sanity check.
- */
- if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
- &ggtt->mappable_end);
- return -ENXIO;
- }
-
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-
- size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
-
- ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
-
- ggtt->vm.clear_range = nop_clear_range;
- if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
- ggtt->vm.clear_range = gen6_ggtt_clear_range;
- ggtt->vm.insert_page = gen6_ggtt_insert_page;
- ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
- ggtt->vm.cleanup = gen6_gmch_remove;
-
- ggtt->invalidate = gen6_ggtt_invalidate;
-
- if (HAS_EDRAM(i915))
- ggtt->vm.pte_encode = iris_pte_encode;
- else if (IS_HASWELL(i915))
- ggtt->vm.pte_encode = hsw_pte_encode;
- else if (IS_VALLEYVIEW(i915))
- ggtt->vm.pte_encode = byt_pte_encode;
- else if (GRAPHICS_VER(i915) >= 7)
- ggtt->vm.pte_encode = ivb_pte_encode;
- else
- ggtt->vm.pte_encode = snb_pte_encode;
-
- ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
-
- return ggtt_probe_common(ggtt, size);
-}
-
-static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
-{
- gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GGMS_MASK;
-
- if (gmch_ctrl)
- return 1 << (20 + gmch_ctrl);
-
- return 0;
-}
-
-int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- unsigned int size;
- u16 snb_gmch_ctl;
-
- /* TODO: We're not aware of mappable constraints on gen8 yet */
- if (!HAS_LMEM(i915)) {
- ggtt->gmadr = intel_pci_resource(pdev, 2);
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
- }
-
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- if (IS_CHERRYVIEW(i915))
- size = chv_get_total_gtt_size(snb_gmch_ctl);
- else
- size = gen8_get_total_gtt_size(snb_gmch_ctl);
-
- ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
- ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
-
- ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
- ggtt->vm.cleanup = gen6_gmch_remove;
- ggtt->vm.insert_page = gen8_ggtt_insert_page;
- ggtt->vm.clear_range = nop_clear_range;
- if (intel_scanout_needs_vtd_wa(i915))
- ggtt->vm.clear_range = gen8_ggtt_clear_range;
-
- ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
-
- /*
- * Serialize GTT updates with aperture access on BXT if VT-d is on,
- * and always on CHV.
- */
- if (intel_vm_no_concurrent_access_wa(i915)) {
- ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- ggtt->vm.bind_async_flags =
- I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
- }
-
- ggtt->invalidate = gen8_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
-
- ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
-
- setup_private_pat(ggtt->vm.gt->uncore);
-
- return ggtt_probe_common(ggtt, size);
-}
-
-int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915)
-{
- if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
- return -EIO;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_gmch.h b/drivers/gpu/drm/i915/gt/intel_gt_gmch.h
deleted file mode 100644
index 75ed55c1f30a..000000000000
--- a/drivers/gpu/drm/i915/gt/intel_gt_gmch.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#ifndef __INTEL_GT_GMCH_H__
-#define __INTEL_GT_GMCH_H__
-
-#include "intel_gtt.h"
-
-/* For x86 platforms */
-#if IS_ENABLED(CONFIG_X86)
-void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt);
-int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt);
-int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt);
-int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt);
-int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915);
-
-/* Stubs for non-x86 platforms */
-#else
-static inline void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt)
-{
-}
-static inline int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt)
-{
- /* No HW should be probed for this case yet, return fail */
- return -ENODEV;
-}
-static inline int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt)
-{
- /* No HW should be probed for this case yet, return fail */
- return -ENODEV;
-}
-static inline int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt)
-{
- /* No HW should be probed for this case yet, return fail */
- return -ENODEV;
-}
-static inline int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915)
-{
- /* No HW should be enabled for this case yet, return fail */
- return -ENODEV;
-}
-#endif
-
-#endif /* __INTEL_GT_GMCH_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 88b4becfcb17..3a72d4fd0214 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -193,6 +193,14 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
/* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, BCS1) || HAS_ENGINE(gt, BCS2))
+ intel_uncore_write(uncore, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, BCS3) || HAS_ENGINE(gt, BCS4))
+ intel_uncore_write(uncore, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, BCS5) || HAS_ENGINE(gt, BCS6))
+ intel_uncore_write(uncore, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, BCS7) || HAS_ENGINE(gt, BCS8))
+ intel_uncore_write(uncore, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
@@ -248,6 +256,14 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
+ if (HAS_ENGINE(gt, BCS1) || HAS_ENGINE(gt, BCS2))
+ intel_uncore_write(uncore, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, BCS3) || HAS_ENGINE(gt, BCS4))
+ intel_uncore_write(uncore, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, BCS5) || HAS_ENGINE(gt, BCS6))
+ intel_uncore_write(uncore, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, BCS7) || HAS_ENGINE(gt, BCS8))
+ intel_uncore_write(uncore, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
new file mode 100644
index 000000000000..e79405a45312
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include "intel_gt_mcr.h"
+#include "intel_gt_regs.h"
+
+/**
+ * DOC: GT Multicast/Replicated (MCR) Register Support
+ *
+ * Some GT registers are designed as "multicast" or "replicated" registers:
+ * multiple instances of the same register share a single MMIO offset. MCR
+ * registers are generally used when the hardware needs to potentially track
+ * independent values of a register per hardware unit (e.g., per-subslice,
+ * per-L3bank, etc.). The specific types of replication that exist vary
+ * per-platform.
+ *
+ * MMIO accesses to MCR registers are controlled according to the settings
+ * programmed in the platform's MCR_SELECTOR register(s). MMIO writes to MCR
+ * registers can be done in either a (i.e., a single write updates all
+ * instances of the register to the same value) or unicast (a write updates only
+ * one specific instance). Reads of MCR registers always operate in a unicast
+ * manner regardless of how the multicast/unicast bit is set in MCR_SELECTOR.
+ * Selection of a specific MCR instance for unicast operations is referred to
+ * as "steering."
+ *
+ * If MCR register operations are steered toward a hardware unit that is
+ * fused off or currently powered down due to power gating, the MMIO operation
+ * is "terminated" by the hardware. Terminated read operations will return a
+ * value of zero and terminated unicast write operations will be silently
+ * ignored.
+ */
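[Editor's note: to orient readers of the new API, a hedged usage sketch follows; it is editorial, not part of the patch, and SOME_MCR_REG and the steering IDs are hypothetical:

    static void mcr_usage_sketch(struct intel_gt *gt, i915_reg_t SOME_MCR_REG)
    {
            u32 val;

            /* Read one specific replicated instance: group 1, instance 2. */
            val = intel_gt_mcr_read(gt, SOME_MCR_REG, 1, 2);

            /* Read whichever instance is guaranteed not to be terminated. */
            val = intel_gt_mcr_read_any(gt, SOME_MCR_REG);

            /* Update every instance of the register at once. */
            intel_gt_mcr_multicast_write(gt, SOME_MCR_REG, val | BIT(0));
    }
]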
+
+#define HAS_MSLICE_STEERING(dev_priv) (INTEL_INFO(dev_priv)->has_mslice_steering)
+
+static const char * const intel_steering_types[] = {
+ "L3BANK",
+ "MSLICE",
+ "LNCF",
+ "INSTANCE 0",
+};
+
+static const struct intel_mmio_range icl_l3bank_steering_table[] = {
+ { 0x00B100, 0x00B3FF },
+ {},
+};
+
+static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
+ { 0x004000, 0x004AFF },
+ { 0x00C800, 0x00CFFF },
+ { 0x00DD00, 0x00DDFF },
+ { 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
+ {},
+};
+
+static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
+ { 0x00B000, 0x00B0FF },
+ { 0x00D800, 0x00D8FF },
+ {},
+};
+
+static const struct intel_mmio_range dg2_lncf_steering_table[] = {
+ { 0x00B000, 0x00B0FF },
+ { 0x00D880, 0x00D8FF },
+ {},
+};
+
+/*
+ * We have several types of MCR registers on PVC where steering to (0,0)
+ * will always provide us with a non-terminated value. We'll stick them
+ * all in the same table for simplicity.
+ */
+static const struct intel_mmio_range pvc_instance0_steering_table[] = {
+ { 0x004000, 0x004AFF }, /* HALF-BSLICE */
+ { 0x008800, 0x00887F }, /* CC */
+ { 0x008A80, 0x008AFF }, /* TILEPSMI */
+ { 0x00B000, 0x00B0FF }, /* HALF-BSLICE */
+ { 0x00B100, 0x00B3FF }, /* L3BANK */
+ { 0x00C800, 0x00CFFF }, /* HALF-BSLICE */
+ { 0x00D800, 0x00D8FF }, /* HALF-BSLICE */
+ { 0x00DD00, 0x00DDFF }, /* BSLICE */
+ { 0x00E900, 0x00E9FF }, /* HALF-BSLICE */
+ { 0x00EC00, 0x00EEFF }, /* HALF-BSLICE */
+ { 0x00F000, 0x00FFFF }, /* HALF-BSLICE */
+ { 0x024180, 0x0241FF }, /* HALF-BSLICE */
+ {},
+};
+
+void intel_gt_mcr_init(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ /*
+ * An mslice is unavailable only if both the meml3 for the slice is
+ * disabled *and* all of the DSS in the slice (quadrant) are disabled.
+ */
+ if (HAS_MSLICE_STEERING(i915)) {
+ gt->info.mslice_mask =
+ intel_slicemask_from_xehp_dssmask(gt->info.sseu.subslice_mask,
+ GEN_DSS_PER_MSLICE);
+ gt->info.mslice_mask |=
+ (intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
+ GEN12_MEML3_EN_MASK);
+
+ if (!gt->info.mslice_mask) /* should be impossible! */
+ drm_warn(&i915->drm, "mslice mask all zero!\n");
+ }
+
+ if (IS_PONTEVECCHIO(i915)) {
+ gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
+ } else if (IS_DG2(i915)) {
+ gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
+ gt->steering_table[LNCF] = dg2_lncf_steering_table;
+ } else if (IS_XEHPSDV(i915)) {
+ gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
+ gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
+ } else if (GRAPHICS_VER(i915) >= 11 &&
+ GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
+ gt->steering_table[L3BANK] = icl_l3bank_steering_table;
+ gt->info.l3bank_mask =
+ ~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
+ GEN10_L3BANK_MASK;
+ if (!gt->info.l3bank_mask) /* should be impossible! */
+ drm_warn(&i915->drm, "L3 bank mask is all zero!\n");
+ } else if (GRAPHICS_VER(i915) >= 11) {
+ /*
+ * We expect all modern platforms to have at least some
+ * type of steering that needs to be initialized.
+ */
+ MISSING_CASE(INTEL_INFO(i915)->platform);
+ }
+}
+
+/*
+ * rw_with_mcr_steering_fw - Access a register with specific MCR steering
+ * @uncore: pointer to struct intel_uncore
+ * @reg: register being accessed
+ * @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
+ * @group: group number (documented as "sliceid" on older platforms)
+ * @instance: instance number (documented as "subsliceid" on older platforms)
+ * @value: register value to be written (ignored for read)
+ *
+ * Return: 0 for write access, or the register value for read access.
+ *
+ * Caller needs to make sure the relevant forcewake wells are up.
+ */
+static u32 rw_with_mcr_steering_fw(struct intel_uncore *uncore,
+ i915_reg_t reg, u8 rw_flag,
+ int group, int instance, u32 value)
+{
+ u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;
+
+ lockdep_assert_held(&uncore->lock);
+
+ if (GRAPHICS_VER(uncore->i915) >= 11) {
+ mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
+ mcr_ss = GEN11_MCR_SLICE(group) | GEN11_MCR_SUBSLICE(instance);
+
+ /*
+ * Wa_22013088509
+ *
+ * The setting of the multicast/unicast bit usually wouldn't
+ * matter for read operations (which always return the value
+ * from a single register instance regardless of how that bit
+ * is set), but some platforms have a workaround requiring us
+ * to remain in multicast mode for reads. There's no real
+ * downside to this, so we'll just go ahead and do so on all
+ * platforms; we'll only clear the multicast bit from the mask
+ * when explicitly doing a write operation.
+ */
+ if (rw_flag == FW_REG_WRITE)
+ mcr_mask |= GEN11_MCR_MULTICAST;
+ } else {
+ mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+ mcr_ss = GEN8_MCR_SLICE(group) | GEN8_MCR_SUBSLICE(instance);
+ }
+
+ old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
+
+ mcr &= ~mcr_mask;
+ mcr |= mcr_ss;
+ intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
+
+ if (rw_flag == FW_REG_READ)
+ val = intel_uncore_read_fw(uncore, reg);
+ else
+ intel_uncore_write_fw(uncore, reg, value);
+
+ mcr &= ~mcr_mask;
+ mcr |= old_mcr & mcr_mask;
+
+ intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
+
+ return val;
+}
+
+static u32 rw_with_mcr_steering(struct intel_uncore *uncore,
+ i915_reg_t reg, u8 rw_flag,
+ int group, int instance,
+ u32 value)
+{
+ enum forcewake_domains fw_domains;
+ u32 val;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
+ rw_flag);
+ fw_domains |= intel_uncore_forcewake_for_reg(uncore,
+ GEN8_MCR_SELECTOR,
+ FW_REG_READ | FW_REG_WRITE);
+
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
+
+ val = rw_with_mcr_steering_fw(uncore, reg, rw_flag, group, instance, value);
+
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irq(&uncore->lock);
+
+ return val;
+}
+
+/**
+ * intel_gt_mcr_read - read a specific instance of an MCR register
+ * @gt: GT structure
+ * @reg: the MCR register to read
+ * @group: the MCR group
+ * @instance: the MCR instance
+ *
+ * Returns the value read from an MCR register after steering toward a specific
+ * group/instance.
+ */
+u32 intel_gt_mcr_read(struct intel_gt *gt,
+ i915_reg_t reg,
+ int group, int instance)
+{
+ return rw_with_mcr_steering(gt->uncore, reg, FW_REG_READ, group, instance, 0);
+}
+
+/**
+ * intel_gt_mcr_unicast_write - write a specific instance of an MCR register
+ * @gt: GT structure
+ * @reg: the MCR register to write
+ * @value: value to write
+ * @group: the MCR group
+ * @instance: the MCR instance
+ *
+ * Write an MCR register in unicast mode after steering toward a specific
+ * group/instance.
+ */
+void intel_gt_mcr_unicast_write(struct intel_gt *gt, i915_reg_t reg, u32 value,
+ int group, int instance)
+{
+ rw_with_mcr_steering(gt->uncore, reg, FW_REG_WRITE, group, instance, value);
+}
+
+/**
+ * intel_gt_mcr_multicast_write - write a value to all instances of an MCR register
+ * @gt: GT structure
+ * @reg: the MCR register to write
+ * @value: value to write
+ *
+ * Write an MCR register in multicast mode to update all instances.
+ */
+void intel_gt_mcr_multicast_write(struct intel_gt *gt,
+ i915_reg_t reg, u32 value)
+{
+ intel_uncore_write(gt->uncore, reg, value);
+}
+
+/**
+ * intel_gt_mcr_multicast_write_fw - write a value to all instances of an MCR register
+ * @gt: GT structure
+ * @reg: the MCR register to write
+ * @value: value to write
+ *
+ * Write an MCR register in multicast mode to update all instances. This
+ * function assumes the caller is already holding any necessary forcewake
+ * domains; use intel_gt_mcr_multicast_write() in cases where forcewake should
+ * be obtained automatically.
+ */
+void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_reg_t reg, u32 value)
+{
+ intel_uncore_write_fw(gt->uncore, reg, value);
+}
+
+/*
+ * reg_needs_read_steering - determine whether a register read requires
+ * explicit steering
+ * @gt: GT structure
+ * @reg: the register to check steering requirements for
+ * @type: type of multicast steering to check
+ *
+ * Determines whether @reg needs explicit steering of a specific type for
+ * reads.
+ *
+ * Returns false if @reg does not belong to a register range of the given
+ * steering type, or if the default (subslice-based) steering IDs are suitable
+ * for @type steering too.
+ */
+static bool reg_needs_read_steering(struct intel_gt *gt,
+ i915_reg_t reg,
+ enum intel_steering_type type)
+{
+ const u32 offset = i915_mmio_reg_offset(reg);
+ const struct intel_mmio_range *entry;
+
+ if (likely(!gt->steering_table[type]))
+ return false;
+
+ for (entry = gt->steering_table[type]; entry->end; entry++) {
+ if (offset >= entry->start && offset <= entry->end)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * get_nonterminated_steering - determines valid IDs for a class of MCR steering
+ * @gt: GT structure
+ * @type: multicast register type
+ * @group: Group ID returned
+ * @instance: Instance ID returned
+ *
+ * Determines group and instance values that will steer reads of the specified
+ * MCR class to a non-terminated instance.
+ */
+static void get_nonterminated_steering(struct intel_gt *gt,
+ enum intel_steering_type type,
+ u8 *group, u8 *instance)
+{
+ switch (type) {
+ case L3BANK:
+ *group = 0; /* unused */
+ *instance = __ffs(gt->info.l3bank_mask);
+ break;
+ case MSLICE:
+ GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
+ *group = __ffs(gt->info.mslice_mask);
+ *instance = 0; /* unused */
+ break;
+ case LNCF:
+ /*
+ * An LNCF is always present if its mslice is present, so we
+ * can safely just steer to LNCF 0 in all cases.
+ */
+ GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
+ *group = __ffs(gt->info.mslice_mask) << 1;
+ *instance = 0; /* unused */
+ break;
+ case INSTANCE0:
+ /*
+ * There are a lot of MCR types for which instance (0, 0)
+ * will always provide a non-terminated value.
+ */
+ *group = 0;
+ *instance = 0;
+ break;
+ default:
+ MISSING_CASE(type);
+ *group = 0;
+ *instance = 0;
+ }
+}
+
+/**
+ * intel_gt_mcr_get_nonterminated_steering - find group/instance values that
+ * will steer a register to a non-terminated instance
+ * @gt: GT structure
+ * @reg: register for which the steering is required
+ * @group: return variable for group steering
+ * @instance: return variable for instance steering
+ *
+ * This function returns a group/instance pair that is guaranteed to work for
+ * read steering of the given register. Note that a value will be returned even
+ * if the register is not replicated and therefore does not actually require
+ * steering.
+ */
+void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
+ i915_reg_t reg,
+ u8 *group, u8 *instance)
+{
+ int type;
+
+ for (type = 0; type < NUM_STEERING_TYPES; type++) {
+ if (reg_needs_read_steering(gt, reg, type)) {
+ get_nonterminated_steering(gt, type, group, instance);
+ return;
+ }
+ }
+
+ *group = gt->default_steering.groupid;
+ *instance = gt->default_steering.instanceid;
+}
+
+/**
+ * intel_gt_mcr_read_any_fw - reads one instance of an MCR register
+ * @gt: GT structure
+ * @reg: register to read
+ *
+ * Reads a GT MCR register. The read will be steered to a non-terminated
+ * instance (i.e., one that isn't fused off or powered down by power gating).
+ * This function assumes the caller is already holding any necessary forcewake
+ * domains; use intel_gt_mcr_read_any() in cases where forcewake should be
+ * obtained automatically.
+ *
+ * Returns the value from a non-terminated instance of @reg.
+ */
+u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_reg_t reg)
+{
+ int type;
+ u8 group, instance;
+
+ for (type = 0; type < NUM_STEERING_TYPES; type++) {
+ if (reg_needs_read_steering(gt, reg, type)) {
+ get_nonterminated_steering(gt, type, &group, &instance);
+ return rw_with_mcr_steering_fw(gt->uncore, reg,
+ FW_REG_READ,
+ group, instance, 0);
+ }
+ }
+
+ return intel_uncore_read_fw(gt->uncore, reg);
+}
+
+/**
+ * intel_gt_mcr_read_any - reads one instance of an MCR register
+ * @gt: GT structure
+ * @reg: register to read
+ *
+ * Reads a GT MCR register. The read will be steered to a non-terminated
+ * instance (i.e., one that isn't fused off or powered down by power gating).
+ *
+ * Returns the value from a non-terminated instance of @reg.
+ */
+u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_reg_t reg)
+{
+ int type;
+ u8 group, instance;
+
+ for (type = 0; type < NUM_STEERING_TYPES; type++) {
+ if (reg_needs_read_steering(gt, reg, type)) {
+ get_nonterminated_steering(gt, type, &group, &instance);
+ return rw_with_mcr_steering(gt->uncore, reg,
+ FW_REG_READ,
+ group, instance, 0);
+ }
+ }
+
+ return intel_uncore_read(gt->uncore, reg);
+}
+
+static void report_steering_type(struct drm_printer *p,
+ struct intel_gt *gt,
+ enum intel_steering_type type,
+ bool dump_table)
+{
+ const struct intel_mmio_range *entry;
+ u8 group, instance;
+
+ BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES);
+
+ if (!gt->steering_table[type]) {
+ drm_printf(p, "%s steering: uses default steering\n",
+ intel_steering_types[type]);
+ return;
+ }
+
+ get_nonterminated_steering(gt, type, &group, &instance);
+ drm_printf(p, "%s steering: group=0x%x, instance=0x%x\n",
+ intel_steering_types[type], group, instance);
+
+ if (!dump_table)
+ return;
+
+ for (entry = gt->steering_table[type]; entry->end; entry++)
+ drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end);
+}
+
+void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
+ bool dump_table)
+{
+ drm_printf(p, "Default steering: group=0x%x, instance=0x%x\n",
+ gt->default_steering.groupid,
+ gt->default_steering.instanceid);
+
+ if (IS_PONTEVECCHIO(gt->i915)) {
+ report_steering_type(p, gt, INSTANCE0, dump_table);
+ } else if (HAS_MSLICE_STEERING(gt->i915)) {
+ report_steering_type(p, gt, MSLICE, dump_table);
+ report_steering_type(p, gt, LNCF, dump_table);
+ }
+}
+
+/**
+ * intel_gt_mcr_get_ss_steering - returns the group/instance steering for a SS
+ * @gt: GT structure
+ * @dss: DSS ID to obtain steering for
+ * @group: pointer to storage for steering group ID
+ * @instance: pointer to storage for steering instance ID
+ *
+ * Returns the steering IDs (via the @group and @instance parameters) that
+ * correspond to a specific subslice/DSS ID.
+ */
+void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
+ unsigned int *group, unsigned int *instance)
+{
+ if (IS_PONTEVECCHIO(gt->i915)) {
+ *group = dss / GEN_DSS_PER_CSLICE;
+ *instance = dss % GEN_DSS_PER_CSLICE;
+ } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+ *group = dss / GEN_DSS_PER_GSLICE;
+ *instance = dss % GEN_DSS_PER_GSLICE;
+ } else {
+ *group = dss / GEN_MAX_SS_PER_HSW_SLICE;
+ *instance = dss % GEN_MAX_SS_PER_HSW_SLICE;
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
new file mode 100644
index 000000000000..77a8b11c287d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_MCR__
+#define __INTEL_GT_MCR__
+
+#include "intel_gt_types.h"
+
+void intel_gt_mcr_init(struct intel_gt *gt);
+
+u32 intel_gt_mcr_read(struct intel_gt *gt,
+ i915_reg_t reg,
+ int group, int instance);
+u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_reg_t reg);
+u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_reg_t reg);
+
+void intel_gt_mcr_unicast_write(struct intel_gt *gt,
+ i915_reg_t reg, u32 value,
+ int group, int instance);
+void intel_gt_mcr_multicast_write(struct intel_gt *gt,
+ i915_reg_t reg, u32 value);
+void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt,
+ i915_reg_t reg, u32 value);
+
+void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
+ i915_reg_t reg,
+ u8 *group, u8 *instance);
+
+void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
+ bool dump_table);
+
+void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
+ unsigned int *group, unsigned int *instance);
+
+/*
+ * Helper for the for_each_ss_steering loop. On pre-Xe_HP platforms, subslice
+ * presence is determined by using the group/instance as direct lookups in the
+ * slice/subslice topology. On Xe_HP and beyond, the steering is unrelated to
+ * the topology, so we look up the DSS ID directly in "slice 0".
+ */
+#define _HAS_SS(ss_, gt_, group_, instance_) ( \
+ GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 50) ? \
+ intel_sseu_has_subslice(&(gt_)->info.sseu, 0, ss_) : \
+ intel_sseu_has_subslice(&(gt_)->info.sseu, group_, instance_))
+
+/*
+ * Loop over each subslice/DSS and determine the group and instance IDs that
+ * should be used to steer MCR accesses toward this DSS.
+ */
+#define for_each_ss_steering(ss_, gt_, group_, instance_) \
+ for (ss_ = 0, intel_gt_mcr_get_ss_steering(gt_, 0, &group_, &instance_); \
+ ss_ < I915_MAX_SS_FUSE_BITS; \
+ ss_++, intel_gt_mcr_get_ss_steering(gt_, ss_, &group_, &instance_)) \
+ for_each_if(_HAS_SS(ss_, gt_, group_, instance_))
+
+#endif /* __INTEL_GT_MCR__ */
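[Editor's note: a hedged usage sketch of the iterator defined above; editorial only, and SOME_MCR_REG is hypothetical:

    unsigned int dss, group, instance;

    for_each_ss_steering(dss, gt, group, instance) {
            u32 val = intel_gt_mcr_read(gt, SOME_MCR_REG, group, instance);

            drm_dbg(&gt->i915->drm, "DSS %u -> 0x%08x\n", dss, val);
    }
]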
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 0c6b9eb724ae..40bdd4cb629f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -100,14 +100,16 @@ static int vlv_drpc(struct seq_file *m)
{
struct intel_gt *gt = m->private;
struct intel_uncore *uncore = gt->uncore;
- u32 rcctl1, pw_status;
+ u32 rcctl1, pw_status, mt_fwake_req;
+ mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
seq_printf(m, "RC6 Enabled: %s\n",
str_yes_no(rcctl1 & (GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_EI_MODE(1))));
+ seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
seq_printf(m, "Render Power Well: %s\n",
(pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Media Power Well: %s\n",
@@ -124,9 +126,10 @@ static int gen6_drpc(struct seq_file *m)
struct intel_gt *gt = m->private;
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
- u32 gt_core_status, rcctl1, rc6vids = 0;
+ u32 gt_core_status, mt_fwake_req, rcctl1, rc6vids = 0;
u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
+ mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
@@ -138,7 +141,7 @@ static int gen6_drpc(struct seq_file *m)
}
if (GRAPHICS_VER(i915) <= 7)
- snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
+ snb_pcode_read(gt->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
seq_printf(m, "RC1e Enabled: %s\n",
str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -178,6 +181,7 @@ static int gen6_drpc(struct seq_file *m)
seq_printf(m, "Core Power Down: %s\n",
str_yes_no(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+ seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
if (GRAPHICS_VER(i915) >= 9) {
seq_printf(m, "Render Power Well: %s\n",
(gen9_powergate_status &
@@ -545,7 +549,7 @@ static int llc_show(struct seq_file *m, void *data)
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
ia_freq = gpu_freq;
- snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ snb_pcode_read(gt->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq, NULL);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(rps,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index a0a49c16babd..60d6eb5f245b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -140,6 +140,7 @@
#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8)
#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
+#define GEN12_PERF_FIX_BALANCING_CFE_DISABLE REG_BIT(15)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
@@ -323,8 +324,11 @@
#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
-#define XEHPSDV_FLAT_CCS_BASE_ADDR _MMIO(0x4910)
-#define XEHPSDV_CCS_BASE_SHIFT 8
+#define XEHP_TILE0_ADDR_RANGE _MMIO(0x4900)
+#define XEHP_TILE_LMEM_RANGE_SHIFT 8
+
+#define XEHP_FLAT_CCS_BASE_ADDR _MMIO(0x4910)
+#define XEHP_CCS_BASE_SHIFT 8
#define GAMTARBMODE _MMIO(0x4a08)
#define ARB_MODE_BWGTLB_DISABLE (1 << 9)
@@ -367,6 +371,9 @@
#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
+#define CHICKEN_RASTER_1 _MMIO(0x6204)
+#define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8)
+
#define VFLSKPD _MMIO(0x62a8)
#define DIS_OVER_FETCH_CACHE REG_BIT(1)
#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
@@ -561,6 +568,7 @@
#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144)
+#define XEHPC_GT_COMPUTE_DSS_ENABLE_EXT _MMIO(0x9148)
#define GEN6_UCGCTL1 _MMIO(0x9400)
#define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
@@ -597,24 +605,32 @@
/* GEN11 changed all bit defs except for FULL & RENDER */
#define GEN11_GRDOM_FULL GEN6_GRDOM_FULL
#define GEN11_GRDOM_RENDER GEN6_GRDOM_RENDER
-#define GEN11_GRDOM_BLT (1 << 2)
-#define GEN11_GRDOM_GUC (1 << 3)
-#define GEN11_GRDOM_MEDIA (1 << 5)
-#define GEN11_GRDOM_MEDIA2 (1 << 6)
-#define GEN11_GRDOM_MEDIA3 (1 << 7)
-#define GEN11_GRDOM_MEDIA4 (1 << 8)
-#define GEN11_GRDOM_MEDIA5 (1 << 9)
-#define GEN11_GRDOM_MEDIA6 (1 << 10)
-#define GEN11_GRDOM_MEDIA7 (1 << 11)
-#define GEN11_GRDOM_MEDIA8 (1 << 12)
-#define GEN11_GRDOM_VECS (1 << 13)
-#define GEN11_GRDOM_VECS2 (1 << 14)
-#define GEN11_GRDOM_VECS3 (1 << 15)
-#define GEN11_GRDOM_VECS4 (1 << 16)
-#define GEN11_GRDOM_SFC0 (1 << 17)
-#define GEN11_GRDOM_SFC1 (1 << 18)
-#define GEN11_GRDOM_SFC2 (1 << 19)
-#define GEN11_GRDOM_SFC3 (1 << 20)
+#define XEHPC_GRDOM_BLT8 REG_BIT(31)
+#define XEHPC_GRDOM_BLT7 REG_BIT(30)
+#define XEHPC_GRDOM_BLT6 REG_BIT(29)
+#define XEHPC_GRDOM_BLT5 REG_BIT(28)
+#define XEHPC_GRDOM_BLT4 REG_BIT(27)
+#define XEHPC_GRDOM_BLT3 REG_BIT(26)
+#define XEHPC_GRDOM_BLT2 REG_BIT(25)
+#define XEHPC_GRDOM_BLT1 REG_BIT(24)
+#define GEN11_GRDOM_SFC3 REG_BIT(20)
+#define GEN11_GRDOM_SFC2 REG_BIT(19)
+#define GEN11_GRDOM_SFC1 REG_BIT(18)
+#define GEN11_GRDOM_SFC0 REG_BIT(17)
+#define GEN11_GRDOM_VECS4 REG_BIT(16)
+#define GEN11_GRDOM_VECS3 REG_BIT(15)
+#define GEN11_GRDOM_VECS2 REG_BIT(14)
+#define GEN11_GRDOM_VECS REG_BIT(13)
+#define GEN11_GRDOM_MEDIA8 REG_BIT(12)
+#define GEN11_GRDOM_MEDIA7 REG_BIT(11)
+#define GEN11_GRDOM_MEDIA6 REG_BIT(10)
+#define GEN11_GRDOM_MEDIA5 REG_BIT(9)
+#define GEN11_GRDOM_MEDIA4 REG_BIT(8)
+#define GEN11_GRDOM_MEDIA3 REG_BIT(7)
+#define GEN11_GRDOM_MEDIA2 REG_BIT(6)
+#define GEN11_GRDOM_MEDIA REG_BIT(5)
+#define GEN11_GRDOM_GUC REG_BIT(3)
+#define GEN11_GRDOM_BLT REG_BIT(2)
#define GEN11_VCS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << ((instance) >> 1))
#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance))
@@ -622,6 +638,7 @@
#define GEN7_MISCCPCTL _MMIO(0x9424)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0)
+#define GEN12_DOP_CLOCK_GATE_RENDER_ENABLE REG_BIT(1)
#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2)
#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4)
#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6)
@@ -732,6 +749,7 @@
#define GEN6_AGGRESSIVE_TURBO (0 << 15)
#define GEN9_SW_REQ_UNSLICE_RATIO_SHIFT 23
#define GEN9_IGNORE_SLICE_RATIO (0 << 0)
+#define GEN12_MEDIA_FREQ_RATIO REG_BIT(13)
#define GEN6_RC_VIDEO_FREQ _MMIO(0xa00c)
#define GEN6_RC_CTL_RC6pp_ENABLE (1 << 16)
@@ -903,6 +921,10 @@
#define GEN7_L3CNTLREG1 _MMIO(0xb01c)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1 << 19)
+
+#define XEHPC_LNCFMISCCFGREG0 _MMIO(0xb01c)
+#define XEHPC_OVRLSCCC REG_BIT(0)
+
#define GEN7_L3CNTLREG2 _MMIO(0xb020)
/* MOCS (Memory Object Control State) registers */
@@ -969,6 +991,11 @@
#define XEHP_L3SCQREG7 _MMIO(0xb188)
#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
+#define XEHPC_L3SCRUB _MMIO(0xb18c)
+#define SCRUB_CL_DWNGRADE_SHARED REG_BIT(12)
+#define SCRUB_RATE_PER_BANK_MASK REG_GENMASK(2, 0)
+#define SCRUB_RATE_4B_PER_CLK REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6)
+
#define L3SQCREG1_CCS0 _MMIO(0xb200)
#define FLUSHALLNONCOH REG_BIT(5)
@@ -1060,8 +1087,10 @@
#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2)
#define GEN10_CACHE_MODE_SS _MMIO(0xe420)
-#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
+#define ENABLE_EU_COUNT_FOR_TDL_FLUSH REG_BIT(10)
+#define DISABLE_ECC REG_BIT(5)
#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4)
+#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
#define EU_PERF_CNTL0 _MMIO(0xe458)
#define EU_PERF_CNTL4 _MMIO(0xe45c)
@@ -1476,6 +1505,14 @@
#define GEN11_KCR (19)
#define GEN11_GTPM (16)
#define GEN11_BCS (15)
+#define XEHPC_BCS1 (14)
+#define XEHPC_BCS2 (13)
+#define XEHPC_BCS3 (12)
+#define XEHPC_BCS4 (11)
+#define XEHPC_BCS5 (10)
+#define XEHPC_BCS6 (9)
+#define XEHPC_BCS7 (8)
+#define XEHPC_BCS8 (23)
#define GEN12_CCS3 (7)
#define GEN12_CCS2 (6)
#define GEN12_CCS1 (5)
@@ -1521,6 +1558,10 @@
#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4)
#define GEN12_CCS0_CCS1_INTR_MASK _MMIO(0x190100)
#define GEN12_CCS2_CCS3_INTR_MASK _MMIO(0x190104)
+#define XEHPC_BCS1_BCS2_INTR_MASK _MMIO(0x190110)
+#define XEHPC_BCS3_BCS4_INTR_MASK _MMIO(0x190114)
+#define XEHPC_BCS5_BCS6_INTR_MASK _MMIO(0x190118)
+#define XEHPC_BCS7_BCS8_INTR_MASK _MMIO(0x19011c)
#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index f76b6cf8040e..73a8b46e0234 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -14,6 +14,7 @@
#include "intel_gt_regs.h"
#include "intel_gt_sysfs.h"
#include "intel_gt_sysfs_pm.h"
+#include "intel_pcode.h"
#include "intel_rc6.h"
#include "intel_rps.h"
@@ -558,6 +559,174 @@ static const struct attribute *freq_attrs[] = {
NULL
};
+/*
+ * Scaling for multipliers (aka frequency factors).
+ * The format of the value in the register is u8.8.
+ *
+ * The presentation to userspace is inspired by the perf event framework.
+ * See:
+ * Documentation/ABI/testing/sysfs-bus-event_source-devices-events
+ * for description of:
+ * /sys/bus/event_source/devices/<pmu>/events/<event>.scale
+ *
+ * Summary: Expose two sysfs files for each multiplier.
+ *
+ * 1. File <attr> contains a raw hardware value.
+ * 2. File <attr>.scale contains the multiplicative scale factor to be
+ * used by userspace to compute the actual value.
+ *
+ * So userspace knows that to obtain the frequency factor it multiplies the
+ * provided value by the specified scale factor, and vice versa.
+ *
+ * That way there is no precision loss in the kernel interface, and the API
+ * is future-proof should the hardware register one day change to u16.16
+ * on some platform (or to any other fixed-point representation).
+ *
+ * Example:
+ * File <attr> contains the value 2.5, represented as u8.8 0x0280, which
+ * is comprised of:
+ * - an integer part of 2
+ * - a fractional part of 0x80 (representing 0x80 / 2^8 == 0x80 / 256 == 0.5).
+ * File <attr>.scale contains a string representation of floating point
+ * value 0.00390625 (which is (1 / 256)).
+ * Userspace computes the actual value:
+ * 0x0280 * 0.00390625 -> 2.5
+ * or converts an actual value to the value to be written into <attr>:
+ * 2.5 / 0.00390625 -> 0x0280
+ */
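[Editor's note: the conversion contract described above is small enough to show; a hedged userspace-side sketch with hypothetical helper names, not part of the patch:

    /* "raw" comes from <attr>; "scale" is parsed from <attr>.scale. */
    double u8_8_to_factor(unsigned int raw, double scale)
    {
            return raw * scale;                 /* 0x0280 * 0.00390625 == 2.5 */
    }

    unsigned int factor_to_u8_8(double factor, double scale)
    {
            return (unsigned int)(factor / scale + 0.5);   /* 2.5 -> 0x0280 */
    }
]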
+
+#define U8_8_VAL_MASK 0xffff
+#define U8_8_SCALE_TO_VALUE "0.00390625"
+
+static ssize_t freq_factor_scale_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ return sysfs_emit(buff, "%s\n", U8_8_SCALE_TO_VALUE);
+}
+
+static u32 media_ratio_mode_to_factor(u32 mode)
+{
+ /* 0 -> 0, 1 -> 256, 2 -> 128 */
+ return !mode ? mode : 256 / mode;
+}
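[Editor's note: spelling out the mapping, assuming the enum ordering 0, 1, 2 implied by the loop in media_freq_factor_store() below:

    /* 256 is 1.0 in u8.8; 128 is 0.5, i.e. a 1:2 media ratio. */
    media_ratio_mode_to_factor(SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL);  /* 0   */
    media_ratio_mode_to_factor(SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE); /* 256 */
    media_ratio_mode_to_factor(SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO); /* 128 */
]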
+
+static ssize_t media_freq_factor_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ intel_wakeref_t wakeref;
+ u32 mode;
+
+ /*
+ * Retrieve media_ratio_mode from GEN6_RPNSWREQ bit 13 set by
+ * GuC. A GEN6_RPNSWREQ:13 value of 0 represents 1:2, and 1 represents 1:1.
+ */
+ if (IS_XEHPSDV(gt->i915) &&
+ slpc->media_ratio_mode == SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL) {
+ /*
+ * For XEHPSDV dynamic mode GEN6_RPNSWREQ:13 does not contain
+ * the media_ratio_mode, just return the cached media ratio
+ */
+ mode = slpc->media_ratio_mode;
+ } else {
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
+ mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
+ }
+
+ return sysfs_emit(buff, "%u\n", media_ratio_mode_to_factor(mode));
+}
+
+static ssize_t media_freq_factor_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ u32 factor, mode;
+ int err;
+
+ err = kstrtou32(buff, 0, &factor);
+ if (err)
+ return err;
+
+ for (mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
+ mode <= SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO; mode++)
+ if (factor == media_ratio_mode_to_factor(mode))
+ break;
+
+ if (mode > SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO)
+ return -EINVAL;
+
+ err = intel_guc_slpc_set_media_ratio_mode(slpc, mode);
+ if (!err) {
+ slpc->media_ratio_mode = mode;
+ DRM_DEBUG("Set slpc->media_ratio_mode to %d", mode);
+ }
+ return err ?: count;
+}
+
+static ssize_t media_RP0_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ u32 val;
+ int err;
+
+ err = snb_pcode_read_p(gt->uncore, XEHP_PCODE_FREQUENCY_CONFIG,
+ PCODE_MBOX_FC_SC_READ_FUSED_P0,
+ PCODE_MBOX_DOMAIN_MEDIAFF, &val);
+
+ if (err)
+ return err;
+
+ /* Fused media RP0 read from pcode is in units of 50 MHz */
+ val *= GT_FREQUENCY_MULTIPLIER;
+
+ return sysfs_emit(buff, "%u\n", val);
+}
+
+static ssize_t media_RPn_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ u32 val;
+ int err;
+
+ err = snb_pcode_read_p(gt->uncore, XEHP_PCODE_FREQUENCY_CONFIG,
+ PCODE_MBOX_FC_SC_READ_FUSED_PN,
+ PCODE_MBOX_DOMAIN_MEDIAFF, &val);
+
+ if (err)
+ return err;
+
+ /* Fused media RPn read from pcode is in units of 50 MHz */
+ val *= GT_FREQUENCY_MULTIPLIER;
+
+ return sysfs_emit(buff, "%u\n", val);
+}
+
+static DEVICE_ATTR_RW(media_freq_factor);
+static struct device_attribute dev_attr_media_freq_factor_scale =
+ __ATTR(media_freq_factor.scale, 0444, freq_factor_scale_show, NULL);
+static DEVICE_ATTR_RO(media_RP0_freq_mhz);
+static DEVICE_ATTR_RO(media_RPn_freq_mhz);
+
+static const struct attribute *media_perf_power_attrs[] = {
+ &dev_attr_media_freq_factor.attr,
+ &dev_attr_media_freq_factor_scale.attr,
+ &dev_attr_media_RP0_freq_mhz.attr,
+ &dev_attr_media_RPn_freq_mhz.attr,
+ NULL
+};
+
static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj,
const struct attribute * const *attrs)
{
@@ -599,4 +768,12 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
drm_warn(&gt->i915->drm,
"failed to create gt%u throttle sysfs files (%pe)",
gt->info.id, ERR_PTR(ret));
+
+ if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
+ ret = sysfs_create_files(kobj, media_perf_power_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u media_perf_power_attrs sysfs (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+ }
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index edd7a3cf5f5f..df708802889d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -59,6 +59,13 @@ enum intel_steering_type {
MSLICE,
LNCF,
+ /*
+ * On some platforms there are multiple types of MCR registers that
+ * will always return a non-terminated value at instance (0, 0). We'll
+ * lump those all into a single category to keep things simple.
+ */
+ INSTANCE0,
+
NUM_STEERING_TYPES
};
@@ -221,6 +228,7 @@ struct intel_gt {
struct {
u8 uc_index;
+ u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
} mocs;
struct intel_pxp pxp;
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index a40d928b3888..e639434e97fd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -306,6 +306,15 @@ struct i915_address_space {
struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
u32 flags);
+ void (*raw_insert_page)(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*raw_insert_entries)(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags);
void (*cleanup)(struct i915_address_space *vm);
void (*foreach)(struct i915_address_space *vm,
@@ -345,6 +354,19 @@ struct i915_ggtt {
bool do_idle_maps;
+ /**
+ * @pte_lost: Are ptes lost on resume?
+ *
+ * Whether the system was recently restored from hibernate and
+ * thus may have lost pte content.
+ */
+ bool pte_lost;
+
+ /**
+ * @probed_pte: Probed pte value on suspend. Re-checked on resume.
+ */
+ u64 probed_pte;
+
int mtrr;
/** Bit 6 swizzling required for X tiling */
@@ -548,14 +570,13 @@ i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
unsigned long lmem_pt_obj_flags);
-
void intel_ggtt_bind_vma(struct i915_address_space *vm,
- struct i915_vm_pt_stash *stash,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
- u32 flags);
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags);
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res);
+ struct i915_vma_resource *vma_res);
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
@@ -581,6 +602,17 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
+/**
+ * i915_ggtt_mark_pte_lost - Mark ggtt ptes as lost or clear such a marking
+ * @i915: The device private.
+ * @val: Whether the PTEs should be marked as lost.
+ *
+ * In some cases PTE content is retained across suspend, but it is typically
+ * lost across hibernation. The PTEs should therefore be marked as lost on
+ * restore from hibernation, and the marking cleared on suspend.
+ */
+void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val);
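[Editor's note: the kernel-doc above implies a simple call pattern across the PM paths; a hedged sketch, with illustrative hook names only:

    static void example_suspend(struct drm_i915_private *i915)
    {
            i915_ggtt_suspend(to_gt(i915)->ggtt);
            i915_ggtt_mark_pte_lost(i915, false);   /* suspend retains PTEs */
    }

    static void example_restore_from_hibernation(struct drm_i915_private *i915)
    {
            i915_ggtt_mark_pte_lost(i915, true);    /* hibernation loses PTEs */
            i915_ggtt_resume(to_gt(i915)->ggtt);
    }
]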
+
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);
@@ -627,7 +659,6 @@ release_pd_entry(struct i915_page_directory * const pd,
struct i915_page_table * const pt,
const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
-void gen8_ggtt_invalidate(struct i915_ggtt *ggtt);
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 40e2e28ee6c7..14fe65812e42 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -124,7 +124,6 @@ static void calc_ia_freq(struct intel_llc *llc,
static void gen6_update_ring_freq(struct intel_llc *llc)
{
- struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
struct ia_constants consts;
unsigned int gpu_freq;
@@ -142,7 +141,7 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
unsigned int ia_freq, ring_freq;
calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
- snb_pcode_write(i915, GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ snb_pcode_write(llc_to_gt(llc)->uncore, GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
gpu_freq);
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index c4c37585ae8c..c6ebe2781076 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -23,6 +23,7 @@ struct drm_i915_mocs_table {
unsigned int n_entries;
const struct drm_i915_mocs_entry *table;
u8 uc_index;
+ u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
u8 unused_entries_index;
};
@@ -47,6 +48,7 @@ struct drm_i915_mocs_table {
/* Helper defines */
#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
+#define PVC_NUM_MOCS_ENTRIES 3
/* (e)LLC caching options */
/*
@@ -394,6 +396,17 @@ static const struct drm_i915_mocs_entry dg2_mocs_table_g10_ax[] = {
MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};
+static const struct drm_i915_mocs_entry pvc_mocs_table[] = {
+ /* Error */
+ MOCS_ENTRY(0, 0, L3_3_WB),
+
+ /* UC */
+ MOCS_ENTRY(1, 0, L3_1_UC),
+
+ /* WB */
+ MOCS_ENTRY(2, 0, L3_3_WB),
+};
+
enum {
HAS_GLOBAL_MOCS = BIT(0),
HAS_ENGINE_MOCS = BIT(1),
@@ -423,7 +436,14 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
memset(table, 0, sizeof(struct drm_i915_mocs_table));
table->unused_entries_index = I915_MOCS_PTE;
- if (IS_DG2(i915)) {
+ if (IS_PONTEVECCHIO(i915)) {
+ table->size = ARRAY_SIZE(pvc_mocs_table);
+ table->table = pvc_mocs_table;
+ table->n_entries = PVC_NUM_MOCS_ENTRIES;
+ table->uc_index = 1;
+ table->wb_index = 2;
+ table->unused_entries_index = 2;
+ } else if (IS_DG2(i915)) {
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax);
table->table = dg2_mocs_table_g10_ax;
@@ -622,6 +642,8 @@ void intel_set_mocs_index(struct intel_gt *gt)
get_mocs_settings(gt->i915, &table);
gt->mocs.uc_index = table.uc_index;
+ if (HAS_L3_CCS_READ(gt->i915))
+ gt->mocs.wb_index = table.wb_index;
}
void intel_mocs_init(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index b4770690e794..f8d0523f4c18 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -272,7 +272,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
GEN6_RC_CTL_HW_ENABLE;
rc6vids = 0;
- ret = snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
+ ret = snb_pcode_read(rc6_to_gt(rc6)->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
if (GRAPHICS_VER(i915) == 6 && ret) {
drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
} else if (GRAPHICS_VER(i915) == 6 &&
@@ -282,7 +282,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
rc6vids |= GEN6_ENCODE_RC6_VID(450);
- ret = snb_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ ret = snb_pcode_write(rc6_to_gt(rc6)->uncore, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
if (ret)
drm_err(&i915->drm,
"Couldn't fix incorrect rc6 voltage\n");
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index f5111c0a0060..6e90032e12e9 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -12,8 +12,106 @@
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
+static void _release_bars(struct pci_dev *pdev)
+{
+ int resno;
+
+ for (resno = PCI_STD_RESOURCES; resno < PCI_STD_RESOURCE_END; resno++) {
+ if (pci_resource_len(pdev, resno))
+ pci_release_resource(pdev, resno);
+ }
+}
+
+static void
+_resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ int bar_size = pci_rebar_bytes_to_size(size);
+ int ret;
+
+ _release_bars(pdev);
+
+ ret = pci_resize_resource(pdev, resno, bar_size);
+ if (ret) {
+ drm_info(&i915->drm, "Failed to resize BAR%d to %dM (%pe)\n",
+ resno, 1 << bar_size, ERR_PTR(ret));
+ return;
+ }
+
+ drm_info(&i915->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size);
+}
+
+#define LMEM_BAR_NUM 2
+static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_bus *root = pdev->bus;
+ struct resource *root_res;
+ resource_size_t rebar_size;
+ resource_size_t current_size;
+ u32 pci_cmd;
+ int i;
+
+ current_size = roundup_pow_of_two(pci_resource_len(pdev, LMEM_BAR_NUM));
+
+ if (i915->params.lmem_bar_size) {
+ u32 bar_sizes;
+
+ rebar_size = i915->params.lmem_bar_size *
+ (resource_size_t)SZ_1M;
+ bar_sizes = pci_rebar_get_possible_sizes(pdev,
+ LMEM_BAR_NUM);
+
+ if (rebar_size == current_size)
+ return;
+
+ if (!(bar_sizes & BIT(pci_rebar_bytes_to_size(rebar_size))) ||
+ rebar_size >= roundup_pow_of_two(lmem_size)) {
+ rebar_size = lmem_size;
+
+ drm_info(&i915->drm,
+ "Given bar size is not within supported size, setting it to default: %llu\n",
+ (u64)lmem_size >> 20);
+ }
+ } else {
+ rebar_size = current_size;
+
+ if (rebar_size != roundup_pow_of_two(lmem_size))
+ rebar_size = lmem_size;
+ else
+ return;
+ }
+
+ /* Find out if the root bus contains 64-bit memory addressing */
+ while (root->parent)
+ root = root->parent;
+
+ pci_bus_for_each_resource(root, root_res, i) {
+ if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
+ root_res->start > 0x100000000ull)
+ break;
+ }
+
+ /* pci_resize_resource will fail anyway */
+ if (!root_res) {
+ drm_info(&i915->drm, "Can't resize LMEM BAR - platform support is missing\n");
+ return;
+ }
+
+ /* First disable PCI memory decoding references */
+ pci_read_config_dword(pdev, PCI_COMMAND, &pci_cmd);
+ pci_write_config_dword(pdev, PCI_COMMAND,
+ pci_cmd & ~PCI_COMMAND_MEMORY);
+
+ _resize_bar(i915, LMEM_BAR_NUM, rebar_size);
+
+ pci_assign_unassigned_bus_resources(pdev->bus);
+ pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
+}
+
static int
region_lmem_release(struct intel_memory_region *mem)
{
@@ -101,14 +199,18 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
return ERR_PTR(-ENODEV);
if (HAS_FLAT_CCS(i915)) {
+ resource_size_t lmem_range;
u64 tile_stolen, flat_ccs_base;
- lmem_size = pci_resource_len(pdev, 2);
- flat_ccs_base = intel_gt_read_register(gt, XEHPSDV_FLAT_CCS_BASE_ADDR);
- flat_ccs_base = (flat_ccs_base >> XEHPSDV_CCS_BASE_SHIFT) * SZ_64K;
+ lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
+ lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
+ lmem_size *= SZ_1G;
+
+ flat_ccs_base = intel_gt_mcr_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
+ flat_ccs_base = (flat_ccs_base >> XEHP_CCS_BASE_SHIFT) * SZ_64K;
if (GEM_WARN_ON(lmem_size < flat_ccs_base))
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-EIO);
tile_stolen = lmem_size - flat_ccs_base;
@@ -123,6 +225,8 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE);
}
+ i915_resize_lmem_bar(i915, lmem_size);
+
if (i915->params.lmem_size > 0) {
lmem_size = min_t(resource_size_t, lmem_size,
mul_u32_u32(i915->params.lmem_size, SZ_1M));
@@ -131,7 +235,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
io_start = pci_resource_start(pdev, 2);
io_size = min(pci_resource_len(pdev, 2), lmem_size);
if (!io_size)
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-EIO);
min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
I915_GTT_PAGE_SIZE_4K;
@@ -159,6 +263,10 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
drm_info(&i915->drm, "Local memory available: %pa\n",
&lmem_size);
+ if (io_size < lmem_size)
+ drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n",
+ (u64)io_size >> 20);
+
return mem;
err_region_put:
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 40ffcb94e379..15ec64d881c4 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -299,7 +299,8 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
GEM_BUG_ON(ring->emit > ring->size - bytes);
GEM_BUG_ON(ring->space < bytes);
cs = ring->vaddr + ring->emit;
- GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset32(cs, POISON_INUSE, bytes / sizeof(*cs));
ring->emit += bytes;
ring->space -= bytes;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 5423bfd301ad..d5d6f1fadcae 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -117,7 +117,9 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
return;
/* ring should be idle before issuing a sync flush*/
- GEM_DEBUG_WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+ if ((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0)
+ drm_warn(&engine->i915->drm, "%s not idle before sync flush!\n",
+ engine->name);
ENGINE_WRITE_FW(engine, RING_INSTPM,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
@@ -596,8 +598,9 @@ static void ring_context_reset(struct intel_context *ce)
clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}
-static void ring_context_ban(struct intel_context *ce,
- struct i915_request *rq)
+static void ring_context_revoke(struct intel_context *ce,
+ struct i915_request *rq,
+ unsigned int preempt_timeout_ms)
{
struct intel_engine_cs *engine;
@@ -632,7 +635,7 @@ static const struct intel_context_ops ring_context_ops = {
.cancel_request = ring_context_cancel_request,
- .ban = ring_context_ban,
+ .revoke = ring_context_revoke,
.pre_pin = ring_context_pre_pin,
.pin = ring_context_pin,
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 3476a11f294c..fb3f57ee450b 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1075,7 +1075,9 @@ static u32 intel_rps_read_state_cap(struct intel_rps *rps)
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
- if (IS_XEHPSDV(i915))
+ if (IS_PONTEVECCHIO(i915))
+ return intel_uncore_read(uncore, PVC_RP_STATE_CAP);
+ else if (IS_XEHPSDV(i915))
return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
else if (IS_GEN9_LP(i915))
return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
@@ -1142,7 +1144,8 @@ static void gen6_rps_init(struct intel_rps *rps)
if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11)
mult = GEN9_FREQ_SCALER;
- if (snb_pcode_read(i915, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+ if (snb_pcode_read(rps_to_gt(rps)->uncore,
+ HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status, NULL) == 0)
rps->efficient_freq =
clamp_t(u32,
@@ -1982,7 +1985,7 @@ void intel_rps_init(struct intel_rps *rps)
if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
u32 params = 0;
- snb_pcode_read(i915, GEN6_READ_OC_PARAMS, &params, NULL);
+ snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS, &params, NULL);
if (params & BIT(31)) { /* OC supported */
drm_dbg(&i915->drm,
"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index fdd25691beda..c6d3050604c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -16,11 +16,6 @@ void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
sseu->max_slices = max_slices;
sseu->max_subslices = max_subslices;
sseu->max_eus_per_subslice = max_eus_per_subslice;
-
- sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
- GEM_BUG_ON(sseu->ss_stride > GEN_MAX_SUBSLICE_STRIDE);
- sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
- GEM_BUG_ON(sseu->eu_stride > GEN_MAX_EU_STRIDE);
}
unsigned int
@@ -28,152 +23,240 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
{
unsigned int i, total = 0;
- for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
- total += hweight8(sseu->subslice_mask[i]);
+ if (sseu->has_xehp_dss)
+ return bitmap_weight(sseu->subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->subslice_mask));
+
+ for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask.hsw); i++)
+ total += hweight8(sseu->subslice_mask.hsw[i]);
return total;
}
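Note the split above: on Xe_HP the subslice mask is a true multi-word bitmap, so the count comes from bitmap_weight(); pre-Xe_HP it stays a per-slice array of bytes. A minimal standalone sketch of the bitmap-weight path (assumes a 64-bit unsigned long; the helper name is illustrative):

static unsigned int bitmap_weight_sketch(const unsigned long *map,
					 unsigned int nbits)
{
	unsigned int i, total = 0;

	/* assumes nbits is a multiple of the word size */
	for (i = 0; i < nbits / (8 * sizeof(unsigned long)); i++)
		total += __builtin_popcountl(map[i]);
	return total;
}

/* bitmap_weight_sketch((unsigned long[]){ 0xf0f0 }, 64) == 8 */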
-static u32
-sseu_get_subslices(const struct sseu_dev_info *sseu,
- const u8 *subslice_mask, u8 slice)
+unsigned int
+intel_sseu_get_hsw_subslices(const struct sseu_dev_info *sseu, u8 slice)
{
- int i, offset = slice * sseu->ss_stride;
- u32 mask = 0;
-
- GEM_BUG_ON(slice >= sseu->max_slices);
-
- for (i = 0; i < sseu->ss_stride; i++)
- mask |= (u32)subslice_mask[offset + i] << i * BITS_PER_BYTE;
+ WARN_ON(sseu->has_xehp_dss);
+ if (WARN_ON(slice >= sseu->max_slices))
+ return 0;
- return mask;
+ return sseu->subslice_mask.hsw[slice];
}
-u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
+static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
{
- return sseu_get_subslices(sseu, sseu->subslice_mask, slice);
+ if (sseu->has_xehp_dss) {
+ WARN_ON(slice > 0);
+ return sseu->eu_mask.xehp[subslice];
+ } else {
+ return sseu->eu_mask.hsw[slice][subslice];
+ }
}
-static u32 sseu_get_geometry_subslices(const struct sseu_dev_info *sseu)
+static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
+ u16 eu_mask)
{
- return sseu_get_subslices(sseu, sseu->geometry_subslice_mask, 0);
+ GEM_WARN_ON(eu_mask && __fls(eu_mask) >= sseu->max_eus_per_subslice);
+ if (sseu->has_xehp_dss) {
+ GEM_WARN_ON(slice > 0);
+ sseu->eu_mask.xehp[subslice] = eu_mask;
+ } else {
+ sseu->eu_mask.hsw[slice][subslice] = eu_mask;
+ }
}
-u32 intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu)
+static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
- return sseu_get_subslices(sseu, sseu->compute_subslice_mask, 0);
-}
+ int s, ss, total = 0;
-void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
- u8 *subslice_mask, u32 ss_mask)
-{
- int offset = slice * sseu->ss_stride;
+ for (s = 0; s < sseu->max_slices; s++)
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (sseu->has_xehp_dss)
+ total += hweight16(sseu->eu_mask.xehp[ss]);
+ else
+ total += hweight16(sseu->eu_mask.hsw[s][ss]);
- memcpy(&subslice_mask[offset], &ss_mask, sseu->ss_stride);
+ return total;
}
-unsigned int
-intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
+/**
+ * intel_sseu_copy_eumask_to_user - Copy EU mask into a userspace buffer
+ * @to: Pointer to userspace buffer to copy to
+ * @sseu: SSEU structure containing EU mask to copy
+ *
+ * Copies the EU mask to a userspace buffer in the format expected by
+ * the query ioctl's topology queries.
+ *
+ * Returns the result of the copy_to_user() operation.
+ */
+int intel_sseu_copy_eumask_to_user(void __user *to,
+ const struct sseu_dev_info *sseu)
{
- return hweight32(intel_sseu_get_subslices(sseu, slice));
-}
+ u8 eu_mask[GEN_SS_MASK_SIZE * GEN_MAX_EU_STRIDE] = {};
+ int eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
+ int len = sseu->max_slices * sseu->max_subslices * eu_stride;
+ int s, ss, i;
-static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
- int subslice)
-{
- int slice_stride = sseu->max_subslices * sseu->eu_stride;
+ for (s = 0; s < sseu->max_slices; s++) {
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ int uapi_offset =
+ s * sseu->max_subslices * eu_stride +
+ ss * eu_stride;
+ u16 mask = sseu_get_eus(sseu, s, ss);
+
+ for (i = 0; i < eu_stride; i++)
+ eu_mask[uapi_offset + i] =
+ (mask >> (BITS_PER_BYTE * i)) & 0xff;
+ }
+ }
- return slice * slice_stride + subslice * sseu->eu_stride;
+ return copy_to_user(to, eu_mask, len);
}
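The layout produced above is slice-major with eu_stride bytes per subslice, least-significant byte first. A hypothetical userspace-side decoder of that buffer (all names here are illustrative, not part of the uapi headers):

static unsigned short get_eu_mask(const unsigned char *buf, int s, int ss,
				  int max_subslices, int eu_stride)
{
	int off = s * max_subslices * eu_stride + ss * eu_stride;
	unsigned short mask = 0;
	int i;

	for (i = 0; i < eu_stride; i++)
		mask |= (unsigned short)buf[off + i] << (8 * i);
	return mask;
}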
-static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
- int subslice)
+/**
+ * intel_sseu_copy_ssmask_to_user - Copy subslice mask into a userspace buffer
+ * @to: Pointer to userspace buffer to copy to
+ * @sseu: SSEU structure containing subslice mask to copy
+ *
+ * Copies the subslice mask to a userspace buffer in the format expected by
+ * the query ioctl's topology queries.
+ *
+ * Returns the result of the copy_to_user() operation.
+ */
+int intel_sseu_copy_ssmask_to_user(void __user *to,
+ const struct sseu_dev_info *sseu)
{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
- u16 eu_mask = 0;
+ u8 ss_mask[GEN_SS_MASK_SIZE] = {};
+ int ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
+ int len = sseu->max_slices * ss_stride;
+ int s, ss, i;
- for (i = 0; i < sseu->eu_stride; i++)
- eu_mask |=
- ((u16)sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE);
+ for (s = 0; s < sseu->max_slices; s++) {
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ i = s * ss_stride * BITS_PER_BYTE + ss;
- return eu_mask;
-}
+ if (!intel_sseu_has_subslice(sseu, s, ss))
+ continue;
-static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
- u16 eu_mask)
-{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
+ ss_mask[i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
+ }
+ }
- for (i = 0; i < sseu->eu_stride; i++)
- sseu->eu_mask[offset + i] =
- (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
+ return copy_to_user(to, ss_mask, len);
}
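Here each subslice occupies one bit at index s * ss_stride * 8 + ss. A hypothetical inverse of the packing, as a userspace consumer might write it:

static int has_subslice(const unsigned char *buf, int s, int ss, int ss_stride)
{
	int i = s * ss_stride * 8 + ss;

	return buf[i / 8] & (1 << (i % 8));
}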
-static u16 compute_eu_total(const struct sseu_dev_info *sseu)
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
+ u32 ss_en, u16 eu_en)
{
- u16 i, total = 0;
+ u32 valid_ss_mask = GENMASK(sseu->max_subslices - 1, 0);
+ int ss;
- for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
- total += hweight8(sseu->eu_mask[i]);
+ sseu->slice_mask |= BIT(0);
+ sseu->subslice_mask.hsw[0] = ss_en & valid_ss_mask;
- return total;
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (intel_sseu_has_subslice(sseu, 0, ss))
+ sseu_set_eus(sseu, 0, ss, eu_en);
+
+ sseu->eu_per_subslice = hweight16(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
}
-static u32 get_ss_stride_mask(struct sseu_dev_info *sseu, u8 s, u32 ss_en)
+static void xehp_compute_sseu_info(struct sseu_dev_info *sseu,
+ u16 eu_en)
{
- u32 ss_mask;
+ int ss;
- ss_mask = ss_en >> (s * sseu->max_subslices);
- ss_mask &= GENMASK(sseu->max_subslices - 1, 0);
+ sseu->slice_mask |= BIT(0);
- return ss_mask;
+ bitmap_or(sseu->subslice_mask.xehp,
+ sseu->compute_subslice_mask.xehp,
+ sseu->geometry_subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->subslice_mask));
+
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (intel_sseu_has_subslice(sseu, 0, ss))
+ sseu_set_eus(sseu, 0, ss, eu_en);
+
+ sseu->eu_per_subslice = hweight16(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
}
-static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, u8 s_en,
- u32 g_ss_en, u32 c_ss_en, u16 eu_en)
+static void
+xehp_load_dss_mask(struct intel_uncore *uncore,
+ intel_sseu_ss_mask_t *ssmask,
+ int numregs,
+ ...)
{
- int s, ss;
+ va_list argp;
+ u32 fuse_val[I915_MAX_SS_FUSE_REGS] = {};
+ int i;
- /* g_ss_en/c_ss_en represent entire subslice mask across all slices */
- GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
- sizeof(g_ss_en) * BITS_PER_BYTE);
+ if (WARN_ON(numregs > I915_MAX_SS_FUSE_REGS))
+ numregs = I915_MAX_SS_FUSE_REGS;
- for (s = 0; s < sseu->max_slices; s++) {
- if ((s_en & BIT(s)) == 0)
- continue;
+ va_start(argp, numregs);
+ for (i = 0; i < numregs; i++)
+ fuse_val[i] = intel_uncore_read(uncore, va_arg(argp, i915_reg_t));
+ va_end(argp);
- sseu->slice_mask |= BIT(s);
-
- /*
- * XeHP introduces the concept of compute vs geometry DSS. To
- * reduce variation between GENs around subslice usage, store a
- * mask for both the geometry and compute enabled masks since
- * userspace will need to be able to query these masks
- * independently. Also compute a total enabled subslice count
- * for the purposes of selecting subslices to use in a
- * particular GEM context.
- */
- intel_sseu_set_subslices(sseu, s, sseu->compute_subslice_mask,
- get_ss_stride_mask(sseu, s, c_ss_en));
- intel_sseu_set_subslices(sseu, s, sseu->geometry_subslice_mask,
- get_ss_stride_mask(sseu, s, g_ss_en));
- intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
- get_ss_stride_mask(sseu, s,
- g_ss_en | c_ss_en));
+ bitmap_from_arr32(ssmask->xehp, fuse_val, numregs * 32);
+}
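bitmap_from_arr32() copies the 32-bit register values into the unsigned-long bitmap in order, so on a 64-bit kernel the first register supplies DSS bits 0-31 and the second bits 32-63. A sketch of the folding for the two-register case (illustrative helper, 64-bit long assumed):

static unsigned long fold_dss_fuses(unsigned int fuse0, unsigned int fuse1)
{
	return (unsigned long)fuse0 | ((unsigned long)fuse1 << 32);
}

/* fold_dss_fuses(0x000000ff, 0x00000003): DSS 0-7 and 32-33 enabled */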
- for (ss = 0; ss < sseu->max_subslices; ss++)
- if (intel_sseu_has_subslice(sseu, s, ss))
- sseu_set_eus(sseu, s, ss, eu_en);
+static void xehp_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ u16 eu_en = 0;
+ u8 eu_en_fuse;
+ int num_compute_regs, num_geometry_regs;
+ int eu;
+
+ if (IS_PONTEVECCHIO(gt->i915)) {
+ num_geometry_regs = 0;
+ num_compute_regs = 2;
+ } else {
+ num_geometry_regs = 1;
+ num_compute_regs = 1;
}
- sseu->eu_per_subslice = hweight16(eu_en);
- sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+ * The concept of slice has been removed in Xe_HP. To be compatible
+ * with prior generations, assume a single slice across the entire
+ * device. Then determine the DSS for each workload type within
+ * that software slice.
+ */
+ intel_sseu_set_info(sseu, 1,
+ 32 * max(num_geometry_regs, num_compute_regs),
+ HAS_ONE_EU_PER_FUSE_BIT(gt->i915) ? 8 : 16);
+ sseu->has_xehp_dss = 1;
+
+ xehp_load_dss_mask(uncore, &sseu->geometry_subslice_mask,
+ num_geometry_regs,
+ GEN12_GT_GEOMETRY_DSS_ENABLE);
+ xehp_load_dss_mask(uncore, &sseu->compute_subslice_mask,
+ num_compute_regs,
+ GEN12_GT_COMPUTE_DSS_ENABLE,
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT);
+
+ eu_en_fuse = intel_uncore_read(uncore, XEHP_EU_ENABLE) & XEHP_EU_ENA_MASK;
+
+ if (HAS_ONE_EU_PER_FUSE_BIT(gt->i915))
+ eu_en = eu_en_fuse;
+ else
+ for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
+ if (eu_en_fuse & BIT(eu))
+ eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
+
+ xehp_compute_sseu_info(sseu, eu_en);
}
static void gen12_sseu_info_init(struct intel_gt *gt)
{
struct sseu_dev_info *sseu = &gt->info.sseu;
struct intel_uncore *uncore = gt->uncore;
- u32 g_dss_en, c_dss_en = 0;
+ u32 g_dss_en;
u16 eu_en = 0;
u8 eu_en_fuse;
u8 s_en;
@@ -183,43 +266,28 @@ static void gen12_sseu_info_init(struct intel_gt *gt)
* Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
* Instead of splitting these, provide userspace with an array
* of DSS to more closely represent the hardware resource.
- *
- * In addition, the concept of slice has been removed in Xe_HP.
- * To be compatible with prior generations, assume a single slice
- * across the entire device. Then calculate out the DSS for each
- * workload type within that software slice.
*/
- if (IS_DG2(gt->i915) || IS_XEHPSDV(gt->i915))
- intel_sseu_set_info(sseu, 1, 32, 16);
- else
- intel_sseu_set_info(sseu, 1, 6, 16);
+ intel_sseu_set_info(sseu, 1, 6, 16);
/*
- * As mentioned above, Xe_HP does not have the concept of a slice.
- * Enable one for software backwards compatibility.
+ * Although gen12 architecture supported multiple slices, TGL, RKL,
+ * DG1, and ADL only had a single slice.
*/
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
- s_en = 0x1;
- else
- s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
- GEN11_GT_S_ENA_MASK;
+ s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+ GEN11_GT_S_ENA_MASK;
+ drm_WARN_ON(&gt->i915->drm, s_en != 0x1);
g_dss_en = intel_uncore_read(uncore, GEN12_GT_GEOMETRY_DSS_ENABLE);
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
- c_dss_en = intel_uncore_read(uncore, GEN12_GT_COMPUTE_DSS_ENABLE);
/* one bit per pair of EUs */
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
- eu_en_fuse = intel_uncore_read(uncore, XEHP_EU_ENABLE) & XEHP_EU_ENA_MASK;
- else
- eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
- GEN11_EU_DIS_MASK);
+ eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+ GEN11_EU_DIS_MASK);
for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
if (eu_en_fuse & BIT(eu))
eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
- gen11_compute_sseu_info(sseu, s_en, g_dss_en, c_dss_en, eu_en);
+ gen11_compute_sseu_info(sseu, g_dss_en, eu_en);
/* TGL only supports slice-level power gating */
sseu->has_slice_pg = 1;
@@ -238,14 +306,20 @@ static void gen11_sseu_info_init(struct intel_gt *gt)
else
intel_sseu_set_info(sseu, 1, 8, 8);
+ /*
+ * Although gen11 architecture supported multiple slices, ICL and
+ * EHL/JSL only had a single slice in practice.
+ */
s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
GEN11_GT_S_ENA_MASK;
+ drm_WARN_ON(&gt->i915->drm, s_en != 0x1);
+
ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE);
eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
GEN11_EU_DIS_MASK);
- gen11_compute_sseu_info(sseu, s_en, ss_en, 0, eu_en);
+ gen11_compute_sseu_info(sseu, ss_en, eu_en);
/* ICL has no power gating restrictions. */
sseu->has_slice_pg = 1;
@@ -257,7 +331,6 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
{
struct sseu_dev_info *sseu = &gt->info.sseu;
u32 fuse;
- u8 subslice_mask = 0;
fuse = intel_uncore_read(gt->uncore, CHV_FUSE_GT);
@@ -271,8 +344,8 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
- subslice_mask |= BIT(0);
- sseu_set_eus(sseu, 0, 0, ~disabled_mask);
+ sseu->subslice_mask.hsw[0] |= BIT(0);
+ sseu_set_eus(sseu, 0, 0, ~disabled_mask & 0xFF);
}
if (!(fuse & CHV_FGT_DISABLE_SS1)) {
@@ -282,12 +355,10 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
- subslice_mask |= BIT(1);
- sseu_set_eus(sseu, 0, 1, ~disabled_mask);
+ sseu->subslice_mask.hsw[0] |= BIT(1);
+ sseu_set_eus(sseu, 0, 1, ~disabled_mask & 0xFF);
}
- intel_sseu_set_subslices(sseu, 0, sseu->subslice_mask, subslice_mask);
-
sseu->eu_total = compute_eu_total(sseu);
/*
@@ -342,8 +413,7 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
/* skip disabled slice */
continue;
- intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
- subslice_mask);
+ sseu->subslice_mask.hsw[s] = subslice_mask;
eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
@@ -356,7 +426,7 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
- sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask & eu_mask);
eu_per_ss = sseu->max_eus_per_subslice -
hweight8(eu_disabled_mask);
@@ -400,8 +470,8 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
if (IS_GEN9_LP(i915)) {
-#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask[0] & BIT(ss)))
- info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
+#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask.hsw[0] & BIT(ss)))
+ info->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3;
sseu->min_eu_in_pool = 0;
if (info->has_pooled_eu) {
@@ -455,8 +525,7 @@ static void bdw_sseu_info_init(struct intel_gt *gt)
/* skip disabled slice */
continue;
- intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
- subslice_mask);
+ sseu->subslice_mask.hsw[s] = subslice_mask;
for (ss = 0; ss < sseu->max_subslices; ss++) {
u8 eu_disabled_mask;
@@ -469,7 +538,7 @@ static void bdw_sseu_info_init(struct intel_gt *gt)
eu_disabled_mask =
eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
- sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask & 0xFF);
n_disabled = hweight8(eu_disabled_mask);
@@ -553,8 +622,7 @@ static void hsw_sseu_info_init(struct intel_gt *gt)
sseu->eu_per_subslice);
for (s = 0; s < sseu->max_slices; s++) {
- intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
- subslice_mask);
+ sseu->subslice_mask.hsw[s] = subslice_mask;
for (ss = 0; ss < sseu->max_subslices; ss++) {
sseu_set_eus(sseu, s, ss,
@@ -574,18 +642,20 @@ void intel_sseu_info_init(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
- if (IS_HASWELL(i915))
- hsw_sseu_info_init(gt);
- else if (IS_CHERRYVIEW(i915))
- cherryview_sseu_info_init(gt);
- else if (IS_BROADWELL(i915))
- bdw_sseu_info_init(gt);
- else if (GRAPHICS_VER(i915) == 9)
- gen9_sseu_info_init(gt);
- else if (GRAPHICS_VER(i915) == 11)
- gen11_sseu_info_init(gt);
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ xehp_sseu_info_init(gt);
else if (GRAPHICS_VER(i915) >= 12)
gen12_sseu_info_init(gt);
+ else if (GRAPHICS_VER(i915) >= 11)
+ gen11_sseu_info_init(gt);
+ else if (GRAPHICS_VER(i915) >= 9)
+ gen9_sseu_info_init(gt);
+ else if (IS_BROADWELL(i915))
+ bdw_sseu_info_init(gt);
+ else if (IS_CHERRYVIEW(i915))
+ cherryview_sseu_info_init(gt);
+ else if (IS_HASWELL(i915))
+ hsw_sseu_info_init(gt);
}
u32 intel_sseu_make_rpcs(struct intel_gt *gt,
@@ -641,7 +711,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
*/
if (GRAPHICS_VER(i915) == 11 &&
slices == 1 &&
- subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
+ subslices > min_t(u8, 4, hweight8(sseu->subslice_mask.hsw[0]) / 2)) {
GEM_BUG_ON(subslices & 1);
subslice_pg = false;
@@ -707,14 +777,29 @@ void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
int s;
- drm_printf(p, "slice total: %u, mask=%04x\n",
- hweight8(sseu->slice_mask), sseu->slice_mask);
- drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
- for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
- s, intel_sseu_subslices_per_slice(sseu, s),
- intel_sseu_get_subslices(sseu, s));
+ if (sseu->has_xehp_dss) {
+ drm_printf(p, "subslice total: %u\n",
+ intel_sseu_subslice_total(sseu));
+ drm_printf(p, "geometry dss mask=%*pb\n",
+ XEHP_BITMAP_BITS(sseu->geometry_subslice_mask),
+ sseu->geometry_subslice_mask.xehp);
+ drm_printf(p, "compute dss mask=%*pb\n",
+ XEHP_BITMAP_BITS(sseu->compute_subslice_mask),
+ sseu->compute_subslice_mask.xehp);
+ } else {
+ drm_printf(p, "slice total: %u, mask=%04x\n",
+ hweight8(sseu->slice_mask), sseu->slice_mask);
+ drm_printf(p, "subslice total: %u\n",
+ intel_sseu_subslice_total(sseu));
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ u8 ss_mask = sseu->subslice_mask.hsw[s];
+
+ drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
+ s, hweight8(ss_mask), ss_mask);
+ }
}
+
drm_printf(p, "EU total: %u\n", sseu->eu_total);
drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
drm_printf(p, "has slice power gating: %s\n",
@@ -731,9 +816,10 @@ static void sseu_print_hsw_topology(const struct sseu_dev_info *sseu,
int s, ss;
for (s = 0; s < sseu->max_slices; s++) {
+ u8 ss_mask = sseu->subslice_mask.hsw[s];
+
drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
- s, intel_sseu_subslices_per_slice(sseu, s),
- intel_sseu_get_subslices(sseu, s));
+ s, hweight8(ss_mask), ss_mask);
for (ss = 0; ss < sseu->max_subslices; ss++) {
u16 enabled_eus = sseu_get_eus(sseu, s, ss);
@@ -747,16 +833,14 @@ static void sseu_print_hsw_topology(const struct sseu_dev_info *sseu,
static void sseu_print_xehp_topology(const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
- u32 g_dss_mask = sseu_get_geometry_subslices(sseu);
- u32 c_dss_mask = intel_sseu_get_compute_subslices(sseu);
int dss;
for (dss = 0; dss < sseu->max_subslices; dss++) {
u16 enabled_eus = sseu_get_eus(sseu, 0, dss);
drm_printf(p, "DSS_%02d: G:%3s C:%3s, %2u EUs (0x%04hx)\n", dss,
- str_yes_no(g_dss_mask & BIT(dss)),
- str_yes_no(c_dss_mask & BIT(dss)),
+ str_yes_no(test_bit(dss, sseu->geometry_subslice_mask.xehp)),
+ str_yes_no(test_bit(dss, sseu->compute_subslice_mask.xehp)),
hweight16(enabled_eus), enabled_eus);
}
}
@@ -774,20 +858,44 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
}
}
-u16 intel_slicemask_from_dssmask(u64 dss_mask, int dss_per_slice)
+void intel_sseu_print_ss_info(const char *type,
+ const struct sseu_dev_info *sseu,
+ struct seq_file *m)
{
- u16 slice_mask = 0;
+ int s;
+
+ if (sseu->has_xehp_dss) {
+ seq_printf(m, " %s Geometry DSS: %u\n", type,
+ bitmap_weight(sseu->geometry_subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->geometry_subslice_mask)));
+ seq_printf(m, " %s Compute DSS: %u\n", type,
+ bitmap_weight(sseu->compute_subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->compute_subslice_mask)));
+ } else {
+ for (s = 0; s < fls(sseu->slice_mask); s++)
+ seq_printf(m, " %s Slice%i subslices: %u\n", type,
+ s, hweight8(sseu->subslice_mask.hsw[s]));
+ }
+}
+
+u16 intel_slicemask_from_xehp_dssmask(intel_sseu_ss_mask_t dss_mask,
+ int dss_per_slice)
+{
+ intel_sseu_ss_mask_t per_slice_mask = {};
+ unsigned long slice_mask = 0;
int i;
- WARN_ON(sizeof(dss_mask) * 8 / dss_per_slice > 8 * sizeof(slice_mask));
+ WARN_ON(DIV_ROUND_UP(XEHP_BITMAP_BITS(dss_mask), dss_per_slice) >
+ 8 * sizeof(slice_mask));
- for (i = 0; dss_mask; i++) {
- if (dss_mask & GENMASK(dss_per_slice - 1, 0))
+ bitmap_fill(per_slice_mask.xehp, dss_per_slice);
+ for (i = 0; !bitmap_empty(dss_mask.xehp, XEHP_BITMAP_BITS(dss_mask)); i++) {
+ if (bitmap_intersects(dss_mask.xehp, per_slice_mask.xehp, dss_per_slice))
slice_mask |= BIT(i);
- dss_mask >>= dss_per_slice;
+ bitmap_shift_right(dss_mask.xehp, dss_mask.xehp, dss_per_slice,
+ XEHP_BITMAP_BITS(dss_mask));
}
return slice_mask;
}
-
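The algorithm is unchanged from the u64-based helper it replaces, only lifted onto multi-word bitmaps: slice i is reported present if any DSS within its dss_per_slice-wide chunk is enabled. A plain-C sketch of the idea (valid for dss_per_slice < 64):

static unsigned short slicemask_from_dssmask_sketch(unsigned long long dss_mask,
						    int dss_per_slice)
{
	unsigned short slice_mask = 0;
	int i;

	for (i = 0; dss_mask; i++) {
		if (dss_mask & ((1ULL << dss_per_slice) - 1))
			slice_mask |= 1 << i;
		dss_mask >>= dss_per_slice;
	}
	return slice_mask;
}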
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index 5c078df4729c..aa87d3832d60 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -25,12 +25,16 @@ struct drm_printer;
/*
* Maximum number of subslices that can exist within a HSW-style slice. This
* is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
- * GEN_MAX_DSS value below).
+ * I915_MAX_SS_FUSE_BITS value below).
*/
#define GEN_MAX_SS_PER_HSW_SLICE 6
-/* Maximum number of DSS on newer platforms (Xe_HP and beyond). */
-#define GEN_MAX_DSS 32
+/*
+ * Maximum number of 32-bit registers used by hardware to express the
+ * enabled/disabled subslices.
+ */
+#define I915_MAX_SS_FUSE_REGS 2
+#define I915_MAX_SS_FUSE_BITS (I915_MAX_SS_FUSE_REGS * 32)
/* Maximum number of EUs that can exist within a subslice or DSS. */
#define GEN_MAX_EUS_PER_SS 16
@@ -38,7 +42,7 @@ struct drm_printer;
#define SSEU_MAX(a, b) ((a) > (b) ? (a) : (b))
/* The maximum number of bits needed to express each subslice/DSS independently */
-#define GEN_SS_MASK_SIZE SSEU_MAX(GEN_MAX_DSS, \
+#define GEN_SS_MASK_SIZE SSEU_MAX(I915_MAX_SS_FUSE_BITS, \
GEN_MAX_HSW_SLICES * GEN_MAX_SS_PER_HSW_SLICE)
#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
@@ -49,15 +53,28 @@ struct drm_printer;
#define GEN_DSS_PER_CSLICE 8
#define GEN_DSS_PER_MSLICE 8
-#define GEN_MAX_GSLICES (GEN_MAX_DSS / GEN_DSS_PER_GSLICE)
-#define GEN_MAX_CSLICES (GEN_MAX_DSS / GEN_DSS_PER_CSLICE)
+#define GEN_MAX_GSLICES (I915_MAX_SS_FUSE_BITS / GEN_DSS_PER_GSLICE)
+#define GEN_MAX_CSLICES (I915_MAX_SS_FUSE_BITS / GEN_DSS_PER_CSLICE)
+
+typedef union {
+ u8 hsw[GEN_MAX_HSW_SLICES];
+
+ /* Bitmap compatible with linux/bitmap.h; may exceed size of u64 */
+ unsigned long xehp[BITS_TO_LONGS(I915_MAX_SS_FUSE_BITS)];
+} intel_sseu_ss_mask_t;
+
+#define XEHP_BITMAP_BITS(mask) ((int)BITS_PER_TYPE(typeof(mask.xehp)))
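The union gives two views of the same storage: pre-Xe_HP code indexes one byte of subslice bits per HSW-style slice, while Xe_HP code treats the whole field as a single flat DSS bitmap. A small illustration with hypothetical sizes and values:

typedef union {
	unsigned char hsw[3];	/* one byte of subslice bits per slice */
	unsigned long xehp[1];	/* flat DSS bitmap */
} ss_mask_sketch_t;

static ss_mask_sketch_t a = { .hsw = { 0x3f } };	/* slice 0: SS 0-5 */
static ss_mask_sketch_t b = { .xehp = { 1UL << 17 } };	/* DSS 17 enabled */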
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask[GEN_SS_MASK_SIZE];
- u8 geometry_subslice_mask[GEN_SS_MASK_SIZE];
- u8 compute_subslice_mask[GEN_SS_MASK_SIZE];
- u8 eu_mask[GEN_SS_MASK_SIZE * GEN_MAX_EU_STRIDE];
+ intel_sseu_ss_mask_t subslice_mask;
+ intel_sseu_ss_mask_t geometry_subslice_mask;
+ intel_sseu_ss_mask_t compute_subslice_mask;
+ union {
+ u16 hsw[GEN_MAX_HSW_SLICES][GEN_MAX_SS_PER_HSW_SLICE];
+ u16 xehp[I915_MAX_SS_FUSE_BITS];
+ } eu_mask;
+
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
@@ -66,14 +83,16 @@ struct sseu_dev_info {
u8 has_slice_pg:1;
u8 has_subslice_pg:1;
u8 has_eu_pg:1;
+ /*
+ * For Xe_HP and beyond, the hardware no longer has traditional slices
+ * so we just report the entire DSS pool under a fake "slice 0."
+ */
+ u8 has_xehp_dss:1;
/* Topology fields */
u8 max_slices;
u8 max_subslices;
u8 max_eus_per_subslice;
-
- u8 ss_stride;
- u8 eu_stride;
};
/*
@@ -91,7 +110,7 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
{
struct intel_sseu value = {
.slice_mask = sseu->slice_mask,
- .subslice_mask = sseu->subslice_mask[0],
+ .subslice_mask = sseu->subslice_mask.hsw[0],
.min_eus_per_subslice = sseu->max_eus_per_subslice,
.max_eus_per_subslice = sseu->max_eus_per_subslice,
};
@@ -103,18 +122,28 @@ static inline bool
intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice,
int subslice)
{
- u8 mask;
- int ss_idx = subslice / BITS_PER_BYTE;
-
if (slice >= sseu->max_slices ||
subslice >= sseu->max_subslices)
return false;
- GEM_BUG_ON(ss_idx >= sseu->ss_stride);
-
- mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx];
+ if (sseu->has_xehp_dss)
+ return test_bit(subslice, sseu->subslice_mask.xehp);
+ else
+ return sseu->subslice_mask.hsw[slice] & BIT(subslice);
+}
- return mask & BIT(subslice % BITS_PER_BYTE);
+/*
+ * Used to obtain the index of the first DSS. Can start searching from the
+ * beginning of a specific dss group (e.g., gslice, cslice, etc.) if
+ * groupsize and groupnum are non-zero.
+ */
+static inline unsigned int
+intel_sseu_find_first_xehp_dss(const struct sseu_dev_info *sseu, int groupsize,
+ int groupnum)
+{
+ return find_next_bit(sseu->subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->subslice_mask),
+ groupnum * groupsize);
}
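find_next_bit() returns the index of the first set bit at or after the start position, or the bitmap size if none is found, so callers can compare the result against a group boundary. A plain-C sketch of those semantics:

static unsigned int find_next_bit_sketch(const unsigned long *map,
					 unsigned int nbits, unsigned int start)
{
	unsigned int i, bpl = 8 * sizeof(unsigned long);

	for (i = start; i < nbits; i++)
		if (map[i / bpl] & (1UL << (i % bpl)))
			return i;
	return nbits;	/* no set bit found */
}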
void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
@@ -124,14 +153,10 @@ unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
unsigned int
-intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
+intel_sseu_get_hsw_subslices(const struct sseu_dev_info *sseu, u8 slice);
-u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
-
-u32 intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu);
-
-void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
- u8 *subslice_mask, u32 ss_mask);
+intel_sseu_ss_mask_t
+intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu);
void intel_sseu_info_init(struct intel_gt *gt);
@@ -143,6 +168,15 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
const struct sseu_dev_info *sseu,
struct drm_printer *p);
-u16 intel_slicemask_from_dssmask(u64 dss_mask, int dss_per_slice);
+u16 intel_slicemask_from_xehp_dssmask(intel_sseu_ss_mask_t dss_mask, int dss_per_slice);
+
+int intel_sseu_copy_eumask_to_user(void __user *to,
+ const struct sseu_dev_info *sseu);
+int intel_sseu_copy_ssmask_to_user(void __user *to,
+ const struct sseu_dev_info *sseu);
+
+void intel_sseu_print_ss_info(const char *type,
+ const struct sseu_dev_info *sseu,
+ struct seq_file *m);
#endif /* __INTEL_SSEU_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
index 2d5d011e01db..c2ee5e1826b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
@@ -4,6 +4,7 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/bitmap.h>
#include <linux/string_helpers.h>
#include "i915_drv.h"
@@ -11,14 +12,6 @@
#include "intel_gt_regs.h"
#include "intel_sseu_debugfs.h"
-static void sseu_copy_subslices(const struct sseu_dev_info *sseu,
- int slice, u8 *to_mask)
-{
- int offset = slice * sseu->ss_stride;
-
- memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
-}
-
static void cherryview_sseu_device_status(struct intel_gt *gt,
struct sseu_dev_info *sseu)
{
@@ -41,7 +34,7 @@ static void cherryview_sseu_device_status(struct intel_gt *gt,
continue;
sseu->slice_mask = BIT(0);
- sseu->subslice_mask[0] |= BIT(ss);
+ sseu->subslice_mask.hsw[0] |= BIT(ss);
eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
@@ -92,7 +85,7 @@ static void gen11_sseu_device_status(struct intel_gt *gt,
continue;
sseu->slice_mask |= BIT(s);
- sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
+ sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
@@ -147,21 +140,17 @@ static void gen9_sseu_device_status(struct intel_gt *gt,
sseu->slice_mask |= BIT(s);
if (IS_GEN9_BC(gt->i915))
- sseu_copy_subslices(&info->sseu, s,
- sseu->subslice_mask);
+ sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
- u8 ss_idx = s * info->sseu.ss_stride +
- ss / BITS_PER_BYTE;
if (IS_GEN9_LP(gt->i915)) {
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
- sseu->subslice_mask[ss_idx] |=
- BIT(ss % BITS_PER_BYTE);
+ sseu->subslice_mask.hsw[s] |= BIT(ss);
}
eu_cnt = eu_reg[2 * s + ss / 2] & eu_mask[ss % 2];
@@ -188,8 +177,7 @@ static void bdw_sseu_device_status(struct intel_gt *gt,
if (sseu->slice_mask) {
sseu->eu_per_subslice = info->sseu.eu_per_subslice;
for (s = 0; s < fls(sseu->slice_mask); s++)
- sseu_copy_subslices(&info->sseu, s,
- sseu->subslice_mask);
+ sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
sseu->eu_total = sseu->eu_per_subslice *
intel_sseu_subslice_total(sseu);
@@ -208,7 +196,6 @@ static void i915_print_sseu_info(struct seq_file *m,
const struct sseu_dev_info *sseu)
{
const char *type = is_available_info ? "Available" : "Enabled";
- int s;
seq_printf(m, " %s Slice Mask: %04x\n", type,
sseu->slice_mask);
@@ -216,10 +203,7 @@ static void i915_print_sseu_info(struct seq_file *m,
hweight8(sseu->slice_mask));
seq_printf(m, " %s Subslice Total: %u\n", type,
intel_sseu_subslice_total(sseu));
- for (s = 0; s < fls(sseu->slice_mask); s++) {
- seq_printf(m, " %s Slice%i subslices: %u\n", type,
- s, intel_sseu_subslices_per_slice(sseu, s));
- }
+ intel_sseu_print_ss_info(type, sseu, m);
seq_printf(m, " %s EU Total: %u\n", type,
sseu->eu_total);
seq_printf(m, " %s EU Per Subslice: %u\n", type,
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index a05c4b99b3fb..e8111fce56d0 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -9,6 +9,7 @@
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
+#include "intel_gt_mcr.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
@@ -688,6 +689,9 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
+
+ /* Wa_15010599737:dg2 */
+ wa_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
}
static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
@@ -776,7 +780,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
goto done;
- if (IS_DG2(i915))
+ if (IS_PONTEVECCHIO(i915))
+ ; /* noop; none at this time */
+ else if (IS_DG2(i915))
dg2_ctx_workarounds_init(engine, wal);
else if (IS_XEHPSDV(i915))
; /* noop; none at this time */
@@ -948,8 +954,8 @@ gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
* on s/ss combo, the read should be done with read_subslice_reg.
*/
slice = ffs(sseu->slice_mask) - 1;
- GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
- subslice = ffs(intel_sseu_get_subslices(sseu, slice));
+ GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
+ subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
GEM_BUG_ON(!subslice);
subslice--;
@@ -1080,18 +1086,17 @@ static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
gt->default_steering.instanceid = subslice;
if (drm_debug_enabled(DRM_UT_DRIVER))
- intel_gt_report_steering(&p, gt, false);
+ intel_gt_mcr_report_steering(&p, gt, false);
}
static void
icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
const struct sseu_dev_info *sseu = &gt->info.sseu;
- unsigned int slice, subslice;
+ unsigned int subslice;
GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
- slice = 0;
/*
* Although a platform may have subslices, we need to always steer
@@ -1102,7 +1107,7 @@ icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
* one of the higher subslices, we run the risk of reading back 0's or
* random garbage.
*/
- subslice = __ffs(intel_sseu_get_subslices(sseu, slice));
+ subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));
/*
* If the subslice we picked above also steers us to a valid L3 bank,
@@ -1112,7 +1117,7 @@ icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
if (gt->info.l3bank_mask & BIT(subslice))
gt->steering_table[L3BANK] = NULL;
- __add_mcr_wa(gt, wal, slice, subslice);
+ __add_mcr_wa(gt, wal, 0, subslice);
}
static void
@@ -1120,7 +1125,6 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
const struct sseu_dev_info *sseu = &gt->info.sseu;
unsigned long slice, subslice = 0, slice_mask = 0;
- u64 dss_mask = 0;
u32 lncf_mask = 0;
int i;
@@ -1151,8 +1155,8 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
*/
/* Find the potential gslice candidates */
- dss_mask = intel_sseu_get_subslices(sseu, 0);
- slice_mask = intel_slicemask_from_dssmask(dss_mask, GEN_DSS_PER_GSLICE);
+ slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
+ GEN_DSS_PER_GSLICE);
/*
* Find the potential LNCF candidates. Either LNCF within a valid
@@ -1177,9 +1181,8 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
}
slice = __ffs(slice_mask);
- subslice = __ffs(dss_mask >> (slice * GEN_DSS_PER_GSLICE));
- WARN_ON(subslice > GEN_DSS_PER_GSLICE);
- WARN_ON(dss_mask >> (slice * GEN_DSS_PER_GSLICE) == 0);
+ subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
+ GEN_DSS_PER_GSLICE;
__add_mcr_wa(gt, wal, slice, subslice);
@@ -1197,6 +1200,20 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
+pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ unsigned int dss;
+
+ /*
+ * Setup implicit steering for COMPUTE and DSS ranges to the first
+ * non-fused-off DSS. All other types of MCR registers will be
+ * explicitly steered.
+ */
+ dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
+ __add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE);
+}
+
+static void
icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
@@ -1487,6 +1504,18 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
* performance guide section.
*/
wa_write_or(wal, GEN12_SQCM, EN_32B_ACCESS);
+
+ /* Wa_14015795083 */
+ wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
+}
+
+static void
+pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ pvc_init_mcr(gt, wal);
+
+ /* Wa_14015795083 */
+ wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
}
static void
@@ -1494,7 +1523,9 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
- if (IS_DG2(i915))
+ if (IS_PONTEVECCHIO(i915))
+ pvc_gt_workarounds_init(gt, wal);
+ else if (IS_DG2(i915))
dg2_gt_workarounds_init(gt, wal);
else if (IS_XEHPSDV(i915))
xehpsdv_gt_workarounds_init(gt, wal);
@@ -1596,13 +1627,13 @@ wa_list_apply(struct intel_gt *gt, const struct i915_wa_list *wal)
u32 val, old = 0;
/* open-coded rmw due to steering */
- old = wa->clr ? intel_gt_read_register_fw(gt, wa->reg) : 0;
+ old = wa->clr ? intel_gt_mcr_read_any_fw(gt, wa->reg) : 0;
val = (old & ~wa->clr) | wa->set;
if (val != old || !wa->clr)
intel_uncore_write_fw(uncore, wa->reg, val);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- wa_verify(wa, intel_gt_read_register_fw(gt, wa->reg),
+ wa_verify(wa, intel_gt_mcr_read_any_fw(gt, wa->reg),
wal->name, "application");
}
@@ -1633,7 +1664,7 @@ static bool wa_list_verify(struct intel_gt *gt,
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
ok &= wa_verify(wa,
- intel_gt_read_register_fw(gt, wa->reg),
+ intel_gt_mcr_read_any_fw(gt, wa->reg),
wal->name, from);
intel_uncore_forcewake_put__locked(uncore, fw);
@@ -1924,6 +1955,32 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
}
}
+static void blacklist_trtt(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ /*
+ * Prevent read/write access to [0x4400, 0x4600) which covers
+ * the TRTT range across all engines. Note that normally userspace
+ * cannot access the other engines' TRTT control, but for simplicity
+ * we cover the entire range on each engine.
+ */
+ whitelist_reg_ext(w, _MMIO(0x4400),
+ RING_FORCE_TO_NONPRIV_DENY |
+ RING_FORCE_TO_NONPRIV_RANGE_64);
+ whitelist_reg_ext(w, _MMIO(0x4500),
+ RING_FORCE_TO_NONPRIV_DENY |
+ RING_FORCE_TO_NONPRIV_RANGE_64);
+}
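As a sanity check on the comment: a RANGE_64 nonpriv entry spans 64 consecutive 32-bit registers, i.e. 0x100 bytes of MMIO space, so the two entries together cover exactly [0x4400, 0x4600). The arithmetic, as standalone C:

#define RANGE_64_REGS	64
#define REG_BYTES	4

_Static_assert(0x4400 + RANGE_64_REGS * REG_BYTES == 0x4500, "first range");
_Static_assert(0x4500 + RANGE_64_REGS * REG_BYTES == 0x4600, "second range");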
+
+static void pvc_whitelist_build(struct intel_engine_cs *engine)
+{
+ allow_read_ctx_timestamp(engine);
+
+ /* Wa_16014440446:pvc */
+ blacklist_trtt(engine);
+}
+
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -1931,7 +1988,9 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
wa_init_start(w, "whitelist", engine->name);
- if (IS_DG2(i915))
+ if (IS_PONTEVECCHIO(i915))
+ pvc_whitelist_build(engine);
+ else if (IS_DG2(i915))
dg2_whitelist_build(engine);
else if (IS_XEHPSDV(i915))
xehpsdv_whitelist_build(engine);
@@ -1994,27 +2053,44 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
static void
engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
- u8 mocs;
+ u8 mocs_w, mocs_r;
/*
- * RING_CMD_CCTL are need to be programed to un-cached
- * for memory writes and reads outputted by Command
- * Streamers on Gen12 onward platforms.
+ * RING_CMD_CCTL specifies the default MOCS entry that will be used
+ * by the command streamer when executing commands that don't have
+ * a way to explicitly specify a MOCS setting. The default should
+ * usually reference whichever MOCS entry corresponds to uncached
+ * behavior, although use of a WB cached entry is recommended by the
+ * spec in certain circumstances on specific platforms.
*/
if (GRAPHICS_VER(engine->i915) >= 12) {
- mocs = engine->gt->mocs.uc_index;
+ mocs_r = engine->gt->mocs.uc_index;
+ mocs_w = engine->gt->mocs.uc_index;
+
+ if (HAS_L3_CCS_READ(engine->i915) &&
+ engine->class == COMPUTE_CLASS) {
+ mocs_r = engine->gt->mocs.wb_index;
+
+ /*
+ * Even on the few platforms where MOCS 0 is a
+ * legitimate table entry, it's never the correct
+ * setting to use here; we can assume the MOCS init
+ * just forgot to initialize wb_index.
+ */
+ drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
+ }
+
wa_masked_field_set(wal,
RING_CMD_CCTL(engine->mmio_base),
CMD_CCTL_MOCS_MASK,
- CMD_CCTL_MOCS_OVERRIDE(mocs, mocs));
+ CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
}
}
static bool needs_wa_1308578152(struct intel_engine_cs *engine)
{
- u64 dss_mask = intel_sseu_get_subslices(&engine->gt->info.sseu, 0);
-
- return (dss_mask & GENMASK(GEN_DSS_PER_GSLICE - 1, 0)) == 0;
+ return intel_sseu_find_first_xehp_dss(&engine->gt->info.sseu, 0, 0) >=
+ GEN_DSS_PER_GSLICE;
}
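The rewritten predicate is equivalent to the mask test it replaces: the workaround is needed exactly when no DSS within the first gslice (the low GEN_DSS_PER_GSLICE bits of the DSS mask) is enabled, i.e. the first enabled DSS lies at or beyond the gslice boundary. A plain-C equivalent of the check:

static int gslice0_fused_off(unsigned long long dss_mask, int dss_per_gslice)
{
	return (dss_mask & ((1ULL << dss_per_gslice) - 1)) == 0;
}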
static void
@@ -2023,9 +2099,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
if (IS_DG2(i915)) {
- /* Wa_14015227452:dg2 */
- wa_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
-
/* Wa_1509235366:dg2 */
wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
GLOBAL_INVALIDATION_MODE);
@@ -2036,12 +2109,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* performance guide section.
*/
wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
-
- /* Wa_18018781329:dg2 */
- wa_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
- wa_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
- wa_write_or(wal, VDBX_MOD_CTRL, FORCE_MISS_FTLB);
- wa_write_or(wal, VEBX_MOD_CTRL, FORCE_MISS_FTLB);
}
if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
@@ -2160,6 +2227,16 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB);
}
+ if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G10(i915)) {
+ /* Wa_22014600077:dg2 */
+ wa_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
+ 0 /* Wa_14012342262: write-only reg, so skip
+ verification */,
+ true);
+ }
+
if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
/*
@@ -2583,6 +2660,15 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
}
+static void
+ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
+ /* Wa_14014999345:pvc */
+ wa_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
+ }
+}
+
/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
@@ -2597,6 +2683,18 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
{
struct drm_i915_private *i915 = engine->i915;
+ if (IS_PONTEVECCHIO(i915)) {
+ /*
+ * The following is not actually a "workaround" but rather
+ * a recommended tuning setting documented in the bspec's
+ * performance guide section.
+ */
+ wa_write(wal, XEHPC_L3SCRUB, SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+
+ /* Wa_16016694945 */
+ wa_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
+ }
+
if (IS_XEHPSDV(i915)) {
/* Wa_1409954639 */
wa_masked_en(wal,
@@ -2629,9 +2727,21 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
GLOBAL_INVALIDATION_MODE);
}
- if (IS_DG2(i915)) {
- /* Wa_22014226127:dg2 */
+ if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
+ /* Wa_14015227452:dg2,pvc */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
+
+ /* Wa_22014226127:dg2,pvc */
wa_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
+
+ /* Wa_16015675438:dg2,pvc */
+ wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
+
+ /* Wa_18018781329:dg2,pvc */
+ wa_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, VEBX_MOD_CTRL, FORCE_MISS_FTLB);
}
}
@@ -2651,7 +2761,9 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
general_render_compute_wa_init(engine, wal);
- if (engine->class == RENDER_CLASS)
+ if (engine->class == COMPUTE_CLASS)
+ ccs_engine_wa_init(engine, wal);
+ else if (engine->class == RENDER_CLASS)
rcs_engine_wa_init(engine, wal);
else
xcs_engine_wa_init(engine, wal);
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 83ff4c2e57c5..6493265d5f64 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -976,6 +976,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
{
struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine, *other;
+ struct active_engine *threads;
enum intel_engine_id id, tmp;
struct hang h;
int err = 0;
@@ -996,8 +997,11 @@ static int __igt_reset_engines(struct intel_gt *gt,
h.ctx->sched.priority = 1024;
}
+ threads = kmalloc_array(I915_NUM_ENGINES, sizeof(*threads), GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
+
for_each_engine(engine, gt, id) {
- struct active_engine threads[I915_NUM_ENGINES] = {};
unsigned long device = i915_reset_count(global);
unsigned long count = 0, reported;
bool using_guc = intel_engine_uses_guc(engine);
@@ -1016,7 +1020,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
break;
}
- memset(threads, 0, sizeof(threads));
+ memset(threads, 0, sizeof(*threads) * I915_NUM_ENGINES);
for_each_engine(other, gt, tmp) {
struct task_struct *tsk;
@@ -1236,6 +1240,7 @@ unwind:
break;
}
}
+ kfree(threads);
if (intel_gt_is_wedged(gt))
err = -EIO;
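The selftest change follows a common kernel pattern: an on-stack array of I915_NUM_ENGINES active_engine structs is too large for the kernel stack, so it moves to the heap and is cleared per iteration instead. A userspace analogue of the shape of the fix (struct layout and sizes are illustrative):

#include <stdlib.h>
#include <string.h>

struct active_engine_sketch { void *task; unsigned long resets; };

static int run_all(int num_engines)
{
	struct active_engine_sketch *threads;

	threads = calloc(num_engines, sizeof(*threads));	/* was on-stack */
	if (!threads)
		return -1;

	/* ... per-engine loop, clearing the array between iterations ... */
	memset(threads, 0, sizeof(*threads) * num_engines);

	free(threads);
	return 0;
}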
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index 2cd184ab32b1..cfd736d88939 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -31,7 +31,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
val = gpu_freq;
- if (snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ if (snb_pcode_read(llc_to_gt(llc)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
&val, NULL)) {
pr_err("Failed to read freq table[%d], range [%d, %d]\n",
gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 6a69ac0184ad..cfb4708dd62e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -521,7 +521,7 @@ static void show_pcu_config(struct intel_rps *rps)
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
int ia_freq = gpu_freq;
- snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq, NULL);
pr_info("%5d %5d %5d\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index b768cea5943d..ac29691e0b1a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -8,6 +8,11 @@
#define delay_for_h2g() usleep_range(H2G_DELAY, H2G_DELAY + 10000)
#define FREQUENCY_REQ_UNIT DIV_ROUND_CLOSEST(GT_FREQUENCY_MULTIPLIER, \
GEN9_FREQ_SCALER)
+enum test_type {
+ VARY_MIN,
+ VARY_MAX,
+ MAX_GRANTED
+};
static int slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
@@ -36,147 +41,114 @@ static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq)
return ret;
}
-static int live_slpc_clamp_min(void *arg)
+static int vary_max_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
+ u32 *max_act_freq)
{
- struct drm_i915_private *i915 = arg;
- struct intel_gt *gt = to_gt(i915);
- struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
- struct intel_rps *rps = &gt->rps;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- u32 slpc_min_freq, slpc_max_freq;
+ u32 step, max_freq, req_freq;
+ u32 act_freq;
int err = 0;
- if (!intel_uc_uses_guc_slpc(&gt->uc))
- return 0;
+ /* Go from max to min in 5 steps */
+ step = (slpc->rp0_freq - slpc->min_freq) / NUM_STEPS;
+ *max_act_freq = slpc->min_freq;
+ for (max_freq = slpc->rp0_freq; max_freq > slpc->min_freq;
+ max_freq -= step) {
+ err = slpc_set_max_freq(slpc, max_freq);
+ if (err)
+ break;
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
+ req_freq = intel_rps_read_punit_req_frequency(rps);
- if (intel_guc_slpc_get_max_freq(slpc, &slpc_max_freq)) {
- pr_err("Could not get SLPC max freq\n");
- return -EIO;
- }
+ /* GuC requests freq in multiples of 50/3 MHz */
+ if (req_freq > (max_freq + FREQUENCY_REQ_UNIT)) {
+ pr_err("SWReq is %d, should be at most %d\n", req_freq,
+ max_freq + FREQUENCY_REQ_UNIT);
+ err = -EINVAL;
+ }
- if (intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq)) {
- pr_err("Could not get SLPC min freq\n");
- return -EIO;
- }
+ act_freq = intel_rps_read_actual_frequency(rps);
+ if (act_freq > *max_act_freq)
+ *max_act_freq = act_freq;
- if (slpc_min_freq == slpc_max_freq) {
- pr_err("Min/Max are fused to the same value\n");
- return -EINVAL;
+ if (err)
+ break;
}
- intel_gt_pm_wait_for_idle(gt);
- intel_gt_pm_get(gt);
- for_each_engine(engine, gt, id) {
- struct i915_request *rq;
- u32 step, min_freq, req_freq;
- u32 act_freq, max_act_freq;
+ return err;
+}
- if (!intel_engine_can_store_dword(engine))
- continue;
+static int vary_min_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
+ u32 *max_act_freq)
+{
+ u32 step, min_freq, req_freq;
+ u32 act_freq;
+ int err = 0;
- /* Go from min to max in 5 steps */
- step = (slpc_max_freq - slpc_min_freq) / NUM_STEPS;
- max_act_freq = slpc_min_freq;
- for (min_freq = slpc_min_freq; min_freq < slpc_max_freq;
- min_freq += step) {
- err = slpc_set_min_freq(slpc, min_freq);
- if (err)
- break;
-
- st_engine_heartbeat_disable(engine);
-
- rq = igt_spinner_create_request(&spin,
- engine->kernel_context,
- MI_NOOP);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- st_engine_heartbeat_enable(engine);
- break;
- }
+ /* Go from min to max in 5 steps */
+ step = (slpc->rp0_freq - slpc->min_freq) / NUM_STEPS;
+ *max_act_freq = slpc->min_freq;
+ for (min_freq = slpc->min_freq; min_freq < slpc->rp0_freq;
+ min_freq += step) {
+ err = slpc_set_min_freq(slpc, min_freq);
+ if (err)
+ break;
- i915_request_add(rq);
+ req_freq = intel_rps_read_punit_req_frequency(rps);
- if (!igt_wait_for_spinner(&spin, rq)) {
- pr_err("%s: Spinner did not start\n",
- engine->name);
- igt_spinner_end(&spin);
- st_engine_heartbeat_enable(engine);
- intel_gt_set_wedged(engine->gt);
- err = -EIO;
- break;
- }
+ /* GuC requests freq in multiples of 50/3 MHz */
+ if (req_freq < (min_freq - FREQUENCY_REQ_UNIT)) {
+ pr_err("SWReq is %d, should be at least %d\n", req_freq,
+ min_freq - FREQUENCY_REQ_UNIT);
+ err = -EINVAL;
+ }
- /* Wait for GuC to detect business and raise
- * requested frequency if necessary.
- */
- delay_for_h2g();
+ act_freq = intel_rps_read_actual_frequency(rps);
+ if (act_freq > *max_act_freq)
+ *max_act_freq = act_freq;
- req_freq = intel_rps_read_punit_req_frequency(rps);
+ if (err)
+ break;
+ }
- /* GuC requests freq in multiples of 50/3 MHz */
- if (req_freq < (min_freq - FREQUENCY_REQ_UNIT)) {
- pr_err("SWReq is %d, should be at least %d\n", req_freq,
- min_freq - FREQUENCY_REQ_UNIT);
- igt_spinner_end(&spin);
- st_engine_heartbeat_enable(engine);
- err = -EINVAL;
- break;
- }
+ return err;
+}
- act_freq = intel_rps_read_actual_frequency(rps);
- if (act_freq > max_act_freq)
- max_act_freq = act_freq;
+static int max_granted_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps, u32 *max_act_freq)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ u32 perf_limit_reasons;
+ int err = 0;
- igt_spinner_end(&spin);
- st_engine_heartbeat_enable(engine);
- }
+ err = slpc_set_min_freq(slpc, slpc->rp0_freq);
+ if (err)
+ return err;
- pr_info("Max actual frequency for %s was %d\n",
- engine->name, max_act_freq);
+ *max_act_freq = intel_rps_read_actual_frequency(rps);
+ if (*max_act_freq != slpc->rp0_freq) {
+ /* Check if there was some throttling by pcode */
+ perf_limit_reasons = intel_uncore_read(gt->uncore, GT0_PERF_LIMIT_REASONS);
- /* Actual frequency should rise above min */
- if (max_act_freq == slpc_min_freq) {
- pr_err("Actual freq did not rise above min\n");
+ /* If not, this is an error */
+ if (!(perf_limit_reasons & GT0_PERF_LIMIT_REASONS_MASK)) {
+ pr_err("Pcode did not grant max freq\n");
err = -EINVAL;
+ } else {
+ pr_info("Pcode throttled frequency 0x%x\n", perf_limit_reasons);
}
-
- if (err)
- break;
}
- /* Restore min/max frequencies */
- slpc_set_max_freq(slpc, slpc_max_freq);
- slpc_set_min_freq(slpc, slpc_min_freq);
-
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- intel_gt_pm_put(gt);
- igt_spinner_fini(&spin);
- intel_gt_pm_wait_for_idle(gt);
-
return err;
}
-static int live_slpc_clamp_max(void *arg)
+static int run_test(struct intel_gt *gt, int test_type)
{
- struct drm_i915_private *i915 = arg;
- struct intel_gt *gt = to_gt(i915);
- struct intel_guc_slpc *slpc;
- struct intel_rps *rps;
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_rps *rps = &gt->rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
- int err = 0;
u32 slpc_min_freq, slpc_max_freq;
-
- slpc = &gt->uc.guc.slpc;
- rps = &gt->rps;
+ int err = 0;
if (!intel_uc_uses_guc_slpc(&gt->uc))
return 0;
@@ -194,7 +166,7 @@ static int live_slpc_clamp_max(void *arg)
return -EIO;
}
- if (slpc_min_freq == slpc_max_freq) {
+ if (slpc->min_freq == slpc->rp0_freq) {
pr_err("Min/Max are fused to the same value\n");
return -EINVAL;
}
@@ -203,93 +175,82 @@ static int live_slpc_clamp_max(void *arg)
intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
struct i915_request *rq;
- u32 max_freq, req_freq;
- u32 act_freq, max_act_freq;
- u32 step;
+ u32 max_act_freq;
if (!intel_engine_can_store_dword(engine))
continue;
- /* Go from max to min in 5 steps */
- step = (slpc_max_freq - slpc_min_freq) / NUM_STEPS;
- max_act_freq = slpc_min_freq;
- for (max_freq = slpc_max_freq; max_freq > slpc_min_freq;
- max_freq -= step) {
- err = slpc_set_max_freq(slpc, max_freq);
- if (err)
- break;
-
- st_engine_heartbeat_disable(engine);
-
- rq = igt_spinner_create_request(&spin,
- engine->kernel_context,
- MI_NOOP);
- if (IS_ERR(rq)) {
- st_engine_heartbeat_enable(engine);
- err = PTR_ERR(rq);
- break;
- }
+ st_engine_heartbeat_disable(engine);
- i915_request_add(rq);
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ st_engine_heartbeat_enable(engine);
+ break;
+ }
- if (!igt_wait_for_spinner(&spin, rq)) {
- pr_err("%s: SLPC spinner did not start\n",
- engine->name);
- igt_spinner_end(&spin);
- st_engine_heartbeat_enable(engine);
- intel_gt_set_wedged(engine->gt);
- err = -EIO;
- break;
- }
+ i915_request_add(rq);
- delay_for_h2g();
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("%s: Spinner did not start\n",
+ engine->name);
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ break;
+ }
- /* Verify that SWREQ indeed was set to specific value */
- req_freq = intel_rps_read_punit_req_frequency(rps);
+ switch (test_type) {
+ case VARY_MIN:
+ err = vary_min_freq(slpc, rps, &max_act_freq);
+ break;
- /* GuC requests freq in multiples of 50/3 MHz */
- if (req_freq > (max_freq + FREQUENCY_REQ_UNIT)) {
- pr_err("SWReq is %d, should be at most %d\n", req_freq,
- max_freq + FREQUENCY_REQ_UNIT);
+ case VARY_MAX:
+ err = vary_max_freq(slpc, rps, &max_act_freq);
+ break;
+
+ case MAX_GRANTED:
+ /* Media engines have a different RP0 */
+ if (engine->class == VIDEO_DECODE_CLASS ||
+ engine->class == VIDEO_ENHANCEMENT_CLASS) {
igt_spinner_end(&spin);
st_engine_heartbeat_enable(engine);
- err = -EINVAL;
- break;
+ err = 0;
+ continue;
}
- act_freq = intel_rps_read_actual_frequency(rps);
- if (act_freq > max_act_freq)
- max_act_freq = act_freq;
-
- st_engine_heartbeat_enable(engine);
- igt_spinner_end(&spin);
-
- if (err)
- break;
+ err = max_granted_freq(slpc, rps, &max_act_freq);
+ break;
}
pr_info("Max actual frequency for %s was %d\n",
engine->name, max_act_freq);
/* Actual frequency should rise above min */
- if (max_act_freq == slpc_min_freq) {
+ if (max_act_freq <= slpc_min_freq) {
pr_err("Actual freq did not rise above min\n");
+ pr_err("Perf Limit Reasons: 0x%x\n",
+ intel_uncore_read(gt->uncore, GT0_PERF_LIMIT_REASONS));
err = -EINVAL;
}
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- break;
- }
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
if (err)
break;
}
- /* Restore min/max freq */
+ /* Restore min/max frequencies */
slpc_set_max_freq(slpc, slpc_max_freq);
slpc_set_min_freq(slpc, slpc_min_freq);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
intel_gt_pm_put(gt);
igt_spinner_fini(&spin);
intel_gt_pm_wait_for_idle(gt);
@@ -297,11 +258,37 @@ static int live_slpc_clamp_max(void *arg)
return err;
}
+static int live_slpc_vary_min(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = to_gt(i915);
+
+ return run_test(gt, VARY_MIN);
+}
+
+static int live_slpc_vary_max(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = to_gt(i915);
+
+ return run_test(gt, VARY_MAX);
+}
+
+/* Check if pcode can grant RP0 */
+static int live_slpc_max_granted(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = to_gt(i915);
+
+ return run_test(gt, MAX_GRANTED);
+}
+
int intel_slpc_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
- SUBTEST(live_slpc_clamp_max),
- SUBTEST(live_slpc_clamp_min),
+ SUBTEST(live_slpc_vary_max),
+ SUBTEST(live_slpc_vary_min),
+ SUBTEST(live_slpc_max_granted),
};
if (intel_gt_is_wedged(to_gt(i915)))
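A note on the rework above: the removed hunk stepped the max-frequency cap from slpc_max_freq down to slpc_min_freq in NUM_STEPS increments, spinning a request at each step and recording the highest actual frequency; that stepping presumably now lives inside vary_max_freq()/vary_min_freq(), which are not shown in this diff. A minimal standalone sketch of the stepping loop, with invented frequency bounds, purely for illustration:

#include <stdio.h>

#define NUM_STEPS 5

/*
 * Standalone sketch of the max-frequency stepping loop; the 300/1200 MHz
 * bounds are made-up values, not real SLPC limits.
 */
int main(void)
{
	unsigned int slpc_min_freq = 300, slpc_max_freq = 1200;
	unsigned int step = (slpc_max_freq - slpc_min_freq) / NUM_STEPS;
	unsigned int max_freq;

	for (max_freq = slpc_max_freq; max_freq > slpc_min_freq; max_freq -= step)
		printf("clamp max to %u MHz, spin, read actual freq\n", max_freq);

	return 0;
}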
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
index 62cb4254a77a..4c840a2639dc 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
@@ -122,6 +122,12 @@ enum slpc_param_id {
SLPC_MAX_PARAM = 32,
};
+enum slpc_media_ratio_mode {
+ SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL = 0,
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE = 1,
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO = 2,
+};
+
enum slpc_event_id {
SLPC_EVENT_RESET = 0,
SLPC_EVENT_SHUTDOWN = 1,
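These new modes control how the media IP clock tracks the GT clock: dynamic control leaves the ratio to SLPC, while the fixed modes pin it. A standalone sketch of one plausible reading of the fixed ratios; the 1:2 interpretation (media at half the GT clock) is an assumption for illustration, not something this header states:

#include <stdio.h>

enum slpc_media_ratio_mode {
	SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL = 0,
	SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE = 1,
	SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO = 2,
};

/* Illustrative mapping from a fixed ratio mode to a media frequency. */
static int media_freq_mhz(enum slpc_media_ratio_mode mode, int gt_freq_mhz)
{
	switch (mode) {
	case SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE:
		return gt_freq_mhz;
	case SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO:
		return gt_freq_mhz / 2;	/* assumed meaning of 1:2 */
	default:
		return -1;	/* firmware-managed, not derivable here */
	}
}

int main(void)
{
	printf("%d\n", media_freq_mhz(SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO, 1600));
	return 0;
}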
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 8c6885f43d1a..2706a8c65090 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -327,6 +327,10 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_FOREVER))
flags |= GUC_WA_CONTEXT_ISOLATION;
+ /* Wa_16015675438 */
+ if (!RCS_MASK(gt))
+ flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
+
return flags;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 9feda105f913..a7acffbf15d1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -235,6 +235,14 @@ struct intel_guc {
* @shift: Right shift value for the gpm timestamp
*/
u32 shift;
+
+ /**
+ * @last_stat_jiffies: jiffies at last actual stats collection time
+ * We use this timestamp to ensure we don't oversample the
+ * stats because runtime power management events can trigger
+ * stats collection at much higher rates than required.
+ */
+ unsigned long last_stat_jiffies;
} timestamp;
#ifdef CONFIG_DRM_I915_SELFTEST
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 3eabf4cf8eec..ba7541f3ca61 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -7,6 +7,7 @@
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/shmem_utils.h"
@@ -313,7 +314,7 @@ static long __must_check guc_mmio_reg_add(struct intel_gt *gt,
* tracking, it is easier to just program the default steering for all
* regs that don't need a non-default one.
*/
- intel_gt_get_valid_steering_for_reg(gt, reg, &group, &inst);
+ intel_gt_mcr_get_nonterminated_steering(gt, reg, &group, &inst);
entry.flags |= GUC_REGSET_STEERING(group, inst);
slot = __mmio_reg_add(regset, &entry);
@@ -457,7 +458,7 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
{
info_map_write(info_map, engine_enabled_masks[GUC_RENDER_CLASS], RCS_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_COMPUTE_CLASS], CCS_MASK(gt));
- info_map_write(info_map, engine_enabled_masks[GUC_BLITTER_CLASS], 1);
+ info_map_write(info_map, engine_enabled_masks[GUC_BLITTER_CLASS], BCS_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_VIDEO_CLASS], VDBOX_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS], VEBOX_MASK(gt));
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index c4e25966d3e9..75257bd20ff0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -9,6 +9,7 @@
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "guc_capture_fwif.h"
@@ -281,8 +282,7 @@ guc_capture_alloc_steered_lists_xe_lpd(struct intel_guc *guc,
const struct __guc_mmio_reg_descr_group *lists)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
- int slice, subslice, i, num_steer_regs, num_tot_regs = 0;
+ int slice, subslice, iter, i, num_steer_regs, num_tot_regs = 0;
const struct __guc_mmio_reg_descr_group *list;
struct __guc_mmio_reg_descr_group *extlists;
struct __guc_mmio_reg_descr *extarray;
@@ -298,7 +298,7 @@ guc_capture_alloc_steered_lists_xe_lpd(struct intel_guc *guc,
num_steer_regs = ARRAY_SIZE(xe_extregs);
sseu = &gt->info.sseu;
- for_each_instdone_slice_subslice(i915, sseu, slice, subslice)
+ for_each_ss_steering(iter, gt, slice, subslice)
num_tot_regs += num_steer_regs;
if (!num_tot_regs)
@@ -315,7 +315,7 @@ guc_capture_alloc_steered_lists_xe_lpd(struct intel_guc *guc,
}
extarray = extlists[0].extlist;
- for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
+ for_each_ss_steering(iter, gt, slice, subslice) {
for (i = 0; i < num_steer_regs; ++i) {
__fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
++extarray;
@@ -359,9 +359,8 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
num_steer_regs += ARRAY_SIZE(xehpg_extregs);
sseu = &gt->info.sseu;
- for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) {
+ for_each_ss_steering(iter, gt, slice, subslice)
num_tot_regs += num_steer_regs;
- }
if (!num_tot_regs)
return;
@@ -377,7 +376,7 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
}
extarray = extlists[0].extlist;
- for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) {
+ for_each_ss_steering(iter, gt, slice, subslice) {
for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
__fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
++extarray;
@@ -420,72 +419,6 @@ guc_capture_get_device_reglist(struct intel_guc *guc)
return default_lists;
}
-static const char *
-__stringify_owner(u32 owner)
-{
- switch (owner) {
- case GUC_CAPTURE_LIST_INDEX_PF:
- return "PF";
- case GUC_CAPTURE_LIST_INDEX_VF:
- return "VF";
- default:
- return "unknown";
- }
-
- return "";
-}
-
-static const char *
-__stringify_type(u32 type)
-{
- switch (type) {
- case GUC_CAPTURE_LIST_TYPE_GLOBAL:
- return "Global";
- case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
- return "Class";
- case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
- return "Instance";
- default:
- return "unknown";
- }
-
- return "";
-}
-
-static const char *
-__stringify_engclass(u32 class)
-{
- switch (class) {
- case GUC_RENDER_CLASS:
- return "Render";
- case GUC_VIDEO_CLASS:
- return "Video";
- case GUC_VIDEOENHANCE_CLASS:
- return "VideoEnhance";
- case GUC_BLITTER_CLASS:
- return "Blitter";
- case GUC_COMPUTE_CLASS:
- return "Compute";
- default:
- return "unknown";
- }
-
- return "";
-}
-
-static void
-guc_capture_warn_with_list_info(struct drm_i915_private *i915, char *msg,
- u32 owner, u32 type, u32 classid)
-{
- if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
- drm_dbg(&i915->drm, "GuC-capture: %s for %s %s-Registers.\n", msg,
- __stringify_owner(owner), __stringify_type(type));
- else
- drm_dbg(&i915->drm, "GuC-capture: %s for %s %s-Registers on %s-Engine\n", msg,
- __stringify_owner(owner), __stringify_type(type),
- __stringify_engclass(classid));
-}
-
static int
guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
struct guc_mmio_reg *ptr, u16 num_entries)
@@ -501,11 +434,8 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
return -ENODEV;
match = guc_capture_get_one_list(reglists, owner, type, classid);
- if (!match) {
- guc_capture_warn_with_list_info(i915, "Missing register list init", owner, type,
- classid);
+ if (!match)
return -ENODATA;
- }
for (i = 0; i < num_entries && i < match->num_regs; ++i) {
ptr[i].offset = match->list[i].reg.reg;
@@ -556,7 +486,6 @@ int
intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
size_t *size)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct intel_guc_state_capture *gc = guc->capture;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
int num_regs;
@@ -570,11 +499,8 @@ intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 cl
}
num_regs = guc_cap_list_num_regs(gc, owner, type, classid);
- if (!num_regs) {
- guc_capture_warn_with_list_info(i915, "Missing register list size",
- owner, type, classid);
+ if (!num_regs)
return -ENODATA;
- }
*size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
(num_regs * sizeof(struct guc_mmio_reg)));
@@ -1334,7 +1260,8 @@ static int __guc_capture_flushlog_complete(struct intel_guc *guc)
GUC_CAPTURE_LOG_BUFFER
};
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
+ return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
}
static void __guc_capture_process_output(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 89a7e5ec0614..323b055e5db9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -105,6 +105,7 @@
#define GUC_WA_PRE_PARSER BIT(14)
#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17)
#define GUC_WA_POLLCS BIT(18)
+#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
#define GUC_CTL_FEATURE 2
#define GUC_CTL_ENABLE_SLPC BIT(2)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
index 79c66b6b51a3..4781fccc2687 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -94,9 +94,9 @@ static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig
static bool has_table(struct drm_i915_private *i915)
{
- if (IS_ALDERLAKE_P(i915))
+ if (IS_ALDERLAKE_P(i915) && !IS_ADLP_N(i915))
return true;
- if (IS_DG2(i915))
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return true;
return false;
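has_table() now keys on the full graphics IP version instead of a growing platform list. i915's IP_VER packs the major version into the high bits and the release into the low bits, so a single integer comparison orders versions correctly; a minimal demonstration (the macro body matches my reading of the driver headers, treat it as illustrative):

#include <stdio.h>

/* Major version in the high byte, release in the low byte. */
#define IP_VER(ver, rel) ((ver) << 8 | (rel))

int main(void)
{
	unsigned int hw = IP_VER(12, 70);

	printf("%s\n", hw >= IP_VER(12, 55) ? "has hwconfig table" : "no table");
	return 0;
}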
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 78d2989fe917..25b2d7ce6640 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -31,7 +31,7 @@ static int guc_action_flush_log_complete(struct intel_guc *guc)
GUC_DEBUG_LOG_BUFFER
};
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
+ return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
}
static int guc_action_flush_log(struct intel_guc *guc)
@@ -588,7 +588,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
/*
* We require SSE 4.1 for fast reads from the GuC log buffer and
* it should be present on the chipsets supporting GuC based
- * submisssions.
+ * submissions.
*/
if (!i915_has_memcpy_from_wc()) {
ret = -ENXIO;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
index e00661fb0853..8f8dd05835c5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -49,7 +49,6 @@ static int guc_action_control_gucrc(struct intel_guc *guc, bool enable)
static int __guc_rc_control(struct intel_guc *guc, bool enable)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_device *drm = &guc_to_gt(guc)->i915->drm;
int ret;
if (!intel_uc_uses_guc_rc(&gt->uc))
@@ -60,8 +59,8 @@ static int __guc_rc_control(struct intel_guc *guc, bool enable)
ret = guc_action_control_gucrc(guc, enable);
if (ret) {
- drm_err(drm, "Failed to %s GuC RC (%pe)\n",
- str_enable_disable(enable), ERR_PTR(ret));
+ i915_probe_error(guc_to_gt(guc)->i915, "Failed to %s GuC RC (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index ad570fa002a6..8dc063f087eb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -96,6 +96,7 @@
#define GUC_SHIM_CONTROL2 _MMIO(0xc068)
#define GUC_IS_PRIVILEGED (1<<29)
+#define GSC_LOADS_HUC (1<<30)
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 1db833da42df..ec9c4ca0f615 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -98,6 +98,30 @@ static u32 slpc_get_state(struct intel_guc_slpc *slpc)
return data->header.global_state;
}
+static int guc_action_slpc_set_param_nb(struct intel_guc *guc, u8 id, u32 value)
+{
+ u32 request[] = {
+ GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+ SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
+ id,
+ value,
+ };
+ int ret;
+
+ ret = intel_guc_send_nb(guc, request, ARRAY_SIZE(request), 0);
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+static int slpc_set_param_nb(struct intel_guc_slpc *slpc, u8 id, u32 value)
+{
+ struct intel_guc *guc = slpc_to_guc(slpc);
+
+ GEM_BUG_ON(id >= SLPC_MAX_PARAM);
+
+ return guc_action_slpc_set_param_nb(guc, id, value);
+}
+
static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
{
u32 request[] = {
@@ -208,12 +232,14 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- ret = slpc_set_param(slpc,
- SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
- freq);
+ /* Non-blocking request will avoid stalls */
+ ret = slpc_set_param_nb(slpc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ freq);
if (ret)
- i915_probe_error(i915, "Unable to force min freq to %u: %d",
- freq, ret);
+ drm_notice(&i915->drm,
+ "Failed to send set_param for min freq(%d): (%d)\n",
+ freq, ret);
}
return ret;
@@ -222,6 +248,7 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
static void slpc_boost_work(struct work_struct *work)
{
struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
+ int err;
/*
* Raise min freq to boost. It's possible that
@@ -231,8 +258,9 @@ static void slpc_boost_work(struct work_struct *work)
*/
mutex_lock(&slpc->lock);
if (atomic_read(&slpc->num_waiters)) {
- slpc_force_min_freq(slpc, slpc->boost_freq);
- slpc->num_boosts++;
+ err = slpc_force_min_freq(slpc, slpc->boost_freq);
+ if (!err)
+ slpc->num_boosts++;
}
mutex_unlock(&slpc->lock);
}
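The boost worker now only counts a boost when the non-blocking set-param actually went out. A userspace sketch of that accounting; guc_send_nb_stub() is a stand-in for the real transport, and the ret > 0 normalization mirrors guc_action_slpc_set_param_nb() above:

#include <stdio.h>
#include <errno.h>

/*
 * Stand-in for a non-blocking GuC send: a positive return would be a
 * protocol surprise, which the driver normalizes to -EPROTO.
 */
static int guc_send_nb_stub(unsigned int freq, int simulate_err)
{
	int ret = simulate_err;

	(void)freq;
	return ret > 0 ? -EPROTO : ret;
}

int main(void)
{
	unsigned int num_boosts = 0;
	int err;

	err = guc_send_nb_stub(1500, 0);
	if (!err)
		num_boosts++;	/* only count boosts that went out */

	err = guc_send_nb_stub(1500, -EBUSY);
	if (!err)
		num_boosts++;

	printf("boosts=%u (expect 1), last err=%d\n", num_boosts, err);
	return 0;
}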
@@ -260,6 +288,7 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
slpc->boost_freq = 0;
atomic_set(&slpc->num_waiters, 0);
slpc->num_boosts = 0;
+ slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
mutex_init(&slpc->lock);
INIT_WORK(&slpc->boost_work, slpc_boost_work);
@@ -506,6 +535,22 @@ int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
return ret;
}
+int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ if (!HAS_MEDIA_RATIO_MODE(i915))
+ return -ENODEV;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_MEDIA_FF_RATIO_MODE,
+ val);
+ return ret;
+}
+
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
u32 pm_intrmsk_mbz = 0;
@@ -654,6 +699,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
return ret;
}
+ /* Set cached media freq ratio mode */
+ intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 0caa8fee3c04..82a98f78f96c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -38,6 +38,7 @@ int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val);
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val);
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val);
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p);
+int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val);
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
index bf5b9a563c09..73d208123528 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
@@ -29,6 +29,9 @@ struct intel_guc_slpc {
u32 min_freq_softlimit;
u32 max_freq_softlimit;
+ /* cached media ratio mode */
+ u32 media_ratio_mode;
+
/* Protects set/reset of boost freq
* and value of num_waiters
*/
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 2d9f5f1c79d3..76916aed897a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1365,6 +1365,8 @@ static void __update_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
+ guc->timestamp.last_stat_jiffies = jiffies;
+
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
@@ -1437,6 +1439,17 @@ void intel_guc_busyness_park(struct intel_gt *gt)
return;
cancel_delayed_work(&guc->timestamp.work);
+
+	/*
+	 * Before parking, sample the engine busyness stats if needed. We can
+	 * skip it if less than half a ping interval has passed since the last
+	 * sample.
+	 */
+ if (guc->timestamp.last_stat_jiffies &&
+ !time_after(jiffies, guc->timestamp.last_stat_jiffies +
+ (guc->timestamp.ping_delay / 2)))
+ return;
+
__update_guc_busyness_stats(guc);
}
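Parking now rate-limits the busyness sample to once per half ping interval. The kernel's time_after() is wraparound-safe because it reduces to a signed subtraction; a standalone sketch of the same skip test, with invented tick values:

#include <stdio.h>

/*
 * Wrap-safe "a after b" comparison, same spirit as the kernel's
 * time_after(); correct across unsigned counter wraparound.
 */
static int time_after_ul(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long jiffies = 1000, last_stat = 900, ping_delay = 250;

	if (last_stat &&
	    !time_after_ul(jiffies, last_stat + ping_delay / 2))
		printf("skip: sampled %lu ticks ago\n", jiffies - last_stat);
	else
		printf("sample busyness stats\n");
	return 0;
}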
@@ -2949,7 +2962,9 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
}
}
-static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
+static void
+guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
+ unsigned int preempt_timeout_ms)
{
struct intel_guc *guc = ce_to_guc(ce);
struct intel_runtime_pm *runtime_pm =
@@ -2988,7 +3003,8 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
* gets kicked off the HW ASAP.
*/
with_intel_runtime_pm(runtime_pm, wakeref) {
- __guc_context_set_preemption_timeout(guc, guc_id, 1);
+ __guc_context_set_preemption_timeout(guc, guc_id,
+ preempt_timeout_ms);
__guc_context_sched_disable(guc, ce, guc_id);
}
} else {
@@ -2996,7 +3012,7 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
with_intel_runtime_pm(runtime_pm, wakeref)
__guc_context_set_preemption_timeout(guc,
ce->guc_id.id,
- 1);
+ preempt_timeout_ms);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
}
}
@@ -3359,7 +3375,7 @@ static const struct intel_context_ops guc_context_ops = {
.unpin = guc_context_unpin,
.post_unpin = guc_context_post_unpin,
- .ban = guc_context_ban,
+ .revoke = guc_context_revoke,
.cancel_request = guc_context_cancel_request,
@@ -3608,7 +3624,7 @@ static const struct intel_context_ops virtual_guc_context_ops = {
.unpin = guc_virtual_context_unpin,
.post_unpin = guc_context_post_unpin,
- .ban = guc_context_ban,
+ .revoke = guc_context_revoke,
.cancel_request = guc_context_cancel_request,
@@ -3697,7 +3713,7 @@ static const struct intel_context_ops virtual_parent_context_ops = {
.unpin = guc_parent_context_unpin,
.post_unpin = guc_context_post_unpin,
- .ban = guc_context_ban,
+ .revoke = guc_context_revoke,
.cancel_request = guc_context_cancel_request,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 556829de9c17..3bb8838e325a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "gt/intel_gt.h"
+#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "i915_drv.h"
@@ -17,11 +18,15 @@
* capabilities by adding HuC specific commands to batch buffers.
*
* The kernel driver is only responsible for loading the HuC firmware and
- * triggering its security authentication, which is performed by the GuC. For
- * The GuC to correctly perform the authentication, the HuC binary must be
- * loaded before the GuC one. Loading the HuC is optional; however, not using
- * the HuC might negatively impact power usage and/or performance of media
- * workloads, depending on the use-cases.
+ * triggering its security authentication, which is performed by the GuC on
+ * older platforms and by the GSC on newer ones. For the GuC to correctly
+ * perform the authentication, the HuC binary must be loaded before the GuC one.
+ * Loading the HuC is optional; however, not using the HuC might negatively
+ * impact power usage and/or performance of media workloads, depending on the
+ * use cases.
+ * HuC must be reloaded on events that cause the WOPCM to lose its contents
+ * (S3/S4, FLR); GuC-authenticated HuC must also be reloaded on GuC/GT reset,
+ * while GSC-managed HuC will survive that.
*
* See https://github.com/intel/media-driver for the latest details on HuC
* functionality.
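The expanded kernel-doc above encodes a small reload policy: WOPCM-clearing events (S3/S4, FLR) always force a reload, and GT/GuC reset forces one only for GuC-authenticated HuC. A sketch that captures the table; the enum and helper are illustrative, not driver API:

#include <stdio.h>
#include <stdbool.h>

enum huc_event { EVT_S3_S4, EVT_FLR, EVT_GT_RESET };

/*
 * Illustrative policy helper mirroring the doc text: GSC-managed HuC
 * survives GT reset, everything else forces a reload.
 */
static bool huc_needs_reload(enum huc_event e, bool loaded_by_gsc)
{
	switch (e) {
	case EVT_S3_S4:
	case EVT_FLR:
		return true;	/* WOPCM contents are lost */
	case EVT_GT_RESET:
		return !loaded_by_gsc;
	}
	return false;
}

int main(void)
{
	printf("GT reset, GSC-loaded: reload=%d\n",
	       huc_needs_reload(EVT_GT_RESET, true));
	return 0;
}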
@@ -54,11 +59,51 @@ void intel_huc_init_early(struct intel_huc *huc)
}
}
+#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
+static int check_huc_loading_mode(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ bool fw_needs_gsc = intel_huc_is_loaded_by_gsc(huc);
+ bool hw_uses_gsc = false;
+
+ /*
+ * The fuse for HuC load via GSC is only valid on platforms that have
+ * GuC deprivilege.
+ */
+ if (HAS_GUC_DEPRIVILEGE(gt->i915))
+ hw_uses_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) &
+ GSC_LOADS_HUC;
+
+ if (fw_needs_gsc != hw_uses_gsc) {
+ drm_err(&gt->i915->drm,
+ "mismatch between HuC FW (%s) and HW (%s) load modes\n",
+ HUC_LOAD_MODE_STRING(fw_needs_gsc),
+ HUC_LOAD_MODE_STRING(hw_uses_gsc));
+ return -ENOEXEC;
+ }
+
+ /* make sure we can access the GSC via the mei driver if we need it */
+ if (!(IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC)) &&
+ fw_needs_gsc) {
+ drm_info(&gt->i915->drm,
+ "Can't load HuC due to missing MEI modules\n");
+ return -EIO;
+ }
+
+ drm_dbg(&gt->i915->drm, "GSC loads huc=%s\n", str_yes_no(fw_needs_gsc));
+
+ return 0;
+}
+
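check_huc_loading_mode() boils down to two gates: the blob's load mode must agree with the fuse, and GSC loading additionally needs the MEI modules. Sketched standalone with the same error codes; check_load_mode() is an illustrative reduction, not the driver function:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

static int check_load_mode(bool fw_needs_gsc, bool hw_uses_gsc, bool have_mei)
{
	if (fw_needs_gsc != hw_uses_gsc)
		return -ENOEXEC;	/* FW/HW load-mode mismatch */
	if (fw_needs_gsc && !have_mei)
		return -EIO;		/* can't reach the GSC */
	return 0;
}

int main(void)
{
	printf("%d\n", check_load_mode(true, false, true)); /* -> -ENOEXEC */
	return 0;
}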
int intel_huc_init(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
int err;
+ err = check_huc_loading_mode(huc);
+ if (err)
+ goto out;
+
err = intel_uc_fw_init(&huc->fw);
if (err)
goto out;
@@ -68,7 +113,7 @@ int intel_huc_init(struct intel_huc *huc)
return 0;
out:
- i915_probe_error(i915, "failed with %d\n", err);
+ drm_info(&i915->drm, "HuC init failed with %d\n", err);
return err;
}
@@ -96,17 +141,20 @@ int intel_huc_auth(struct intel_huc *huc)
struct intel_guc *guc = &gt->uc.guc;
int ret;
- GEM_BUG_ON(intel_huc_is_authenticated(huc));
-
if (!intel_uc_fw_is_loaded(&huc->fw))
return -ENOEXEC;
+ /* GSC will do the auth */
+ if (intel_huc_is_loaded_by_gsc(huc))
+ return -ENODEV;
+
ret = i915_inject_probe_error(gt->i915, -ENXIO);
if (ret)
goto fail;
- ret = intel_guc_auth_huc(guc,
- intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
+ GEM_BUG_ON(intel_uc_fw_is_running(&huc->fw));
+
+ ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
goto fail;
@@ -133,6 +181,18 @@ fail:
return ret;
}
+static bool huc_is_authenticated(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ intel_wakeref_t wakeref;
+ u32 status = 0;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ status = intel_uncore_read(gt->uncore, huc->status.reg);
+
+ return (status & huc->status.mask) == huc->status.value;
+}
+
/**
* intel_huc_check_status() - check HuC status
* @huc: intel_huc structure
@@ -150,10 +210,6 @@ fail:
*/
int intel_huc_check_status(struct intel_huc *huc)
{
- struct intel_gt *gt = huc_to_gt(huc);
- intel_wakeref_t wakeref;
- u32 status = 0;
-
switch (__intel_uc_fw_status(&huc->fw)) {
case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
return -ENODEV;
@@ -167,10 +223,17 @@ int intel_huc_check_status(struct intel_huc *huc)
break;
}
- with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- status = intel_uncore_read(gt->uncore, huc->status.reg);
+ return huc_is_authenticated(huc);
+}
- return (status & huc->status.mask) == huc->status.value;
+void intel_huc_update_auth_status(struct intel_huc *huc)
+{
+ if (!intel_uc_fw_is_loadable(&huc->fw))
+ return;
+
+ if (huc_is_authenticated(huc))
+ intel_uc_fw_change_status(&huc->fw,
+ INTEL_UC_FIRMWARE_RUNNING);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index 73ec670800f2..d7e25b6e879e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -27,6 +27,7 @@ int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc);
int intel_huc_check_status(struct intel_huc *huc);
+void intel_huc_update_auth_status(struct intel_huc *huc);
static inline int intel_huc_sanitize(struct intel_huc *huc)
{
@@ -50,9 +51,9 @@ static inline bool intel_huc_is_used(struct intel_huc *huc)
return intel_uc_fw_is_available(&huc->fw);
}
-static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
+static inline bool intel_huc_is_loaded_by_gsc(const struct intel_huc *huc)
{
- return intel_uc_fw_is_running(&huc->fw);
+ return huc->fw.loaded_via_gsc;
}
void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index e5ef509c70e8..9d6ab1e01639 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -8,7 +8,7 @@
#include "i915_drv.h"
/**
- * intel_huc_fw_upload() - load HuC uCode to device
+ * intel_huc_fw_upload() - load HuC uCode to device via DMA transfer
* @huc: intel_huc structure
*
* Called from intel_uc_init_hw() during driver load, resume from sleep and
@@ -21,6 +21,9 @@
*/
int intel_huc_fw_upload(struct intel_huc *huc)
{
+ if (intel_huc_is_loaded_by_gsc(huc))
+ return -ENODEV;
+
/* HW doesn't look at destination address for HuC, so set it to 0 */
return intel_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index e8f099360e01..f2e7c82985ef 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -45,6 +45,10 @@ static void uc_expand_default_options(struct intel_uc *uc)
/* Default: enable HuC authentication and GuC submission */
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
+
+ /* XEHPSDV and PVC do not use HuC */
+ if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
+ i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
}
/* Reset GuC providing us with fresh state for both GuC and HuC.
@@ -323,17 +327,10 @@ static int __uc_init(struct intel_uc *uc)
if (ret)
return ret;
- if (intel_uc_uses_huc(uc)) {
- ret = intel_huc_init(huc);
- if (ret)
- goto out_guc;
- }
+ if (intel_uc_uses_huc(uc))
+ intel_huc_init(huc);
return 0;
-
-out_guc:
- intel_guc_fini(guc);
- return ret;
}
static void __uc_fini(struct intel_uc *uc)
@@ -509,7 +506,16 @@ static int __uc_init_hw(struct intel_uc *uc)
if (ret)
goto err_log_capture;
- intel_huc_auth(huc);
+ /*
+ * GSC-loaded HuC is authenticated by the GSC, so we don't need to
+ * trigger the auth here. However, given that the HuC loaded this way
+ * survives GT reset, we still need to update our SW bookkeeping to make
+ * sure it reflects the correct HW status.
+ */
+ if (intel_huc_is_loaded_by_gsc(huc))
+ intel_huc_update_auth_status(huc);
+ else
+ intel_huc_auth(huc);
if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_enable(guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 703f42ba5ddd..56a0d80f88ba 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -335,62 +335,31 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
}
}
-/**
- * intel_uc_fw_fetch - fetch uC firmware
- * @uc_fw: uC firmware
- *
- * Fetch uC firmware into GEM obj.
- *
- * Return: 0 on success, a negative errno code on failure.
- */
-int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
+static int check_gsc_manifest(const struct firmware *fw,
+ struct intel_uc_fw *uc_fw)
{
- struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
- struct device *dev = i915->drm.dev;
- struct drm_i915_gem_object *obj;
- const struct firmware *fw = NULL;
- struct uc_css_header *css;
- size_t size;
- int err;
-
- GEM_BUG_ON(!i915->wopcm.size);
- GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
-
- err = i915_inject_probe_error(i915, -ENXIO);
- if (err)
- goto fail;
+ u32 *dw = (u32 *)fw->data;
+ u32 version = dw[HUC_GSC_VERSION_DW];
- __force_fw_fetch_failures(uc_fw, -EINVAL);
- __force_fw_fetch_failures(uc_fw, -ESTALE);
+ uc_fw->major_ver_found = FIELD_GET(HUC_GSC_MAJOR_VER_MASK, version);
+ uc_fw->minor_ver_found = FIELD_GET(HUC_GSC_MINOR_VER_MASK, version);
- err = firmware_request_nowarn(&fw, uc_fw->path, dev);
- if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
- err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
- if (!err) {
- drm_notice(&i915->drm,
- "%s firmware %s is recommended, but only %s was found\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->wanted_path,
- uc_fw->fallback.path);
- drm_info(&i915->drm,
- "Consider updating your linux-firmware pkg or downloading from %s\n",
- INTEL_UC_FIRMWARE_URL);
+ return 0;
+}
- uc_fw->path = uc_fw->fallback.path;
- uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
- uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
- }
- }
- if (err)
- goto fail;
+static int check_ccs_header(struct drm_i915_private *i915,
+ const struct firmware *fw,
+ struct intel_uc_fw *uc_fw)
+{
+ struct uc_css_header *css;
+ size_t size;
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw->size < sizeof(struct uc_css_header))) {
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, sizeof(struct uc_css_header));
- err = -ENODATA;
- goto fail;
+ return -ENODATA;
}
css = (struct uc_css_header *)fw->data;
@@ -403,8 +372,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
"%s firmware %s: unexpected header size: %zu != %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, sizeof(struct uc_css_header));
- err = -EPROTO;
- goto fail;
+ return -EPROTO;
}
/* uCode size must be calculated from other sizes */
@@ -419,8 +387,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, size);
- err = -ENOEXEC;
- goto fail;
+ return -ENOEXEC;
}
/* Sanity check whether this fw is not larger than whole WOPCM memory */
@@ -429,8 +396,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
size, (size_t)i915->wopcm.size);
- err = -E2BIG;
- goto fail;
+ return -E2BIG;
}
/* Get version numbers from the CSS header */
@@ -439,6 +405,66 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
css->sw_version);
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
+ uc_fw->private_data_size = css->private_data_size;
+
+ return 0;
+}
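check_ccs_header() is mostly size arithmetic: the blob must at least hold a CSS header and must fit in the WOPCM. A reduced sketch of those two bounds checks; the struct here is a simplified stand-in, the real uc_css_header is a fixed 128 bytes with many fields:

#include <stdio.h>
#include <errno.h>
#include <stddef.h>

/* Simplified stand-in for struct uc_css_header. */
struct css_hdr { unsigned int dw[32]; };

/* Reject too-small blobs and blobs larger than the WOPCM up front. */
static int check_sizes(size_t blob_size, size_t wopcm_size)
{
	if (blob_size < sizeof(struct css_hdr))
		return -ENODATA;
	if (blob_size > wopcm_size)
		return -E2BIG;
	return 0;
}

int main(void)
{
	printf("%d\n", check_sizes(64, 1 << 20));	/* -> -ENODATA */
	printf("%d\n", check_sizes(4096, 1 << 20));	/* -> 0 */
	return 0;
}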
+
+/**
+ * intel_uc_fw_fetch - fetch uC firmware
+ * @uc_fw: uC firmware
+ *
+ * Fetch uC firmware into GEM obj.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
+ struct device *dev = i915->drm.dev;
+ struct drm_i915_gem_object *obj;
+ const struct firmware *fw = NULL;
+ int err;
+
+ GEM_BUG_ON(!i915->wopcm.size);
+ GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
+
+ err = i915_inject_probe_error(i915, -ENXIO);
+ if (err)
+ goto fail;
+
+ __force_fw_fetch_failures(uc_fw, -EINVAL);
+ __force_fw_fetch_failures(uc_fw, -ESTALE);
+
+ err = firmware_request_nowarn(&fw, uc_fw->path, dev);
+ if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
+ err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
+ if (!err) {
+ drm_notice(&i915->drm,
+ "%s firmware %s is recommended, but only %s was found\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->wanted_path,
+ uc_fw->fallback.path);
+ drm_info(&i915->drm,
+ "Consider updating your linux-firmware pkg or downloading from %s\n",
+ INTEL_UC_FIRMWARE_URL);
+
+ uc_fw->path = uc_fw->fallback.path;
+ uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
+ uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
+ }
+ }
+ if (err)
+ goto fail;
+
+ if (uc_fw->loaded_via_gsc)
+ err = check_gsc_manifest(fw, uc_fw);
+ else
+ err = check_ccs_header(i915, fw, uc_fw);
+ if (err)
+ goto fail;
+
if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
@@ -451,9 +477,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
}
}
- if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
- uc_fw->private_data_size = css->private_data_size;
-
if (HAS_LMEM(i915)) {
obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
if (!IS_ERR(obj))
@@ -521,7 +544,10 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
if (i915_gem_object_is_lmem(obj))
pte_flags |= PTE_LM;
- ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
+ if (ggtt->vm.raw_insert_entries)
+ ggtt->vm.raw_insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
+ else
+ ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}
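uc_fw_bind_ggtt() now prefers the raw insert hook when the address space provides one, falling back to the regular path otherwise. The optional-callback pattern, reduced to standalone C (names are illustrative):

#include <stdio.h>

struct vm_ops {
	void (*insert)(const char *what);
	void (*raw_insert)(const char *what);	/* optional, may be NULL */
};

/* Prefer the raw path when the backend provides one, as above. */
static void bind(const struct vm_ops *ops, const char *what)
{
	if (ops->raw_insert)
		ops->raw_insert(what);
	else
		ops->insert(what);
}

static void plain(const char *w) { printf("insert %s\n", w); }

int main(void)
{
	struct vm_ops ops = { .insert = plain, .raw_insert = NULL };

	bind(&ops, "uc fw");
	return 0;
}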
static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 562acdf88adb..7aa2644400b9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -109,6 +109,8 @@ struct intel_uc_fw {
u32 ucode_size;
u32 private_data_size;
+
+ bool loaded_via_gsc;
};
#ifdef CONFIG_DRM_I915_DEBUG_GUC
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
index e41ffc7a7fbc..b05e0e35b734 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -39,6 +39,11 @@
* 3. Length info of each component can be found in header, in dwords.
* 4. Modulus and exponent key are not required by driver. They may not appear
* in fw. So driver will load a truncated firmware in this case.
+ *
+ * Starting from DG2, the HuC is loaded by the GSC instead of i915. The GSC
+ * firmware performs all the required integrity checks, we just need to check
+ * the version. Note that the header for GSC-managed blobs is different from the
+ * CSS used for dma-loaded firmwares.
*/
struct uc_css_header {
@@ -78,4 +83,8 @@ struct uc_css_header {
} __packed;
static_assert(sizeof(struct uc_css_header) == 128);
+#define HUC_GSC_VERSION_DW 44
+#define HUC_GSC_MAJOR_VER_MASK (0xFF << 0)
+#define HUC_GSC_MINOR_VER_MASK (0xFF << 16)
+
#endif /* _INTEL_UC_FW_ABI_H */
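These defines say the HuC version of a GSC-managed blob lives in dword 44. Open-coding the FIELD_GET() extraction from check_gsc_manifest() as a runnable sketch; the 7.3 value is invented for the demo:

#include <stdint.h>
#include <stdio.h>

#define HUC_GSC_VERSION_DW	44
#define HUC_GSC_MAJOR_VER_MASK	(0xFFu << 0)
#define HUC_GSC_MINOR_VER_MASK	(0xFFu << 16)

int main(void)
{
	uint32_t fw_dwords[64] = { 0 };	/* stands in for the blob contents */
	uint32_t v;

	fw_dwords[HUC_GSC_VERSION_DW] = (3u << 16) | 7u;	/* 7.3 */
	v = fw_dwords[HUC_GSC_VERSION_DW];

	printf("HuC %u.%u\n",
	       (v & HUC_GSC_MAJOR_VER_MASK) >> 0,
	       (v & HUC_GSC_MINOR_VER_MASK) >> 16);
	return 0;
}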
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 1c35a41620ae..de13f102d4fd 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -428,7 +428,7 @@ struct cmd_info {
#define R_VECS BIT(VECS0)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
- u16 rings;
+ intel_engine_mask_t rings;
/* devices that support this cmd: SNB/IVB/HSW/... */
u16 devices;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index ee2b3a375362..7412abf166a8 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -974,7 +974,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
- intel_engine_pm_put_delay(engine, 1);
+ intel_engine_pm_put_delay(engine, 2);
}
}
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 1041b5340465..deb8a8b76965 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -100,6 +100,9 @@
#include "intel_region_ttm.h"
#include "vlv_suspend.h"
+/* Intel Rapid Start Technology ACPI device name */
+static const char irst_name[] = "INT3392";
+
static const struct drm_driver i915_drm_driver;
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
@@ -520,6 +523,22 @@ mask_err:
return ret;
}
+static int i915_pcode_init(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ int id, ret;
+
+ for_each_gt(gt, i915, id) {
+ ret = intel_pcode_init(gt->uncore);
+ if (ret) {
+ drm_err(&gt->i915->drm, "gt%d: intel_pcode_init failed %d\n", id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
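i915_pcode_init() turns the single pcode handshake into a fail-fast per-tile loop. The shape, reduced to standalone C, with init_tile() standing in for intel_pcode_init():

#include <stdio.h>
#include <errno.h>

#define NUM_TILES 2

/* Stand-in for intel_pcode_init(); fails on one tile for the demo. */
static int init_tile(int id)
{
	return id == 1 ? -ETIMEDOUT : 0;
}

int main(void)
{
	int id, ret = 0;

	for (id = 0; id < NUM_TILES; id++) {
		ret = init_tile(id);
		if (ret) {
			fprintf(stderr, "gt%d: pcode init failed %d\n", id, ret);
			break;	/* fail fast, as the driver does */
		}
	}
	return ret ? 1 : 0;
}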
+
/**
* i915_driver_hw_probe - setup state requiring device access
* @dev_priv: device private
@@ -630,7 +649,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_opregion_setup(dev_priv);
- ret = intel_pcode_init(dev_priv);
+ ret = i915_pcode_init(dev_priv);
if (ret)
goto err_msi;
@@ -828,8 +847,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- const struct intel_device_info *match_info =
- (struct intel_device_info *)ent->driver_data;
struct drm_i915_private *i915;
int ret;
@@ -838,7 +855,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
/* Disable nuclear pageflip by default on pre-ILK */
- if (!i915->params.nuclear_pageflip && match_info->graphics.ver < 5)
+ if (!i915->params.nuclear_pageflip && DISPLAY_VER(i915) < 5)
i915->drm.driver_features &= ~DRIVER_ATOMIC;
ret = pci_enable_device(pdev);
@@ -1066,8 +1083,6 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_runtime_pm_disable(&i915->runtime_pm);
intel_power_domains_disable(i915);
- i915_gem_suspend(i915);
-
if (HAS_DISPLAY(i915)) {
drm_kms_helper_poll_disable(&i915->drm);
@@ -1084,6 +1099,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_dmc_ucode_suspend(i915);
+ i915_gem_suspend(i915);
+
/*
* The only requirement is to reboot with display DC states disabled,
* for now leaving all display power wells in the INIT power domain
@@ -1167,6 +1184,8 @@ static int i915_drm_suspend(struct drm_device *dev)
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ i915_gem_drain_freed_objects(dev_priv);
+
return 0;
}
@@ -1258,7 +1277,7 @@ static int i915_drm_resume(struct drm_device *dev)
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- ret = intel_pcode_init(dev_priv);
+ ret = i915_pcode_init(dev_priv);
if (ret)
return ret;
@@ -1430,6 +1449,8 @@ static int i915_pm_suspend(struct device *kdev)
return -ENODEV;
}
+ i915_ggtt_mark_pte_lost(i915, false);
+
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1482,6 +1503,14 @@ static int i915_pm_resume(struct device *kdev)
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ /*
+ * If IRST is enabled, or if we can't detect whether it's enabled,
+ * then we must assume we lost the GGTT page table entries, since
+ * they are not retained if IRST decided to enter S4.
+ */
+ if (!IS_ENABLED(CONFIG_ACPI) || acpi_dev_present(irst_name, NULL, -1))
+ i915_ggtt_mark_pte_lost(i915, true);
+
return i915_drm_resume(&i915->drm);
}
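The resume path deliberately errs toward rebuilding the GGTT: PTEs are treated as lost whenever IRST is present or cannot be ruled out. The conservative OR, made explicit in a sketch (the helper is illustrative, not driver code):

#include <stdio.h>
#include <stdbool.h>

/* Without ACPI we cannot rule IRST out, so assume the worst. */
static bool assume_ptes_lost(bool acpi_enabled, bool irst_present)
{
	return !acpi_enabled || irst_present;
}

int main(void)
{
	printf("no ACPI: %d\n", assume_ptes_lost(false, false));	/* 1 */
	printf("ACPI, no IRST: %d\n", assume_ptes_lost(true, false));	/* 0 */
	return 0;
}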
@@ -1541,6 +1570,9 @@ static int i915_pm_restore_early(struct device *kdev)
static int i915_pm_restore(struct device *kdev)
{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ i915_ggtt_mark_pte_lost(i915, true);
return i915_pm_resume(kdev);
}
@@ -1553,7 +1585,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
- drm_dbg_kms(&dev_priv->drm, "Suspending device\n");
+ drm_dbg(&dev_priv->drm, "Suspending device\n");
disable_rpm_wakeref_asserts(rpm);
@@ -1623,7 +1655,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_enable(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
+ drm_dbg(&dev_priv->drm, "Device suspended\n");
return 0;
}
@@ -1636,7 +1668,7 @@ static int intel_runtime_resume(struct device *kdev)
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
- drm_dbg_kms(&dev_priv->drm, "Resuming device\n");
+ drm_dbg(&dev_priv->drm, "Resuming device\n");
drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
disable_rpm_wakeref_asserts(rpm);
@@ -1679,7 +1711,7 @@ static int intel_runtime_resume(struct device *kdev)
drm_err(&dev_priv->drm,
"Runtime resume failed, disabling it (%d)\n", ret);
else
- drm_dbg_kms(&dev_priv->drm, "Device resumed\n");
+ drm_dbg(&dev_priv->drm, "Device resumed\n");
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
index f796c5e8e060..69496af996d9 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.h
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -11,7 +11,7 @@
#include <linux/spinlock.h>
#include <linux/xarray.h>
-#include "gt/intel_engine_types.h"
+#include <uapi/drm/i915_drm.h>
#define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 00d7eeae33bd..d25647be25d1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -37,7 +37,6 @@
#include <drm/drm_connector.h>
#include <drm/ttm/ttm_device.h>
-#include "display/intel_bios.h"
#include "display/intel_cdclk.h"
#include "display/intel_display.h"
#include "display/intel_display_power.h"
@@ -194,12 +193,6 @@ struct drm_i915_display_funcs {
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
-enum drrs_type {
- DRRS_TYPE_NONE,
- DRRS_TYPE_STATIC,
- DRRS_TYPE_SEAMLESS,
-};
-
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
@@ -308,76 +301,19 @@ struct intel_vbt_data {
/* bdb version */
u16 version;
- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-
/* Feature bits */
unsigned int int_tv_support:1;
- unsigned int lvds_dither:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int int_lvds_support:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
- unsigned int panel_type:4;
int lvds_ssc_freq;
- unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
enum drm_panel_orientation orientation;
bool override_afc_startup;
u8 override_afc_startup_val;
- u8 seamless_drrs_min_refresh_rate;
- enum drrs_type drrs_type;
-
- struct {
- int rate;
- int lanes;
- int preemphasis;
- int vswing;
- int bpp;
- struct edp_power_seq pps;
- u8 drrs_msa_timing_delay;
- bool low_vswing;
- bool initialized;
- bool hobl;
- } edp;
-
- struct {
- bool enable;
- bool full_link;
- bool require_aux_wakeup;
- int idle_frames;
- int tp1_wakeup_time_us;
- int tp2_tp3_wakeup_time_us;
- int psr2_tp2_tp3_wakeup_time_us;
- } psr;
-
- struct {
- u16 pwm_freq_hz;
- u16 brightness_precision_bits;
- bool present;
- bool active_low_pwm;
- u8 min_brightness; /* min_brightness/255 of max */
- u8 controller; /* brightness controller number */
- enum intel_backlight_type type;
- } backlight;
-
- /* MIPI DSI */
- struct {
- u16 panel_id;
- struct mipi_config *config;
- struct mipi_pps_data *pps;
- u16 bl_ports;
- u16 cabc_ports;
- u8 seq_version;
- u32 size;
- u8 *data;
- const u8 *sequence[MIPI_SEQ_MAX];
- u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
- enum drm_panel_orientation orientation;
- } dsi;
-
int crt_ddc_pin;
struct list_head display_devices;
@@ -943,6 +879,7 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
+#define INTEL_BASEDIE_STEP(__i915) (RUNTIME_INFO(__i915)->step.basedie_step)
#define IS_DISPLAY_STEP(__i915, since, until) \
(drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
@@ -956,6 +893,10 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
(drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))
+#define IS_BASEDIE_STEP(__i915, since, until) \
+ (drm_WARN_ON(&(__i915)->drm, INTEL_BASEDIE_STEP(__i915) == STEP_NONE), \
+ INTEL_BASEDIE_STEP(__i915) >= (since) && INTEL_BASEDIE_STEP(__i915) < (until))
+
static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
enum intel_platform p)
@@ -1064,7 +1005,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_XEHPSDV(dev_priv) IS_PLATFORM(dev_priv, INTEL_XEHPSDV)
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG2)
#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, INTEL_PONTEVECCHIO)
+#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_METEORLAKE)
+#define IS_METEORLAKE_M(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_METEORLAKE, INTEL_SUBPLATFORM_M)
+#define IS_METEORLAKE_P(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_METEORLAKE, INTEL_SUBPLATFORM_P)
#define IS_DG2_G10(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(dev_priv) \
@@ -1208,6 +1154,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_DG2(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
+#define IS_PVC_BD_STEP(__i915, since, until) \
+ (IS_PONTEVECCHIO(__i915) && \
+ IS_BASEDIE_STEP(__i915, since, until))
+
+#define IS_PVC_CT_STEP(__i915, since, until) \
+ (IS_PONTEVECCHIO(__i915) && \
+ IS_GRAPHICS_STEP(__i915, since, until))
+
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))
@@ -1223,6 +1177,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
})
#define RCS_MASK(gt) \
ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
+#define BCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
#define VDBOX_MASK(gt) \
ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
@@ -1230,6 +1186,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define CCS_MASK(gt) \
ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
+#define HAS_MEDIA_RATIO_MODE(dev_priv) (INTEL_INFO(dev_priv)->has_media_ratio_mode)
+
/*
* The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
* All later gens can run the final buffer from the ppgtt
@@ -1329,9 +1287,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
-#define HAS_MSLICES(dev_priv) \
- (INTEL_INFO(dev_priv)->has_mslices)
-
/*
* Set this flag, when platform requires 64K GTT page sizes or larger for
* device local memory access.
@@ -1370,6 +1325,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10))
+#define HAS_L3_CCS_READ(i915) (INTEL_INFO(i915)->has_l3_ccs_read)
+
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
@@ -1388,7 +1345,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
- (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
+ (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), \
+ !(dev_priv)->params.disable_display && \
+ !intel_opregion_headless_sku(dev_priv))
#define HAS_GUC_DEPRIVILEGE(dev_priv) \
(INTEL_INFO(dev_priv)->has_guc_deprivilege)
@@ -1401,6 +1360,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915))
+#define HAS_3D_PIPELINE(i915) (INTEL_INFO(i915)->has_3d_pipeline)
+
+#define HAS_ONE_EU_PER_FUSE_BIT(i915) (INTEL_INFO(i915)->has_one_eu_per_fuse_bit)
+
/* i915_gem.c */
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index d0752e5553db..68d8d52bd541 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -26,7 +26,6 @@
#define __I915_GEM_H__
#include <linux/bug.h>
-#include <linux/interrupt.h>
#include <drm/drm_drv.h>
@@ -54,9 +53,6 @@ struct drm_i915_private;
} while(0)
#define GEM_WARN_ON(expr) WARN_ON(expr)
-#define GEM_DEBUG_DECL(var) var
-#define GEM_DEBUG_EXEC(expr) expr
-#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)
#else
@@ -66,9 +62,6 @@ struct drm_i915_private;
#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); })
-#define GEM_DEBUG_DECL(var)
-#define GEM_DEBUG_EXEC(expr) do { } while (0)
-#define GEM_DEBUG_BUG_ON(expr)
#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })
#endif
@@ -91,36 +84,4 @@ struct drm_i915_private;
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
-static inline void tasklet_lock(struct tasklet_struct *t)
-{
- while (!tasklet_trylock(t))
- cpu_relax();
-}
-
-static inline bool tasklet_is_locked(const struct tasklet_struct *t)
-{
- return test_bit(TASKLET_STATE_RUN, &t->state);
-}
-
-static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
-{
- if (!atomic_fetch_inc(&t->count))
- tasklet_unlock_spin_wait(t);
-}
-
-static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
-{
- return !atomic_read(&t->count);
-}
-
-static inline bool __tasklet_enable(struct tasklet_struct *t)
-{
- return atomic_dec_and_test(&t->count);
-}
-
-static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
-{
- return test_bit(TASKLET_STATE_SCHED, &t->state);
-}
-
#endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index c12a0adefda5..6fd15b39570c 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -148,14 +148,21 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = intel_engines_has_context_isolation(i915);
break;
case I915_PARAM_SLICE_MASK:
+ /* Not supported from Xe_HP onward; use topology queries */
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ return -EINVAL;
+
value = sseu->slice_mask;
if (!value)
return -ENODEV;
break;
case I915_PARAM_SUBSLICE_MASK:
+ /* Not supported from Xe_HP onward; use topology queries */
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ return -EINVAL;
+
/* Only copy bits from the first slice */
- memcpy(&value, sseu->subslice_mask,
- min(sseu->ss_stride, (u8)sizeof(value)));
+ value = intel_sseu_get_hsw_subslices(sseu, 0);
if (!value)
return -ENODEV;
break;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0512c66fa4f3..32e92651ef7c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -46,6 +46,7 @@
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_guc_capture.h"
@@ -436,7 +437,6 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
static void error_print_instdone(struct drm_i915_error_state_buf *m,
const struct intel_engine_coredump *ee)
{
- const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
int slice;
int subslice;
int iter;
@@ -453,33 +453,21 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
if (GRAPHICS_VER(m->i915) <= 6)
return;
- if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 50)) {
- for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
- err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice,
- ee->instdone.sampler[slice][subslice]);
+ for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
+ err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.sampler[slice][subslice]);
- for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
- err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice,
- ee->instdone.row[slice][subslice]);
- } else {
- for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
- err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice,
- ee->instdone.sampler[slice][subslice]);
-
- for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
- err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice,
- ee->instdone.row[slice][subslice]);
- }
+ for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
+ err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.row[slice][subslice]);
if (GRAPHICS_VER(m->i915) < 12)
return;
if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
- for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
+ for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
err_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.geom_svg[slice][subslice]);
@@ -581,6 +569,15 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
}
+ if (GRAPHICS_VER(m->i915) >= 11) {
+ err_printf(m, " NOPID: 0x%08x\n", ee->nopid);
+ err_printf(m, " EXCC: 0x%08x\n", ee->excc);
+ err_printf(m, " CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
+ err_printf(m, " CSCMDOP: 0x%08x\n", ee->cscmdop);
+ err_printf(m, " CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl);
+ err_printf(m, " DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi);
+ err_printf(m, " DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo);
+ }
if (HAS_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@@ -1095,8 +1092,12 @@ i915_vma_coredump_create(const struct intel_gt *gt,
for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
mutex_lock(&ggtt->error_mutex);
- ggtt->vm.insert_page(&ggtt->vm, dma, slot,
- I915_CACHE_NONE, 0);
+ if (ggtt->vm.raw_insert_page)
+ ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
+ I915_CACHE_NONE, 0);
+ else
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+ I915_CACHE_NONE, 0);
mb();
s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
@@ -1116,11 +1117,15 @@ i915_vma_coredump_create(const struct intel_gt *gt,
dma_addr_t dma;
for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
+ dma_addr_t offset = dma - mem->region.start;
void __iomem *s;
- s = io_mapping_map_wc(&mem->iomap,
- dma - mem->region.start,
- PAGE_SIZE);
+ if (offset + PAGE_SIZE > mem->io_size) {
+ ret = -EINVAL;
+ break;
+ }
+
+ s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
ret = compress_page(compress,
(void __force *)s, dst,
true);
@@ -1224,6 +1229,16 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
ee->ipehr = ENGINE_READ(engine, IPEHR);
}
+ if (GRAPHICS_VER(i915) >= 11) {
+ ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL);
+ ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP);
+ ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL);
+ ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW);
+ ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD);
+ ee->nopid = ENGINE_READ(engine, RING_NOPID);
+ ee->excc = ENGINE_READ(engine, RING_EXCC);
+ }
+
intel_engine_get_instdone(engine, &ee->instdone);
ee->instpm = ENGINE_READ(engine, RING_INSTPM);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index a611abacd9c2..55a143b92d10 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -84,6 +84,13 @@ struct intel_engine_coredump {
u32 fault_reg;
u64 faddr;
u32 rc_psmi; /* sleep state */
+ u32 nopid;
+ u32 excc;
+ u32 cmd_cctl;
+ u32 cscmdop;
+ u32 ctx_sr_ctl;
+ u32 dma_faddr_hi;
+ u32 dma_faddr_lo;
struct intel_instdone instdone;
/* GuC matched capture-lists info */
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 701fbc98afa0..6fc475a5db61 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -204,6 +204,8 @@ i915_param_named_unsafe(request_timeout_ms, uint, 0600,
i915_param_named_unsafe(lmem_size, uint, 0400,
"Set the lmem size(in MiB) for each region. (default: 0, all memory)");
+i915_param_named_unsafe(lmem_bar_size, uint, 0400,
+ "Set the lmem bar size(in MiB).");
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
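Like lmem_size, the new lmem_bar_size override is root-readable only (0400) and fixed for the life of the module, so in practice it would be supplied at load time; a hypothetical invocation:

	modprobe i915 lmem_bar_size=256	# ask for a 256 MiB CPU-visible BAR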
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index b5e7ea45d191..2733cb6cfe09 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -74,6 +74,7 @@ struct drm_printer;
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \
param(unsigned int, lmem_size, 0, 0400) \
+ param(unsigned int, lmem_bar_size, 0, 0400) \
/* leave bools at the end to not create holes */ \
param(bool, enable_hangcheck, true, 0600) \
param(bool, load_detect_test, false, 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index acf688b698c3..aacc10f2e73f 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -38,43 +38,43 @@
.display.ver = (x)
#define I845_PIPE_OFFSETS \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
}
#define I9XX_PIPE_OFFSETS \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
}
#define IVB_PIPE_OFFSETS \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
}
#define HSW_PIPE_OFFSETS \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
[TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
@@ -82,44 +82,44 @@
}
#define CHV_PIPE_OFFSETS \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = CHV_PIPE_C_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
}
#define I845_CURSOR_OFFSETS \
- .cursor_offsets = { \
+ .display.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
}
#define I9XX_CURSOR_OFFSETS \
- .cursor_offsets = { \
+ .display.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
[PIPE_B] = CURSOR_B_OFFSET, \
}
#define CHV_CURSOR_OFFSETS \
- .cursor_offsets = { \
+ .display.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
[PIPE_B] = CURSOR_B_OFFSET, \
[PIPE_C] = CHV_CURSOR_C_OFFSET, \
}
#define IVB_CURSOR_OFFSETS \
- .cursor_offsets = { \
+ .display.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
[PIPE_B] = IVB_CURSOR_B_OFFSET, \
[PIPE_C] = IVB_CURSOR_C_OFFSET, \
}
#define TGL_CURSOR_OFFSETS \
- .cursor_offsets = { \
+ .display.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
[PIPE_B] = IVB_CURSOR_B_OFFSET, \
[PIPE_C] = IVB_CURSOR_C_OFFSET, \
@@ -127,30 +127,33 @@
}
#define I9XX_COLORS \
- .color = { .gamma_lut_size = 256 }
+ .display.color = { .gamma_lut_size = 256 }
#define I965_COLORS \
- .color = { .gamma_lut_size = 129, \
+ .display.color = { .gamma_lut_size = 129, \
.gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
#define ILK_COLORS \
- .color = { .gamma_lut_size = 1024 }
+ .display.color = { .gamma_lut_size = 1024 }
#define IVB_COLORS \
- .color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 }
+ .display.color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 }
#define CHV_COLORS \
- .color = { .degamma_lut_size = 65, .gamma_lut_size = 257, \
- .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
- .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ .display.color = { \
+ .degamma_lut_size = 65, .gamma_lut_size = 257, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
#define GLK_COLORS \
- .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024, \
- .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
- DRM_COLOR_LUT_EQUAL_CHANNELS, \
+ .display.color = { \
+ .degamma_lut_size = 33, .gamma_lut_size = 1024, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
+ DRM_COLOR_LUT_EQUAL_CHANNELS, \
}
#define ICL_COLORS \
- .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145, \
- .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
- DRM_COLOR_LUT_EQUAL_CHANNELS, \
- .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ .display.color = { \
+ .degamma_lut_size = 33, .gamma_lut_size = 262145, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
+ DRM_COLOR_LUT_EQUAL_CHANNELS, \
+ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
/* Keep in gen based order, and chronological order within a gen */
@@ -171,6 +174,7 @@
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
+ .has_3d_pipeline = 1, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.platform_engine_mask = BIT(RCS0), \
@@ -190,6 +194,7 @@
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
+ .has_3d_pipeline = 1, \
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
@@ -232,6 +237,7 @@ static const struct intel_device_info i865g_info = {
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.platform_engine_mask = BIT(RCS0), \
+ .has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
.dma_mask_size = 32, \
@@ -323,6 +329,7 @@ static const struct intel_device_info pnv_m_info = {
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.platform_engine_mask = BIT(RCS0), \
+ .has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
.dma_mask_size = 36, \
@@ -374,6 +381,7 @@ static const struct intel_device_info gm45_info = {
.display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
+ .has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
/* ilk does support rc6, but we do not implement [power] contexts */ \
@@ -405,6 +413,7 @@ static const struct intel_device_info ilk_m_info = {
.display.has_hotplug = 1, \
.display.fbc_mask = BIT(INTEL_FBC_A), \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .has_3d_pipeline = 1, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
@@ -456,6 +465,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.display.has_hotplug = 1, \
.display.fbc_mask = BIT(INTEL_FBC_A), \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .has_3d_pipeline = 1, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
@@ -529,7 +539,7 @@ static const struct intel_device_info vlv_info = {
.has_snoop = true,
.has_coherent_ggtt = false,
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
- .display_mmio_offset = VLV_DISPLAY_BASE,
+ .display.mmio_offset = VLV_DISPLAY_BASE,
I9XX_PIPE_OFFSETS,
I9XX_CURSOR_OFFSETS,
I965_COLORS,
@@ -627,7 +637,7 @@ static const struct intel_device_info chv_info = {
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
- .display_mmio_offset = VLV_DISPLAY_BASE,
+ .display.mmio_offset = VLV_DISPLAY_BASE,
CHV_PIPE_OFFSETS,
CHV_CURSOR_OFFSETS,
CHV_COLORS,
@@ -649,8 +659,8 @@ static const struct intel_device_info chv_info = {
.display.has_ipc = 1, \
.display.has_psr = 1, \
.display.has_psr_hw_tracking = 1, \
- .dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ \
- .dbuf.slice_mask = BIT(DBUF_S1)
+ .display.dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ \
+ .display.dbuf.slice_mask = BIT(DBUF_S1)
#define SKL_PLATFORM \
GEN9_FEATURES, \
@@ -685,13 +695,14 @@ static const struct intel_device_info skl_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
- .dbuf.slice_mask = BIT(DBUF_S1), \
+ .display.dbuf.slice_mask = BIT(DBUF_S1), \
.display.has_hotplug = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
.display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
+ .has_3d_pipeline = 1, \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
.display.has_fpga_dbg = 1, \
@@ -722,14 +733,14 @@ static const struct intel_device_info skl_gt4_info = {
static const struct intel_device_info bxt_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_BROXTON),
- .dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */
+ .display.dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */
};
static const struct intel_device_info glk_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_GEMINILAKE),
.display.ver = 10,
- .dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */
+ .display.dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */
GLK_COLORS,
};
@@ -801,7 +812,7 @@ static const struct intel_device_info cml_gt2_info = {
.display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
@@ -809,7 +820,7 @@ static const struct intel_device_info cml_gt2_info = {
[TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
@@ -819,8 +830,8 @@ static const struct intel_device_info cml_gt2_info = {
}, \
GEN(11), \
ICL_COLORS, \
- .dbuf.size = 2048, \
- .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
+ .display.dbuf.size = 2048, \
+ .display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
.display.has_dsc = 1, \
.has_coherent_ggtt = false, \
.has_logical_ring_elsq = 1
@@ -854,7 +865,7 @@ static const struct intel_device_info jsl_info = {
.display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
@@ -862,7 +873,7 @@ static const struct intel_device_info jsl_info = {
[TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
@@ -929,22 +940,15 @@ static const struct intel_device_info adl_s_info = {
.dma_mask_size = 39,
};
-#define XE_LPD_CURSOR_OFFSETS \
- .cursor_offsets = { \
- [PIPE_A] = CURSOR_A_OFFSET, \
- [PIPE_B] = IVB_CURSOR_B_OFFSET, \
- [PIPE_C] = IVB_CURSOR_C_OFFSET, \
- [PIPE_D] = TGL_CURSOR_D_OFFSET, \
- }
-
#define XE_LPD_FEATURES \
.display.abox_mask = GENMASK(1, 0), \
- .color = { .degamma_lut_size = 128, .gamma_lut_size = 1024, \
- .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
- DRM_COLOR_LUT_EQUAL_CHANNELS, \
+ .display.color = { \
+ .degamma_lut_size = 128, .gamma_lut_size = 1024, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
+ DRM_COLOR_LUT_EQUAL_CHANNELS, \
}, \
- .dbuf.size = 4096, \
- .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
+ .display.dbuf.size = 4096, \
+ .display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
BIT(DBUF_S4), \
.display.has_ddi = 1, \
.display.has_dmc = 1, \
@@ -959,7 +963,7 @@ static const struct intel_device_info adl_s_info = {
.display.has_psr = 1, \
.display.ver = 13, \
.display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
- .pipe_offsets = { \
+ .display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
@@ -967,7 +971,7 @@ static const struct intel_device_info adl_s_info = {
[TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
}, \
- .trans_offsets = { \
+ .display.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
@@ -975,7 +979,7 @@ static const struct intel_device_info adl_s_info = {
[TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
- XE_LPD_CURSOR_OFFSETS
+ TGL_CURSOR_OFFSETS
static const struct intel_device_info adl_p_info = {
GEN12_FEATURES,
@@ -1005,6 +1009,7 @@ static const struct intel_device_info adl_p_info = {
.graphics.rel = 50, \
XE_HP_PAGE_SIZES, \
.dma_mask_size = 46, \
+ .has_3d_pipeline = 1, \
.has_64bit_reloc = 1, \
.has_flat_ccs = 1, \
.has_global_mocs = 1, \
@@ -1012,7 +1017,7 @@ static const struct intel_device_info adl_p_info = {
.has_llc = 1, \
.has_logical_ring_contexts = 1, \
.has_logical_ring_elsq = 1, \
- .has_mslices = 1, \
+ .has_mslice_steering = 1, \
.has_rc6 = 1, \
.has_reset_engine = 1, \
.has_rps = 1, \
@@ -1033,6 +1038,7 @@ static const struct intel_device_info xehpsdv_info = {
.display = { },
.has_64k_pages = 1,
.needs_compact_pt = 1,
+ .has_media_ratio_mode = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
@@ -1054,6 +1060,7 @@ static const struct intel_device_info xehpsdv_info = {
.has_guc_deprivilege = 1, \
.has_heci_pxp = 1, \
.needs_compact_pt = 1, \
+ .has_media_ratio_mode = 1, \
.platform_engine_mask = \
BIT(RCS0) | BIT(BCS0) | \
BIT(VECS0) | BIT(VECS1) | \
@@ -1068,7 +1075,6 @@ static const struct intel_device_info dg2_info = {
.require_force_probe = 1,
};
-__maybe_unused
static const struct intel_device_info ats_m_info = {
DG2_FEATURES,
.display = { 0 },
@@ -1077,7 +1083,12 @@ static const struct intel_device_info ats_m_info = {
#define XE_HPC_FEATURES \
XE_HP_FEATURES, \
- .dma_mask_size = 52
+ .dma_mask_size = 52, \
+ .has_3d_pipeline = 0, \
+ .has_guc_deprivilege = 1, \
+ .has_l3_ccs_read = 1, \
+ .has_mslice_steering = 0, \
+ .has_one_eu_per_fuse_bit = 1
__maybe_unused
static const struct intel_device_info pvc_info = {
@@ -1096,6 +1107,31 @@ static const struct intel_device_info pvc_info = {
.require_force_probe = 1,
};
+#define XE_LPDP_FEATURES \
+ XE_LPD_FEATURES, \
+ .display.ver = 14, \
+ .display.has_cdclk_crawl = 1
+
+__maybe_unused
+static const struct intel_device_info mtl_info = {
+ XE_HP_FEATURES,
+ XE_LPDP_FEATURES,
+ /*
+ * Real graphics IP version will be obtained from hardware GMD_ID
+ * register. Value provided here is just for sanity checking.
+ */
+ .graphics.ver = 12,
+ .graphics.rel = 70,
+ .media.ver = 13,
+ PLATFORM(INTEL_METEORLAKE),
+ .display.has_modular_fia = 1,
+ .has_flat_ccs = 0,
+ .has_snoop = 1,
+ .memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
+ .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
+ .require_force_probe = 1,
+};
+
#undef PLATFORM
/*
@@ -1177,6 +1213,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_RPLS_IDS(&adl_s_info),
INTEL_RPLP_IDS(&adl_p_info),
INTEL_DG2_IDS(&dg2_info),
+ INTEL_ATS_M_IDS(&ats_m_info),
+ INTEL_MTL_IDS(&mtl_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
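With ats_m_info and mtl_info now reachable from pciidlist (and ats_m_info's __maybe_unused dropped above), probe finds the right device description through the PCI match table: each entry's driver_data carries the intel_device_info pointer. A reduced sketch of that lookup, with details elided:

static int probe_sketch(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* the matched table entry carries the device description */
	const struct intel_device_info *info =
		(const struct intel_device_info *)ent->driver_data;

	/* both new platforms still set this flag, so the user must
	 * opt in via i915.force_probe to bind at all */
	if (info->require_force_probe /* && !force_probed(pdev) */)
		return -ENODEV;

	/* ... hand off to the real driver probe ... */
	return 0;
}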
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 1577ab6754db..f3c23fe9ad9c 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -885,8 +885,9 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
if (ret)
return ret;
- DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
- stream->period_exponent);
+ drm_dbg(&stream->perf->i915->drm,
+ "OA buffer overflow (exponent = %d): force restart\n",
+ stream->period_exponent);
stream->perf->ops.oa_disable(stream);
stream->perf->ops.oa_enable(stream);
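This hunk is the first of a file-wide conversion from the deprecated global DRM_DEBUG() to drm_dbg(), which prefixes each message with the owning drm_device so logs from multiple GPUs remain attributable. The pattern, with the old call shown only for contrast:

	DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
		  stream->period_exponent);

	drm_dbg(&stream->perf->i915->drm,
		"OA buffer overflow (exponent = %d): force restart\n",
		stream->period_exponent);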
@@ -1108,8 +1109,9 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
if (ret)
return ret;
- DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
- stream->period_exponent);
+ drm_dbg(&stream->perf->i915->drm,
+ "OA buffer overflow (exponent = %d): force restart\n",
+ stream->period_exponent);
stream->perf->ops.oa_disable(stream);
stream->perf->ops.oa_enable(stream);
@@ -2863,7 +2865,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
int ret;
if (!props->engine) {
- DRM_DEBUG("OA engine not specified\n");
+ drm_dbg(&stream->perf->i915->drm,
+ "OA engine not specified\n");
return -EINVAL;
}
@@ -2873,18 +2876,21 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* IDs
*/
if (!perf->metrics_kobj) {
- DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
+ drm_dbg(&stream->perf->i915->drm,
+ "OA metrics weren't advertised via sysfs\n");
return -EINVAL;
}
if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
(GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
- DRM_DEBUG("Only OA report sampling supported\n");
+ drm_dbg(&stream->perf->i915->drm,
+ "Only OA report sampling supported\n");
return -EINVAL;
}
if (!perf->ops.enable_metric_set) {
- DRM_DEBUG("OA unit not supported\n");
+ drm_dbg(&stream->perf->i915->drm,
+ "OA unit not supported\n");
return -ENODEV;
}
@@ -2894,12 +2900,14 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* we currently only allow exclusive access
*/
if (perf->exclusive_stream) {
- DRM_DEBUG("OA unit already in use\n");
+ drm_dbg(&stream->perf->i915->drm,
+ "OA unit already in use\n");
return -EBUSY;
}
if (!props->oa_format) {
- DRM_DEBUG("OA report format not specified\n");
+ drm_dbg(&stream->perf->i915->drm,
+ "OA report format not specified\n");
return -EINVAL;
}
@@ -2929,20 +2937,23 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (stream->ctx) {
ret = oa_get_render_ctx_id(stream);
if (ret) {
- DRM_DEBUG("Invalid context id to filter with\n");
+ drm_dbg(&stream->perf->i915->drm,
+ "Invalid context id to filter with\n");
return ret;
}
}
ret = alloc_noa_wait(stream);
if (ret) {
- DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
+ drm_dbg(&stream->perf->i915->drm,
+ "Unable to allocate NOA wait batch buffer\n");
goto err_noa_wait_alloc;
}
stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
if (!stream->oa_config) {
- DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
+ drm_dbg(&stream->perf->i915->drm,
+ "Invalid OA config id=%i\n", props->metrics_set);
ret = -EINVAL;
goto err_config;
}
@@ -2973,11 +2984,13 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
ret = i915_perf_stream_enable_sync(stream);
if (ret) {
- DRM_DEBUG("Unable to enable metric set\n");
+ drm_dbg(&stream->perf->i915->drm,
+ "Unable to enable metric set\n");
goto err_enable;
}
- DRM_DEBUG("opening stream oa config uuid=%s\n",
+ drm_dbg(&stream->perf->i915->drm,
+ "opening stream oa config uuid=%s\n",
stream->oa_config->uuid);
hrtimer_init(&stream->poll_check_timer,
@@ -3429,7 +3442,8 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
if (IS_ERR(specific_ctx)) {
- DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
+ drm_dbg(&perf->i915->drm,
+ "Failed to look up context with ID %u for opening perf stream\n",
ctx_handle);
ret = PTR_ERR(specific_ctx);
goto err;
@@ -3463,7 +3477,8 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
if (props->hold_preemption) {
if (!props->single_context) {
- DRM_DEBUG("preemption disable with no context\n");
+ drm_dbg(&perf->i915->drm,
+ "preemption disable with no context\n");
ret = -EINVAL;
goto err;
}
@@ -3485,7 +3500,8 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
*/
if (privileged_op &&
i915_perf_stream_paranoid && !perfmon_capable()) {
- DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
+ drm_dbg(&perf->i915->drm,
+ "Insufficient privileges to open i915 perf stream\n");
ret = -EACCES;
goto err_ctx;
}
@@ -3592,7 +3608,8 @@ static int read_properties_unlocked(struct i915_perf *perf,
props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
if (!n_props) {
- DRM_DEBUG("No i915 perf properties given\n");
+ drm_dbg(&perf->i915->drm,
+ "No i915 perf properties given\n");
return -EINVAL;
}
@@ -3601,7 +3618,8 @@ static int read_properties_unlocked(struct i915_perf *perf,
I915_ENGINE_CLASS_RENDER,
0);
if (!props->engine) {
- DRM_DEBUG("No RENDER-capable engines\n");
+ drm_dbg(&perf->i915->drm,
+ "No RENDER-capable engines\n");
return -EINVAL;
}
@@ -3612,7 +3630,8 @@ static int read_properties_unlocked(struct i915_perf *perf,
* from userspace.
*/
if (n_props >= DRM_I915_PERF_PROP_MAX) {
- DRM_DEBUG("More i915 perf properties specified than exist\n");
+ drm_dbg(&perf->i915->drm,
+ "More i915 perf properties specified than exist\n");
return -EINVAL;
}
@@ -3629,7 +3648,8 @@ static int read_properties_unlocked(struct i915_perf *perf,
return ret;
if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
- DRM_DEBUG("Unknown i915 perf property ID\n");
+ drm_dbg(&perf->i915->drm,
+ "Unknown i915 perf property ID\n");
return -EINVAL;
}
@@ -3644,19 +3664,22 @@ static int read_properties_unlocked(struct i915_perf *perf,
break;
case DRM_I915_PERF_PROP_OA_METRICS_SET:
if (value == 0) {
- DRM_DEBUG("Unknown OA metric set ID\n");
+ drm_dbg(&perf->i915->drm,
+ "Unknown OA metric set ID\n");
return -EINVAL;
}
props->metrics_set = value;
break;
case DRM_I915_PERF_PROP_OA_FORMAT:
if (value == 0 || value >= I915_OA_FORMAT_MAX) {
- DRM_DEBUG("Out-of-range OA report format %llu\n",
+ drm_dbg(&perf->i915->drm,
+ "Out-of-range OA report format %llu\n",
value);
return -EINVAL;
}
if (!oa_format_valid(perf, value)) {
- DRM_DEBUG("Unsupported OA report format %llu\n",
+ drm_dbg(&perf->i915->drm,
+ "Unsupported OA report format %llu\n",
value);
return -EINVAL;
}
@@ -3664,7 +3687,8 @@ static int read_properties_unlocked(struct i915_perf *perf,
break;
case DRM_I915_PERF_PROP_OA_EXPONENT:
if (value > OA_EXPONENT_MAX) {
- DRM_DEBUG("OA timer exponent too high (> %u)\n",
+ drm_dbg(&perf->i915->drm,
+ "OA timer exponent too high (> %u)\n",
OA_EXPONENT_MAX);
return -EINVAL;
}
@@ -3692,7 +3716,8 @@ static int read_properties_unlocked(struct i915_perf *perf,
oa_freq_hz = 0;
if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
- DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
+ drm_dbg(&perf->i915->drm,
+ "OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
i915_oa_max_sample_rate);
return -EACCES;
}
@@ -3706,16 +3731,25 @@ static int read_properties_unlocked(struct i915_perf *perf,
case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
struct drm_i915_gem_context_param_sseu user_sseu;
+ if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) {
+ drm_dbg(&perf->i915->drm,
+ "SSEU config not supported on gfx %x\n",
+ GRAPHICS_VER_FULL(perf->i915));
+ return -ENODEV;
+ }
+
if (copy_from_user(&user_sseu,
u64_to_user_ptr(value),
sizeof(user_sseu))) {
- DRM_DEBUG("Unable to copy global sseu parameter\n");
+ drm_dbg(&perf->i915->drm,
+ "Unable to copy global sseu parameter\n");
return -EFAULT;
}
ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
if (ret) {
- DRM_DEBUG("Invalid SSEU configuration\n");
+ drm_dbg(&perf->i915->drm,
+ "Invalid SSEU configuration\n");
return ret;
}
props->has_sseu = true;
@@ -3723,7 +3757,8 @@ static int read_properties_unlocked(struct i915_perf *perf,
}
case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
if (value < 100000 /* 100us */) {
- DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
+ drm_dbg(&perf->i915->drm,
+ "OA availability timer too small (%lluns < 100us)\n",
value);
return -EINVAL;
}
@@ -3774,7 +3809,8 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
int ret;
if (!perf->i915) {
- DRM_DEBUG("i915 perf interface not available for this system\n");
+ drm_dbg(&perf->i915->drm,
+ "i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
@@ -3782,7 +3818,8 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
I915_PERF_FLAG_FD_NONBLOCK |
I915_PERF_FLAG_DISABLED;
if (param->flags & ~known_open_flags) {
- DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
+ drm_dbg(&perf->i915->drm,
+ "Unknown drm_i915_perf_open_param flag\n");
return -EINVAL;
}
@@ -4028,7 +4065,8 @@ static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
goto addr_err;
if (!is_valid(perf, addr)) {
- DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
+ drm_dbg(&perf->i915->drm,
+ "Invalid oa_reg address: %X\n", addr);
err = -EINVAL;
goto addr_err;
}
@@ -4102,30 +4140,35 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
int err, id;
if (!perf->i915) {
- DRM_DEBUG("i915 perf interface not available for this system\n");
+ drm_dbg(&perf->i915->drm,
+ "i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
if (!perf->metrics_kobj) {
- DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
+ drm_dbg(&perf->i915->drm,
+ "OA metrics weren't advertised via sysfs\n");
return -EINVAL;
}
if (i915_perf_stream_paranoid && !perfmon_capable()) {
- DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Insufficient privileges to add i915 OA config\n");
return -EACCES;
}
if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
(!args->boolean_regs_ptr || !args->n_boolean_regs) &&
(!args->flex_regs_ptr || !args->n_flex_regs)) {
- DRM_DEBUG("No OA registers given\n");
+ drm_dbg(&perf->i915->drm,
+ "No OA registers given\n");
return -EINVAL;
}
oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
if (!oa_config) {
- DRM_DEBUG("Failed to allocate memory for the OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Failed to allocate memory for the OA config\n");
return -ENOMEM;
}
@@ -4133,7 +4176,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
kref_init(&oa_config->ref);
if (!uuid_is_valid(args->uuid)) {
- DRM_DEBUG("Invalid uuid format for OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Invalid uuid format for OA config\n");
err = -EINVAL;
goto reg_err;
}
@@ -4150,7 +4194,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
args->n_mux_regs);
if (IS_ERR(regs)) {
- DRM_DEBUG("Failed to create OA config for mux_regs\n");
+ drm_dbg(&perf->i915->drm,
+ "Failed to create OA config for mux_regs\n");
err = PTR_ERR(regs);
goto reg_err;
}
@@ -4163,7 +4208,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
args->n_boolean_regs);
if (IS_ERR(regs)) {
- DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
+ drm_dbg(&perf->i915->drm,
+ "Failed to create OA config for b_counter_regs\n");
err = PTR_ERR(regs);
goto reg_err;
}
@@ -4182,7 +4228,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
args->n_flex_regs);
if (IS_ERR(regs)) {
- DRM_DEBUG("Failed to create OA config for flex_regs\n");
+ drm_dbg(&perf->i915->drm,
+ "Failed to create OA config for flex_regs\n");
err = PTR_ERR(regs);
goto reg_err;
}
@@ -4198,7 +4245,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
*/
idr_for_each_entry(&perf->metrics_idr, tmp, id) {
if (!strcmp(tmp->uuid, oa_config->uuid)) {
- DRM_DEBUG("OA config already exists with this uuid\n");
+ drm_dbg(&perf->i915->drm,
+ "OA config already exists with this uuid\n");
err = -EADDRINUSE;
goto sysfs_err;
}
@@ -4206,7 +4254,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
err = create_dynamic_oa_sysfs_entry(perf, oa_config);
if (err) {
- DRM_DEBUG("Failed to create sysfs entry for OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Failed to create sysfs entry for OA config\n");
goto sysfs_err;
}
@@ -4215,14 +4264,16 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
oa_config, 2,
0, GFP_KERNEL);
if (oa_config->id < 0) {
- DRM_DEBUG("Failed to create sysfs entry for OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Failed to create sysfs entry for OA config\n");
err = oa_config->id;
goto sysfs_err;
}
mutex_unlock(&perf->metrics_lock);
- DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
+ drm_dbg(&perf->i915->drm,
+ "Added config %s id=%i\n", oa_config->uuid, oa_config->id);
return oa_config->id;
@@ -4230,7 +4281,8 @@ sysfs_err:
mutex_unlock(&perf->metrics_lock);
reg_err:
i915_oa_config_put(oa_config);
- DRM_DEBUG("Failed to add new OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Failed to add new OA config\n");
return err;
}
@@ -4254,12 +4306,14 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
int ret;
if (!perf->i915) {
- DRM_DEBUG("i915 perf interface not available for this system\n");
+ drm_dbg(&perf->i915->drm,
+ "i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
if (i915_perf_stream_paranoid && !perfmon_capable()) {
- DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Insufficient privileges to remove i915 OA config\n");
return -EACCES;
}
@@ -4269,7 +4323,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
oa_config = idr_find(&perf->metrics_idr, *arg);
if (!oa_config) {
- DRM_DEBUG("Failed to remove unknown OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Failed to remove unknown OA config\n");
ret = -ENOENT;
goto err_unlock;
}
@@ -4282,7 +4337,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&perf->metrics_lock);
- DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
+ drm_dbg(&perf->i915->drm,
+ "Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
i915_oa_config_put(oa_config);
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 7584cec53d5d..6ec9c9fb7b0d 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -31,10 +31,12 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
static int fill_topology_info(const struct sseu_dev_info *sseu,
struct drm_i915_query_item *query_item,
- const u8 *subslice_mask)
+ intel_sseu_ss_mask_t subslice_mask)
{
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
+ int ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
+ int eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
int ret;
BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
@@ -43,8 +45,8 @@ static int fill_topology_info(const struct sseu_dev_info *sseu,
return -ENODEV;
slice_length = sizeof(sseu->slice_mask);
- subslice_length = sseu->max_slices * sseu->ss_stride;
- eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
+ subslice_length = sseu->max_slices * ss_stride;
+ eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
total_length = sizeof(topo) + slice_length + subslice_length +
eu_length;
@@ -59,9 +61,9 @@ static int fill_topology_info(const struct sseu_dev_info *sseu,
topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
topo.subslice_offset = slice_length;
- topo.subslice_stride = sseu->ss_stride;
+ topo.subslice_stride = ss_stride;
topo.eu_offset = slice_length + subslice_length;
- topo.eu_stride = sseu->eu_stride;
+ topo.eu_stride = eu_stride;
if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
&topo, sizeof(topo)))
@@ -71,15 +73,15 @@ static int fill_topology_info(const struct sseu_dev_info *sseu,
&sseu->slice_mask, slice_length))
return -EFAULT;
- if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
- sizeof(topo) + slice_length),
- subslice_mask, subslice_length))
+ if (intel_sseu_copy_ssmask_to_user(u64_to_user_ptr(query_item->data_ptr +
+ sizeof(topo) + slice_length),
+ sseu))
return -EFAULT;
- if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
- sizeof(topo) +
- slice_length + subslice_length),
- sseu->eu_mask, eu_length))
+ if (intel_sseu_copy_eumask_to_user(u64_to_user_ptr(query_item->data_ptr +
+ sizeof(topo) +
+ slice_length + subslice_length),
+ sseu))
return -EFAULT;
return total_length;
@@ -496,7 +498,21 @@ static int query_memregion_info(struct drm_i915_private *i915,
info.region.memory_class = mr->type;
info.region.memory_instance = mr->instance;
info.probed_size = mr->total;
- info.unallocated_size = mr->avail;
+
+ if (mr->type == INTEL_MEMORY_LOCAL)
+ info.probed_cpu_visible_size = mr->io_size;
+ else
+ info.probed_cpu_visible_size = mr->total;
+
+ if (perfmon_capable()) {
+ intel_memory_region_avail(mr,
+ &info.unallocated_size,
+ &info.unallocated_cpu_visible_size);
+ } else {
+ info.unallocated_size = info.probed_size;
+ info.unallocated_cpu_visible_size =
+ info.probed_cpu_visible_size;
+ }
if (__copy_to_user(info_ptr, &info, sizeof(info)))
return -EFAULT;
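Userspace reaches the new probed_cpu_visible_size/unallocated_cpu_visible_size fields through the existing memory-regions query; note that without CAP_PERFMON the kernel now echoes the probed sizes back instead of reporting real utilization. A hedged userspace sketch of the usual two-pass query (assumes the uAPI header from libdrm):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static struct drm_i915_query_memory_regions *query_regions(int fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
	};
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	struct drm_i915_query_memory_regions *info;

	/* pass 1: data_ptr == 0, kernel reports the required length */
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
		return NULL;

	info = calloc(1, item.length);
	if (!info)
		return NULL;
	item.data_ptr = (uintptr_t)info;

	/* pass 2: kernel fills the region array */
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q)) {
		free(info);
		return NULL;
	}
	return info;	/* then read regions[i].probed_cpu_visible_size */
}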
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4f5a51bb9e1e..3168d7007e10 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -115,7 +115,7 @@
* #define GEN8_BAR _MMIO(0xb888)
*/
-#define DISPLAY_MMIO_BASE(dev_priv) (INTEL_INFO(dev_priv)->display_mmio_offset)
+#define DISPLAY_MMIO_BASE(dev_priv) (INTEL_INFO(dev_priv)->display.mmio_offset)
/*
* Given the first two numbers __a and __b of arbitrarily many evenly spaced
@@ -161,16 +161,15 @@
* Device info offset array based helpers for groups of registers with unevenly
* spaced base offsets.
*/
-#define _MMIO_PIPE2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
- INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
- DISPLAY_MMIO_BASE(dev_priv))
-#define _TRANS2(tran, reg) (INTEL_INFO(dev_priv)->trans_offsets[(tran)] - \
- INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
- DISPLAY_MMIO_BASE(dev_priv))
-#define _MMIO_TRANS2(tran, reg) _MMIO(_TRANS2(tran, reg))
-#define _CURSOR2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
- INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
- DISPLAY_MMIO_BASE(dev_priv))
+#define _MMIO_PIPE2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->display.pipe_offsets[(pipe)] - \
+ INTEL_INFO(dev_priv)->display.pipe_offsets[PIPE_A] + \
+ DISPLAY_MMIO_BASE(dev_priv) + (reg))
+#define _MMIO_TRANS2(tran, reg) _MMIO(INTEL_INFO(dev_priv)->display.trans_offsets[(tran)] - \
+ INTEL_INFO(dev_priv)->display.trans_offsets[TRANSCODER_A] + \
+ DISPLAY_MMIO_BASE(dev_priv) + (reg))
+#define _MMIO_CURSOR2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->display.cursor_offsets[(pipe)] - \
+ INTEL_INFO(dev_priv)->display.cursor_offsets[PIPE_A] + \
+ DISPLAY_MMIO_BASE(dev_priv) + (reg))
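The rework retires the bare _TRANS2()/_CURSOR2() helpers so every user goes through one _MMIO_*2 entry point, and per-register offsets now travel inside the reg argument (see the EDP_PSR_AUX_DATA change below). Expanding one caller by hand shows the arithmetic; a sketch of what EDP_PSR_CTL(tran) becomes after this change:

	_MMIO(INTEL_INFO(dev_priv)->display.trans_offsets[(tran)] -
	      INTEL_INFO(dev_priv)->display.trans_offsets[TRANSCODER_A] +
	      DISPLAY_MMIO_BASE(dev_priv) + 0x60800 /* _SRD_CTL_A */)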
#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_FIELD(mask, value) ({ \
@@ -976,6 +975,14 @@
#define GEN12_COMPUTE2_RING_BASE 0x1e000
#define GEN12_COMPUTE3_RING_BASE 0x26000
#define BLT_RING_BASE 0x22000
+#define XEHPC_BCS1_RING_BASE 0x3e0000
+#define XEHPC_BCS2_RING_BASE 0x3e2000
+#define XEHPC_BCS3_RING_BASE 0x3e4000
+#define XEHPC_BCS4_RING_BASE 0x3e6000
+#define XEHPC_BCS5_RING_BASE 0x3e8000
+#define XEHPC_BCS6_RING_BASE 0x3ea000
+#define XEHPC_BCS7_RING_BASE 0x3ec000
+#define XEHPC_BCS8_RING_BASE 0x3ee000
#define DG1_GSC_HECI1_BASE 0x00258000
#define DG1_GSC_HECI2_BASE 0x00259000
#define DG2_GSC_HECI1_BASE 0x00373000
@@ -1846,6 +1853,7 @@
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014)
+#define PVC_RP_STATE_CAP _MMIO(0x281014)
#define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8)
#define GT0_PERF_LIMIT_REASONS_MASK 0xde3
@@ -2162,7 +2170,7 @@
*/
#define _SRD_CTL_A 0x60800
#define _SRD_CTL_EDP 0x6f800
-#define EDP_PSR_CTL(tran) _MMIO(_TRANS2(tran, _SRD_CTL_A))
+#define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A)
#define EDP_PSR_ENABLE (1 << 31)
#define BDW_PSR_SINGLE_FRAME (1 << 30)
#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
@@ -2208,11 +2216,11 @@
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
-#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_TRANS2(tran, _SRD_AUX_DATA_A) + (i) + 4) /* 5 registers */
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) + 4) /* 5 registers */
#define _SRD_STATUS_A 0x60840
#define _SRD_STATUS_EDP 0x6f840
-#define EDP_PSR_STATUS(tran) _MMIO(_TRANS2(tran, _SRD_STATUS_A))
+#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A)
#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
#define EDP_PSR_STATUS_STATE_SHIFT 29
#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
@@ -2239,13 +2247,13 @@
#define _SRD_PERF_CNT_A 0x60844
#define _SRD_PERF_CNT_EDP 0x6f844
-#define EDP_PSR_PERF_CNT(tran) _MMIO(_TRANS2(tran, _SRD_PERF_CNT_A))
+#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
/* PSR_MASK on SKL+ */
#define _SRD_DEBUG_A 0x60860
#define _SRD_DEBUG_EDP 0x6f860
-#define EDP_PSR_DEBUG(tran) _MMIO(_TRANS2(tran, _SRD_DEBUG_A))
+#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A)
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
@@ -2320,7 +2328,7 @@
#define _PSR2_SU_STATUS_A 0x60914
#define _PSR2_SU_STATUS_EDP 0x6f914
-#define _PSR2_SU_STATUS(tran, index) _MMIO(_TRANS2(tran, _PSR2_SU_STATUS_A) + (index) * 4)
+#define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(tran, _PSR2_SU_STATUS_A + (index) * 4)
#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3))
#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10)
#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
@@ -4319,12 +4327,12 @@
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
-#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
-#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
-#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
-#define CURSIZE(pipe) _CURSOR2(pipe, _CURASIZE)
-#define CUR_FBC_CTL(pipe) _CURSOR2(pipe, _CUR_FBC_CTL_A)
-#define CURSURFLIVE(pipe) _CURSOR2(pipe, _CURASURFLIVE)
+#define CURCNTR(pipe) _MMIO_CURSOR2(pipe, _CURACNTR)
+#define CURBASE(pipe) _MMIO_CURSOR2(pipe, _CURABASE)
+#define CURPOS(pipe) _MMIO_CURSOR2(pipe, _CURAPOS)
+#define CURSIZE(pipe) _MMIO_CURSOR2(pipe, _CURASIZE)
+#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(pipe, _CUR_FBC_CTL_A)
+#define CURSURFLIVE(pipe) _MMIO_CURSOR2(pipe, _CURASURFLIVE)
#define CURSOR_A_OFFSET 0x70080
#define CURSOR_B_OFFSET 0x700c0
@@ -4399,7 +4407,7 @@
#define DSPLINOFF(plane) DSPADDR(plane)
#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE)
-#define DSPGAMC(plane, i) _MMIO(_PIPE2(plane, _DSPAGAMC) + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
+#define DSPGAMC(plane, i) _MMIO_PIPE2(plane, _DSPAGAMC + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
@@ -6689,6 +6697,9 @@
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
#define GEN6_PCODE_READY (1 << 31)
+#define GEN6_PCODE_MB_PARAM2 REG_GENMASK(23, 16)
+#define GEN6_PCODE_MB_PARAM1 REG_GENMASK(15, 8)
+#define GEN6_PCODE_MB_COMMAND REG_GENMASK(7, 0)
#define GEN6_PCODE_ERROR_MASK 0xFF
#define GEN6_PCODE_SUCCESS 0x0
#define GEN6_PCODE_ILLEGAL_CMD 0x1
@@ -6755,6 +6766,14 @@
#define DG1_UNCORE_GET_INIT_STATUS 0x0
#define DG1_UNCORE_INIT_STATUS_COMPLETE 0x1
#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
+#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* xehpsdv, pvc */
+/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
+#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
+#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
+/* PCODE_MBOX_DOMAIN_* - mailbox domain IDs */
+/* XEHP_PCODE_FREQUENCY_CONFIG param2 */
+#define PCODE_MBOX_DOMAIN_NONE 0x0
+#define PCODE_MBOX_DOMAIN_MEDIAFF 0x3
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -6774,163 +6793,12 @@
(((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
#define GEN7_L3CDERRST1_ENABLE (1 << 7)
-/* Audio */
-#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
-#define INTEL_AUDIO_DEVCL 0x808629FB
-#define INTEL_AUDIO_DEVBLC 0x80862801
-#define INTEL_AUDIO_DEVCTG 0x80862802
-
-#define G4X_AUD_CNTL_ST _MMIO(0x620B4)
-#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
-#define G4X_ELDV_DEVCTG (1 << 14)
-#define G4X_ELD_ADDR_MASK (0xf << 5)
-#define G4X_ELD_ACK (1 << 4)
-#define G4X_HDMIW_HDMIEDID _MMIO(0x6210C)
-
-#define _IBX_HDMIW_HDMIEDID_A 0xE2050
-#define _IBX_HDMIW_HDMIEDID_B 0xE2150
-#define IBX_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _IBX_HDMIW_HDMIEDID_A, \
- _IBX_HDMIW_HDMIEDID_B)
-#define _IBX_AUD_CNTL_ST_A 0xE20B4
-#define _IBX_AUD_CNTL_ST_B 0xE21B4
-#define IBX_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \
- _IBX_AUD_CNTL_ST_B)
-#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
-#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
-#define IBX_ELD_ACK (1 << 4)
-#define IBX_AUD_CNTL_ST2 _MMIO(0xE20C0)
-#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
-#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
-
-#define _CPT_HDMIW_HDMIEDID_A 0xE5050
-#define _CPT_HDMIW_HDMIEDID_B 0xE5150
-#define CPT_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _CPT_HDMIW_HDMIEDID_A, _CPT_HDMIW_HDMIEDID_B)
-#define _CPT_AUD_CNTL_ST_A 0xE50B4
-#define _CPT_AUD_CNTL_ST_B 0xE51B4
-#define CPT_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CNTL_ST_A, _CPT_AUD_CNTL_ST_B)
-#define CPT_AUD_CNTRL_ST2 _MMIO(0xE50C0)
-
-#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
-#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
-#define VLV_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _VLV_HDMIW_HDMIEDID_A, _VLV_HDMIW_HDMIEDID_B)
-#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
-#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
-#define VLV_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CNTL_ST_A, _VLV_AUD_CNTL_ST_B)
-#define VLV_AUD_CNTL_ST2 _MMIO(VLV_DISPLAY_BASE + 0x620C0)
-
/* These are the 4 32-bit write offset registers for each stream
* output buffer. It determines the offset from the
* 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
*/
#define GEN7_SO_WRITE_OFFSET(n) _MMIO(0x5280 + (n) * 4)
-#define _IBX_AUD_CONFIG_A 0xe2000
-#define _IBX_AUD_CONFIG_B 0xe2100
-#define IBX_AUD_CFG(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CONFIG_A, _IBX_AUD_CONFIG_B)
-#define _CPT_AUD_CONFIG_A 0xe5000
-#define _CPT_AUD_CONFIG_B 0xe5100
-#define CPT_AUD_CFG(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CONFIG_A, _CPT_AUD_CONFIG_B)
-#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
-#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
-#define VLV_AUD_CFG(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CONFIG_A, _VLV_AUD_CONFIG_B)
-
-#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
-#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
-#define AUD_CONFIG_UPPER_N_SHIFT 20
-#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
-#define AUD_CONFIG_LOWER_N_SHIFT 4
-#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
-#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
-#define AUD_CONFIG_N(n) \
- (((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \
- (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16)
-#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16)
-#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
-
-/* HSW Audio */
-#define _HSW_AUD_CONFIG_A 0x65000
-#define _HSW_AUD_CONFIG_B 0x65100
-#define HSW_AUD_CFG(trans) _MMIO_TRANS(trans, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
-
-#define _HSW_AUD_MISC_CTRL_A 0x65010
-#define _HSW_AUD_MISC_CTRL_B 0x65110
-#define HSW_AUD_MISC_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
-
-#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
-#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
-#define HSW_AUD_M_CTS_ENABLE(trans) _MMIO_TRANS(trans, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
-#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
-#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
-#define AUD_CONFIG_M_MASK 0xfffff
-
-#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
-#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
-#define HSW_AUD_DIP_ELD_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
-
-/* Audio Digital Converter */
-#define _HSW_AUD_DIG_CNVT_1 0x65080
-#define _HSW_AUD_DIG_CNVT_2 0x65180
-#define AUD_DIG_CNVT(trans) _MMIO_TRANS(trans, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
-#define DIP_PORT_SEL_MASK 0x3
-
-#define _HSW_AUD_EDID_DATA_A 0x65050
-#define _HSW_AUD_EDID_DATA_B 0x65150
-#define HSW_AUD_EDID_DATA(trans) _MMIO_TRANS(trans, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
-
-#define HSW_AUD_PIPE_CONV_CFG _MMIO(0x6507c)
-#define HSW_AUD_PIN_ELD_CP_VLD _MMIO(0x650c0)
-#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
-#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
-#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
-#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
-
-#define _AUD_TCA_DP_2DOT0_CTRL 0x650bc
-#define _AUD_TCB_DP_2DOT0_CTRL 0x651bc
-#define AUD_DP_2DOT0_CTRL(trans) _MMIO_TRANS(trans, _AUD_TCA_DP_2DOT0_CTRL, _AUD_TCB_DP_2DOT0_CTRL)
-#define AUD_ENABLE_SDP_SPLIT REG_BIT(31)
-
-#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
-#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
-
-#define AUD_FREQ_CNTRL _MMIO(0x65900)
-#define AUD_PIN_BUF_CTL _MMIO(0x48414)
-#define AUD_PIN_BUF_ENABLE REG_BIT(31)
-
-#define AUD_TS_CDCLK_M _MMIO(0x65ea0)
-#define AUD_TS_CDCLK_M_EN REG_BIT(31)
-#define AUD_TS_CDCLK_N _MMIO(0x65ea4)
-
-/* Display Audio Config Reg */
-#define AUD_CONFIG_BE _MMIO(0x65ef0)
-#define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe)))
-#define HBLANK_EARLY_ENABLE_TGL(pipe) (0x1 << (24 + (pipe)))
-#define HBLANK_START_COUNT_MASK(pipe) (0x7 << (3 + ((pipe) * 6)))
-#define HBLANK_START_COUNT(pipe, val) (((val) & 0x7) << (3 + ((pipe)) * 6))
-#define NUMBER_SAMPLES_PER_LINE_MASK(pipe) (0x3 << ((pipe) * 6))
-#define NUMBER_SAMPLES_PER_LINE(pipe, val) (((val) & 0x3) << ((pipe) * 6))
-
-#define HBLANK_START_COUNT_8 0
-#define HBLANK_START_COUNT_16 1
-#define HBLANK_START_COUNT_32 2
-#define HBLANK_START_COUNT_64 3
-#define HBLANK_START_COUNT_96 4
-#define HBLANK_START_COUNT_128 5
-
/*
* HSW - ICL power wells
*
@@ -8476,23 +8344,6 @@ enum skl_power_gate {
#define SGGI_DIS REG_BIT(15)
#define SGR_DIS REG_BIT(13)
-#define XEHPSDV_TILE0_ADDR_RANGE _MMIO(0x4900)
-#define XEHPSDV_TILE_LMEM_RANGE_SHIFT 8
-
-#define XEHPSDV_FLAT_CCS_BASE_ADDR _MMIO(0x4910)
-#define XEHPSDV_CCS_BASE_SHIFT 8
-
-/* gamt regs */
-#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
-#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
-#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */
-#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
-#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */
-
-#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */
-#define MMCD_PCLA (1 << 31)
-#define MMCD_HOTSPOT_EN (1 << 27)
-
#define _ICL_PHY_MISC_A 0x64C00
#define _ICL_PHY_MISC_B 0x64C04
#define _DG2_PHY_MISC_TC1 0x64C14 /* TC1="PHY E" but offset as if "PHY F" */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 73d5195146b0..62fad16a55e8 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -60,7 +60,7 @@ static struct kmem_cache *slab_execute_cbs;
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
- return dev_name(to_request(fence)->engine->i915->drm.dev);
+ return dev_name(to_request(fence)->i915->drm.dev);
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
@@ -134,17 +134,42 @@ static void i915_fence_release(struct dma_fence *fence)
i915_sw_fence_fini(&rq->semaphore);
/*
- * Keep one request on each engine for reserved use under mempressure,
+ * Keep one request on each engine for reserved use under mempressure;
* do not use with virtual engines as this really is only needed for
* kernel contexts.
+ *
+ * We do not hold a reference to the engine here and so have to be
+ * very careful in what rq->engine we poke. The virtual engine is
+ * referenced via the rq->context and we released that ref during
+ * i915_request_retire(), ergo we must not dereference a virtual
+ * engine here. Not that we would want to, as the only consumer of
+ * the reserved engine->request_pool is the power management parking,
+ * which must-not-fail, and that is only run on the physical engines.
+ *
+ * Since the request must have been executed to have completed,
+ * we know that it will have been processed by the HW and will
+ * not be unsubmitted again, so rq->engine and rq->execution_mask
+ * at this point are stable. rq->execution_mask will be a single
+ * bit if the last and _only_ engine it could execute on was a
+ * physical engine; if it's multiple bits then it started on, and
+ * could still be on, a virtual engine. Thus if the mask is not a
+ * power-of-two we assume that rq->engine may still be a virtual
+ * engine and so a dangling invalid pointer that we cannot dereference.
+ *
+ * For example, consider the flow of a bonded request through a virtual
+ * engine. The request is created with a wide engine mask (all engines
+ * that we might execute on). On processing the bond, the request mask
+ * is reduced to one or more engines. If the request is subsequently
+ * bound to a single engine, it will then be constrained to only
+ * execute on that engine and never returned to the virtual engine
+ * after timeslicing away, see __unwind_incomplete_requests(). Thus we
+ * know that if the rq->execution_mask is a single bit, rq->engine
+ * can be a physical engine with the exact corresponding mask.
*/
if (!intel_engine_is_virtual(rq->engine) &&
- !cmpxchg(&rq->engine->request_pool, NULL, rq)) {
- intel_context_put(rq->context);
+ is_power_of_2(rq->execution_mask) &&
+ !cmpxchg(&rq->engine->request_pool, NULL, rq))
return;
- }
-
- intel_context_put(rq->context);
kmem_cache_free(slab_requests, rq);
}
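The power-of-two test that gates the recycling is the whole safety argument condensed into one branch. Stripped of kernel plumbing, the predicate is just:

#include <stdbool.h>

/* a single set bit == the request ended bound to one physical engine,
 * so its engine pointer is safe to touch during release */
static bool engine_pointer_is_stable(unsigned long execution_mask)
{
	return execution_mask && !(execution_mask & (execution_mask - 1));
}

(engine_pointer_is_stable() is an illustrative name; the driver calls is_power_of_2() directly.)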
@@ -611,7 +636,7 @@ bool __i915_request_submit(struct i915_request *request)
goto active;
}
- if (unlikely(intel_context_is_banned(request->context)))
+ if (unlikely(!intel_context_is_schedulable(request->context)))
i915_request_set_error_once(request, -EIO);
if (unlikely(fatal_error(request->fence.error)))
@@ -921,22 +946,11 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
}
}
- /*
- * Hold a reference to the intel_context over life of an i915_request.
- * Without this an i915_request can exist after the context has been
- * destroyed (e.g. request retired, context closed, but user space holds
- * a reference to the request from an out fence). In the case of GuC
- * submission + virtual engine, the engine that the request references
- * is also destroyed which can trigger bad pointer dref in fence ops
- * (e.g. i915_fence_get_driver_name). We could likely change these
- * functions to avoid touching the engine but let's just be safe and
- * hold the intel_context reference. In execlist mode the request always
- * eventually points to a physical engine so this isn't an issue.
- */
- rq->context = intel_context_get(ce);
+ rq->context = ce;
rq->engine = ce->engine;
rq->ring = ce->ring;
rq->execution_mask = ce->engine->mask;
+ rq->i915 = ce->engine->i915;
ret = intel_timeline_get_seqno(tl, rq, &seqno);
if (ret)
@@ -1008,7 +1022,6 @@ err_unwind:
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
err_free:
- intel_context_put(ce);
kmem_cache_free(slab_requests, rq);
err_unreserve:
intel_context_unpin(ce);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 28b1f9db5487..47041ec68df8 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -196,6 +196,8 @@ struct i915_request {
struct dma_fence fence;
spinlock_t lock;
+ struct drm_i915_private *i915;
+
/**
* Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 0b9b86af6c7f..c229c91071d7 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include "i915_scheduler_types.h"
+#include "i915_tasklet.h"
struct drm_printer;
diff --git a/drivers/gpu/drm/i915/i915_tasklet.h b/drivers/gpu/drm/i915/i915_tasklet.h
new file mode 100644
index 000000000000..5d7069bdf2c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_tasklet.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __I915_TASKLET_H__
+#define __I915_TASKLET_H__
+
+#include <linux/interrupt.h>
+
+static inline void tasklet_lock(struct tasklet_struct *t)
+{
+ while (!tasklet_trylock(t))
+ cpu_relax();
+}
+
+static inline bool tasklet_is_locked(const struct tasklet_struct *t)
+{
+ return test_bit(TASKLET_STATE_RUN, &t->state);
+}
+
+static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
+{
+ if (!atomic_fetch_inc(&t->count))
+ tasklet_unlock_spin_wait(t);
+}
+
+static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
+{
+ return !atomic_read(&t->count);
+}
+
+static inline bool __tasklet_enable(struct tasklet_struct *t)
+{
+ return atomic_dec_and_test(&t->count);
+}
+
+static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
+{
+ return test_bit(TASKLET_STATE_SCHED, &t->state);
+}
+
+#endif /* __I915_TASKLET_H__ */
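The new header only rehomes helpers i915 already relied on; tasklet_lock() is a spin-until-trylock wrapper for short critical sections against a running tasklet. Typical use, sketched against an arbitrary tasklet_struct t:

	tasklet_lock(&t);
	/* inspect or mutate state the tasklet callback owns */
	tasklet_unlock(&t);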
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index a5109548abc0..427de1aaab36 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -104,18 +104,15 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
min_page_size,
&bman_res->blocks,
bman_res->flags);
- mutex_unlock(&bman->lock);
if (unlikely(err))
goto err_free_blocks;
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;
- mutex_lock(&bman->lock);
drm_buddy_block_trim(mm,
original_size,
&bman_res->blocks);
- mutex_unlock(&bman->lock);
}
if (lpfn <= bman->visible_size) {
@@ -137,11 +134,10 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
}
}
- if (bman_res->used_visible_size) {
- mutex_lock(&bman->lock);
+ if (bman_res->used_visible_size)
bman->visible_avail -= bman_res->used_visible_size;
- mutex_unlock(&bman->lock);
- }
+
+ mutex_unlock(&bman->lock);
if (place->lpfn - place->fpfn == n_pages)
bman_res->base.start = place->fpfn;
@@ -154,7 +150,6 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
return 0;
err_free_blocks:
- mutex_lock(&bman->lock);
drm_buddy_free_list(mm, &bman_res->blocks);
mutex_unlock(&bman->lock);
err_free_res:
@@ -365,6 +360,26 @@ u64 i915_ttm_buddy_man_visible_size(struct ttm_resource_manager *man)
return bman->visible_size;
}
+/**
+ * i915_ttm_buddy_man_avail - Query the avail tracking for the manager.
+ *
+ * @man: The buddy allocator ttm manager
+ * @avail: The total available memory in pages for the entire manager.
+ * @visible_avail: The total available memory in pages for the CPU visible
+ * portion. Note that this will always give the same value as @avail on
+ * configurations that don't have a small BAR.
+ */
+void i915_ttm_buddy_man_avail(struct ttm_resource_manager *man,
+ u64 *avail, u64 *visible_avail)
+{
+ struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
+
+ mutex_lock(&bman->lock);
+ *avail = bman->mm.avail >> PAGE_SHIFT;
+ *visible_avail = bman->visible_avail;
+ mutex_unlock(&bman->lock);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
void i915_ttm_buddy_man_force_visible_size(struct ttm_resource_manager *man,
u64 size)
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h
index 52d9586d242c..d64620712830 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h
@@ -61,6 +61,9 @@ int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
u64 i915_ttm_buddy_man_visible_size(struct ttm_resource_manager *man);
+void i915_ttm_buddy_man_avail(struct ttm_resource_manager *man,
+ u64 *avail, u64 *visible_avail);
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
void i915_ttm_buddy_man_force_visible_size(struct ttm_resource_manager *man,
u64 size);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index ea7648e3aa0e..c10d68cdc3ca 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -115,39 +115,6 @@ bool i915_error_injected(void);
#define overflows_type(x, T) \
(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
-static inline bool
-__check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
-{
- size_t sz;
-
- if (check_mul_overflow(count, arr, &sz))
- return false;
-
- if (check_add_overflow(sz, base, &sz))
- return false;
-
- *size = sz;
- return true;
-}
-
-/**
- * check_struct_size() - Calculate size of structure with trailing array.
- * @p: Pointer to the structure.
- * @member: Name of the array member.
- * @n: Number of elements in the array.
- * @sz: Total size of structure and array
- *
- * Calculates size of memory needed for structure @p followed by an
- * array of @n @member elements, like struct_size() but reports
- * whether it overflowed, and the resultant size in @sz
- *
- * Return: false if the calculation overflowed.
- */
-#define check_struct_size(p, member, n, sz) \
- likely(__check_struct_size(sizeof(*(p)), \
- sizeof(*(p)->member) + __must_be_array((p)->member), \
- n, sz))
-
#define ptr_mask_bits(ptr, n) ({ \
unsigned long __v = (unsigned long)(ptr); \
(typeof(ptr))(__v & -BIT(n)); \
@@ -184,8 +151,6 @@ __check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
#define struct_member(T, member) (((T *)0)->member)
-#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)
-
#define fetch_and_zero(ptr) ({ \
typeof(*ptr) __T = *(ptr); \
*(ptr) = (typeof(*ptr))0; \
@@ -228,11 +193,6 @@ static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
})
-static inline u64 ptr_to_u64(const void *ptr)
-{
- return (uintptr_t)ptr;
-}
-
#define u64_to_ptr(T, x) ({ \
typecheck(u64, x); \
(T *)(uintptr_t)(x); \
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 04d12f278f57..ef3b04c7e153 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -310,7 +310,7 @@ struct i915_vma_work {
struct i915_address_space *vm;
struct i915_vm_pt_stash stash;
struct i915_vma_resource *vma_res;
- struct drm_i915_gem_object *pinned;
+ struct drm_i915_gem_object *obj;
struct i915_sw_dma_fence_cb cb;
enum i915_cache_level cache_level;
unsigned int flags;
@@ -321,17 +321,25 @@ static void __vma_bind(struct dma_fence_work *work)
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
struct i915_vma_resource *vma_res = vw->vma_res;
+ /*
+ * We are about to bind the object, which must mean we have already
+ * signaled the work to potentially clear/move the pages underneath. If
+ * something went wrong at that stage then the object should have
+ * unknown_state set, in which case we need to skip the bind.
+ */
+ if (i915_gem_object_has_unknown_state(vw->obj))
+ return;
+
vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
vma_res, vw->cache_level, vw->flags);
-
}
static void __vma_release(struct dma_fence_work *work)
{
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
- if (vw->pinned)
- i915_gem_object_put(vw->pinned);
+ if (vw->obj)
+ i915_gem_object_put(vw->obj);
i915_vm_free_pt_stash(vw->vm, &vw->stash);
if (vw->vma_res)
@@ -517,14 +525,7 @@ int i915_vma_bind(struct i915_vma *vma,
}
work->base.dma.error = 0; /* enable the queue_work() */
-
- /*
- * If we don't have the refcounted pages list, keep a reference
- * on the object to avoid waiting for the async bind to
- * complete in the object destruction path.
- */
- if (!work->vma_res->bi.pages_rsgt)
- work->pinned = i915_gem_object_get(vma->obj);
+ work->obj = i915_gem_object_get(vma->obj);
} else {
ret = i915_gem_object_wait_moving_fence(vma->obj, true);
if (ret) {
@@ -551,13 +552,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
return IOMEM_ERR_PTR(-EINVAL);
- if (!i915_gem_object_is_lmem(vma->obj)) {
- if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
- err = -ENODEV;
- goto err;
- }
- }
-
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
@@ -570,20 +564,33 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
* of pages, that way we can also drop the
* I915_BO_ALLOC_CONTIGUOUS when allocating the object.
*/
- if (i915_gem_object_is_lmem(vma->obj))
+ if (i915_gem_object_is_lmem(vma->obj)) {
ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
vma->obj->base.size);
- else
+ } else if (i915_vma_is_map_and_fenceable(vma)) {
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
vma->node.size);
+ } else {
+ ptr = (void __iomem *)
+ i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ goto err;
+ }
+ ptr = page_pack_bits(ptr, 1);
+ }
+
if (ptr == NULL) {
err = -ENOMEM;
goto err;
}
if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
- io_mapping_unmap(ptr);
+ if (page_unmask_bits(ptr))
+ __i915_gem_object_release_map(vma->obj);
+ else
+ io_mapping_unmap(ptr);
ptr = vma->iomap;
}
}
@@ -597,7 +604,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
i915_vma_set_ggtt_write(vma);
/* NB Access through the GTT requires the device to be awake. */
- return ptr;
+ return page_mask_bits(ptr);
err_unpin:
__i915_vma_unpin(vma);
@@ -615,6 +622,8 @@ void i915_vma_unpin_iomap(struct i915_vma *vma)
{
GEM_BUG_ON(vma->iomap == NULL);
+ /* XXX We keep the mapping until __i915_vma_unbind()/evict() */
+
i915_vma_flush_writes(vma);
i915_vma_unpin_fence(vma);
@@ -1767,7 +1776,10 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
if (vma->iomap == NULL)
return;
- io_mapping_unmap(vma->iomap);
+ if (page_unmask_bits(vma->iomap))
+ __i915_gem_object_release_map(vma->obj);
+ else
+ io_mapping_unmap(vma->iomap);
vma->iomap = NULL;
}
@@ -1911,9 +1923,11 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
/* release the fence reg _after_ flushing */
i915_vma_revoke_fence(vma);
- __i915_vma_iounmap(vma);
clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
+
+ __i915_vma_iounmap(vma);
+
GEM_BUG_ON(vma->fence);
GEM_BUG_ON(i915_vma_has_userfault(vma));
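
The new iomap path above stashes a flag in the low bit of the returned pointer (page_pack_bits(ptr, 1)) so that the unmap paths can tell a pin_map mapping apart from a plain io_mapping. A minimal, self-contained sketch of that low-bit pointer-tagging idiom, assuming page-aligned storage; the pack/unmask/mask helpers here are simplified stand-ins for the kernel macros:

#include <stdint.h>
#include <stdio.h>

#define TAG_MASK 0x1ul

static void *pack_bits(void *ptr, unsigned long tag)
{
    /* page-aligned pointers have zero low bits, so a flag fits there */
    return (void *)((uintptr_t)ptr | (tag & TAG_MASK));
}

static unsigned long unmask_bits(void *ptr)
{
    return (uintptr_t)ptr & TAG_MASK;
}

static void *mask_bits(void *ptr)
{
    /* strip the tag before dereferencing */
    return (void *)((uintptr_t)ptr & ~TAG_MASK);
}

int main(void)
{
    static _Alignas(4096) char page[4096];
    void *tagged = pack_bits(page, 1);

    printf("tag=%lu ptr-restored=%d\n",
           unmask_bits(tagged), mask_bits(tagged) == (void *)page);
    return 0;
}
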
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 7eb893666595..d98fbbd589aa 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -73,6 +73,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(XEHPSDV),
PLATFORM_NAME(DG2),
PLATFORM_NAME(PONTEVECCHIO),
+ PLATFORM_NAME(METEORLAKE),
};
#undef PLATFORM_NAME
@@ -189,16 +190,26 @@ static const u16 subplatform_rpl_ids[] = {
static const u16 subplatform_g10_ids[] = {
INTEL_DG2_G10_IDS(0),
+ INTEL_ATS_M150_IDS(0),
};
static const u16 subplatform_g11_ids[] = {
INTEL_DG2_G11_IDS(0),
+ INTEL_ATS_M75_IDS(0),
};
static const u16 subplatform_g12_ids[] = {
INTEL_DG2_G12_IDS(0),
};
+static const u16 subplatform_m_ids[] = {
+ INTEL_MTL_M_IDS(0),
+};
+
+static const u16 subplatform_p_ids[] = {
+ INTEL_MTL_P_IDS(0),
+};
+
static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
for (; num; num--, p++) {
@@ -253,6 +264,12 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
} else if (find_devid(devid, subplatform_g12_ids,
ARRAY_SIZE(subplatform_g12_ids))) {
mask = BIT(INTEL_SUBPLATFORM_G12);
+ } else if (find_devid(devid, subplatform_m_ids,
+ ARRAY_SIZE(subplatform_m_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_M);
+ } else if (find_devid(devid, subplatform_p_ids,
+ ARRAY_SIZE(subplatform_p_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_P);
}
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
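
find_devid() is a plain linear scan over the small per-subplatform ID tables; the first table containing the PCI device ID selects the subplatform mask bit. A compilable sketch of the same lookup, with hypothetical device IDs standing in for the INTEL_MTL_M_IDS entries:

#include <stdbool.h>
#include <stdio.h>

static bool find_devid(unsigned short id, const unsigned short *p,
                       unsigned int num)
{
    for (; num; num--, p++)
        if (*p == id)
            return true;
    return false;
}

int main(void)
{
    /* hypothetical IDs, not the real MTL-M list */
    static const unsigned short mtl_m_ids[] = { 0x7d40, 0x7d45 };
    unsigned short devid = 0x7d45;

    if (find_devid(devid, mtl_m_ids, 2))
        printf("subplatform mask = 0x%x\n", 1u << 0); /* BIT(SUBPLATFORM_M) */
    return 0;
}
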
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index e7d2cf7d65c8..23bf230aa104 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -89,6 +89,7 @@ enum intel_platform {
INTEL_XEHPSDV,
INTEL_DG2,
INTEL_PONTEVECCHIO,
+ INTEL_METEORLAKE,
INTEL_MAX_PLATFORMS
};
@@ -126,6 +127,10 @@ enum intel_platform {
*/
#define INTEL_SUBPLATFORM_N 1
+/* MTL */
+#define INTEL_SUBPLATFORM_M 0
+#define INTEL_SUBPLATFORM_P 1
+
enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
@@ -143,6 +148,7 @@ enum intel_ppgtt_type {
func(needs_compact_pt); \
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
+ func(has_3d_pipeline); \
func(has_4tile); \
func(has_flat_ccs); \
func(has_global_mocs); \
@@ -150,11 +156,14 @@ enum intel_ppgtt_type {
func(has_heci_pxp); \
func(has_heci_gscfi); \
func(has_guc_deprivilege); \
+ func(has_l3_ccs_read); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
- func(has_mslices); \
+ func(has_media_ratio_mode); \
+ func(has_mslice_steering); \
+ func(has_one_eu_per_fuse_bit); \
func(has_pooled_eu); \
func(has_pxp); \
func(has_rc6); \
@@ -210,8 +219,6 @@ struct intel_device_info {
u32 memory_regions; /* regions supported by the HW */
- u32 display_mmio_offset;
-
u8 gt; /* GT number, 0 if undefined */
#define DEFINE_FLAG(name) u8 name:1
@@ -227,27 +234,30 @@ struct intel_device_info {
u8 fbc_mask;
u8 abox_mask;
+ struct {
+ u16 size; /* in blocks */
+ u8 slice_mask;
+ } dbuf;
+
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
- } display;
- struct {
- u16 size; /* in blocks */
- u8 slice_mask;
- } dbuf;
-
- /* Register offsets for the various display pipes and transcoders */
- int pipe_offsets[I915_MAX_TRANSCODERS];
- int trans_offsets[I915_MAX_TRANSCODERS];
- int cursor_offsets[I915_MAX_PIPES];
-
- struct color_luts {
- u32 degamma_lut_size;
- u32 gamma_lut_size;
- u32 degamma_lut_tests;
- u32 gamma_lut_tests;
- } color;
+ /* Global register offset for the display engine */
+ u32 mmio_offset;
+
+ /* Register offsets for the various display pipes and transcoders */
+ u32 pipe_offsets[I915_MAX_TRANSCODERS];
+ u32 trans_offsets[I915_MAX_TRANSCODERS];
+ u32 cursor_offsets[I915_MAX_PIPES];
+
+ struct {
+ u32 degamma_lut_size;
+ u32 gamma_lut_size;
+ u32 degamma_lut_tests;
+ u32 gamma_lut_tests;
+ } color;
+ } display;
};
struct intel_runtime_info {
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 2b9e7833da96..437447119770 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -393,7 +393,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
u32 val = 0;
int ret;
- ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 72dac1718f3e..157e166672d7 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -3,10 +3,12 @@
* Copyright © 2020 Intel Corporation
*/
+#include "display/intel_audio_regs.h"
#include "display/intel_dmc_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
#include "gvt/gvt.h"
+
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index e38d2db1c3e3..9a4a7fb55582 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -198,8 +198,7 @@ void intel_memory_region_debug(struct intel_memory_region *mr,
if (mr->region_private)
ttm_resource_manager_debug(mr->region_private, printer);
else
- drm_printf(printer, "total:%pa, available:%pa bytes\n",
- &mr->total, &mr->avail);
+ drm_printf(printer, "total:%pa bytes\n", &mr->total);
}
static int intel_memory_region_memtest(struct intel_memory_region *mem,
@@ -242,7 +241,6 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->min_page_size = min_page_size;
mem->ops = ops;
mem->total = size;
- mem->avail = mem->total;
mem->type = type;
mem->instance = instance;
@@ -279,6 +277,20 @@ void intel_memory_region_set_name(struct intel_memory_region *mem,
va_end(ap);
}
+void intel_memory_region_avail(struct intel_memory_region *mr,
+ u64 *avail, u64 *visible_avail)
+{
+ if (mr->type == INTEL_MEMORY_LOCAL) {
+ i915_ttm_buddy_man_avail(mr->region_private,
+ avail, visible_avail);
+ *avail <<= PAGE_SHIFT;
+ *visible_avail <<= PAGE_SHIFT;
+ } else {
+ *avail = mr->total;
+ *visible_avail = mr->total;
+ }
+}
+
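
The buddy manager reports availability in pages, so intel_memory_region_avail() converts to bytes by shifting left by PAGE_SHIFT for local memory. A trivial sketch of that conversion, assuming 4 KiB pages (PAGE_SHIFT == 12); the page counts are invented:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed 4 KiB pages, as on typical x86 builds */

int main(void)
{
    uint64_t avail_pages = 262144;  /* hypothetical buddy statistics */
    uint64_t visible_pages = 65536; /* hypothetical small-BAR visible slice */

    uint64_t avail_bytes = avail_pages << PAGE_SHIFT;
    uint64_t visible_bytes = visible_pages << PAGE_SHIFT;

    printf("avail=%llu MiB visible=%llu MiB\n",
           (unsigned long long)(avail_bytes >> 20),
           (unsigned long long)(visible_bytes >> 20));
    return 0;
}
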
void intel_memory_region_destroy(struct intel_memory_region *mem)
{
int ret = 0;
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 3d8378c1b447..2953ed5c3248 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -75,7 +75,6 @@ struct intel_memory_region {
resource_size_t io_size;
resource_size_t min_page_size;
resource_size_t total;
- resource_size_t avail;
u16 type;
u16 instance;
@@ -127,6 +126,9 @@ int intel_memory_region_reserve(struct intel_memory_region *mem,
void intel_memory_region_debug(struct intel_memory_region *mr,
struct drm_printer *printer);
+void intel_memory_region_avail(struct intel_memory_region *mr,
+ u64 *avail, u64 *visible_avail);
+
struct intel_memory_region *
i915_gem_ttm_system_setup(struct drm_i915_private *i915,
u16 type, u16 instance);
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index e2b2bbdc0714..0fec25be146a 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -25,7 +25,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
drm_WARN_ON(&dev_priv->drm,
GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
- /* PantherPoint is CPT compatible */
+ /* PPT is CPT compatible */
return PCH_CPT;
case INTEL_PCH_LPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
@@ -47,7 +47,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
- /* WildcatPoint is LPT compatible */
+ /* WPT is LPT compatible */
return PCH_LPT;
case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
@@ -55,7 +55,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
- /* WildcatPoint is LPT compatible */
+ /* WPT is LPT compatible */
return PCH_LPT;
case INTEL_PCH_SPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
@@ -99,14 +99,14 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
!IS_COFFEELAKE(dev_priv) &&
!IS_COMETLAKE(dev_priv) &&
!IS_ROCKETLAKE(dev_priv));
- /* CometPoint is CNP Compatible */
+ /* CMP is CNP compatible */
return PCH_CNP;
case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
drm_WARN_ON(&dev_priv->drm,
!IS_COFFEELAKE(dev_priv) &&
!IS_COMETLAKE(dev_priv));
- /* Comet Lake V PCH is based on KBP, which is SPT compatible */
+ /* CMP-V is based on KBP, which is SPT compatible */
return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
@@ -116,7 +116,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
- return PCH_MCC;
+ /* MCC is TGP compatible */
+ return PCH_TGP;
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
@@ -127,7 +128,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
- return PCH_JSP;
+ /* JSP is ICP compatible */
+ return PCH_ICP;
case INTEL_PCH_ADP_DEVICE_ID_TYPE:
case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index b7a8cf409d48..7c8ce9781d1a 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -22,10 +22,8 @@ enum intel_pch {
PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
PCH_CNP, /* Cannon/Comet Lake PCH */
- PCH_ICP, /* Ice Lake PCH */
- PCH_JSP, /* Jasper Lake PCH */
- PCH_MCC, /* Mule Creek Canyon PCH */
- PCH_TGP, /* Tiger Lake PCH */
+ PCH_ICP, /* Ice Lake/Jasper Lake PCH */
+ PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
PCH_ADP, /* Alder Lake PCH */
/* Fake PCHs, functionality handled on the same PCI dev */
@@ -68,8 +66,6 @@ enum intel_pch {
#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
-#define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
-#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index ac727546868e..a234d9b4ed14 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -52,14 +52,12 @@ static int gen7_check_mailbox_status(u32 mbox)
}
}
-static int __snb_pcode_rw(struct drm_i915_private *i915, u32 mbox,
+static int __snb_pcode_rw(struct intel_uncore *uncore, u32 mbox,
u32 *val, u32 *val1,
int fast_timeout_us, int slow_timeout_ms,
bool is_read)
{
- struct intel_uncore *uncore = &i915->uncore;
-
- lockdep_assert_held(&i915->sb_lock);
+ lockdep_assert_held(&uncore->i915->sb_lock);
/*
* GEN6_PCODE_* are outside of the forcewake domain, we can use
@@ -88,22 +86,22 @@ static int __snb_pcode_rw(struct drm_i915_private *i915, u32 mbox,
if (is_read && val1)
*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
- if (GRAPHICS_VER(i915) > 6)
+ if (GRAPHICS_VER(uncore->i915) > 6)
return gen7_check_mailbox_status(mbox);
else
return gen6_check_mailbox_status(mbox);
}
-int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1)
+int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
int err;
- mutex_lock(&i915->sb_lock);
- err = __snb_pcode_rw(i915, mbox, val, val1, 500, 20, true);
- mutex_unlock(&i915->sb_lock);
+ mutex_lock(&uncore->i915->sb_lock);
+ err = __snb_pcode_rw(uncore, mbox, val, val1, 500, 20, true);
+ mutex_unlock(&uncore->i915->sb_lock);
if (err) {
- drm_dbg(&i915->drm,
+ drm_dbg(&uncore->i915->drm,
"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
mbox, __builtin_return_address(0), err);
}
@@ -111,18 +109,18 @@ int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1)
return err;
}
-int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val,
+int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
int fast_timeout_us, int slow_timeout_ms)
{
int err;
- mutex_lock(&i915->sb_lock);
- err = __snb_pcode_rw(i915, mbox, &val, NULL,
+ mutex_lock(&uncore->i915->sb_lock);
+ err = __snb_pcode_rw(uncore, mbox, &val, NULL,
fast_timeout_us, slow_timeout_ms, false);
- mutex_unlock(&i915->sb_lock);
+ mutex_unlock(&uncore->i915->sb_lock);
if (err) {
- drm_dbg(&i915->drm,
+ drm_dbg(&uncore->i915->drm,
"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
val, mbox, __builtin_return_address(0), err);
}
@@ -130,18 +128,18 @@ int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val,
return err;
}
-static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
+static bool skl_pcode_try_request(struct intel_uncore *uncore, u32 mbox,
u32 request, u32 reply_mask, u32 reply,
u32 *status)
{
- *status = __snb_pcode_rw(i915, mbox, &request, NULL, 500, 0, true);
+ *status = __snb_pcode_rw(uncore, mbox, &request, NULL, 500, 0, true);
return (*status == 0) && ((request & reply_mask) == reply);
}
/**
* skl_pcode_request - send PCODE request until acknowledgment
- * @i915: device private
+ * @uncore: uncore
* @mbox: PCODE mailbox ID the request is targeted for
* @request: request ID
* @reply_mask: mask used to check for request acknowledgment
@@ -158,16 +156,16 @@ static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
* Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
* other error as reported by PCODE.
*/
-int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms)
{
u32 status;
int ret;
- mutex_lock(&i915->sb_lock);
+ mutex_lock(&uncore->i915->sb_lock);
#define COND \
- skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)
+ skl_pcode_try_request(uncore, mbox, request, reply_mask, reply, &status)
/*
* Prime the PCODE by doing a request first. Normally it guarantees
@@ -193,35 +191,58 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
* requests, and for any quirks of the PCODE firmware that delays
* the request completion.
*/
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(&uncore->i915->drm,
"PCODE timeout, retrying with preemption disabled\n");
- drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
+ drm_WARN_ON_ONCE(&uncore->i915->drm, timeout_base_ms > 3);
preempt_disable();
ret = wait_for_atomic(COND, 50);
preempt_enable();
out:
- mutex_unlock(&i915->sb_lock);
+ mutex_unlock(&uncore->i915->sb_lock);
return status ? status : ret;
#undef COND
}
-int intel_pcode_init(struct drm_i915_private *i915)
+int intel_pcode_init(struct intel_uncore *uncore)
{
- int ret = 0;
+ if (!IS_DGFX(uncore->i915))
+ return 0;
+
+ return skl_pcode_request(uncore, DG1_PCODE_STATUS,
+ DG1_UNCORE_GET_INIT_STATUS,
+ DG1_UNCORE_INIT_STATUS_COMPLETE,
+ DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
+}
+
+int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
+{
+ intel_wakeref_t wakeref;
+ u32 mbox;
+ int err;
- if (!IS_DGFX(i915))
- return ret;
+ mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
+ | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
+ | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);
- ret = skl_pcode_request(i915, DG1_PCODE_STATUS,
- DG1_UNCORE_GET_INIT_STATUS,
- DG1_UNCORE_INIT_STATUS_COMPLETE,
- DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
+ with_intel_runtime_pm(uncore->rpm, wakeref)
+ err = snb_pcode_read(uncore, mbox, val, NULL);
- drm_dbg(&i915->drm, "PCODE init status %d\n", ret);
+ return err;
+}
- if (ret)
- drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n");
+int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val)
+{
+ intel_wakeref_t wakeref;
+ u32 mbox;
+ int err;
- return ret;
+ mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
+ | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
+ | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);
+
+ with_intel_runtime_pm(uncore->rpm, wakeref)
+ err = snb_pcode_write(uncore, mbox, val);
+
+ return err;
}
diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h
index 0962a17fac48..8d2198e29422 100644
--- a/drivers/gpu/drm/i915/intel_pcode.h
+++ b/drivers/gpu/drm/i915/intel_pcode.h
@@ -8,17 +8,23 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_uncore;
-int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1);
-int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val,
+int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1);
+int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
int fast_timeout_us, int slow_timeout_ms);
-#define snb_pcode_write(i915, mbox, val) \
- snb_pcode_write_timeout(i915, mbox, val, 500, 0)
+#define snb_pcode_write(uncore, mbox, val) \
+ snb_pcode_write_timeout(uncore, mbox, val, 500, 0)
-int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms);
-int intel_pcode_init(struct drm_i915_private *i915);
+int intel_pcode_init(struct intel_uncore *uncore);
+
+/*
+ * Helpers for dGfx PCODE mailbox command formatting
+ */
+int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val);
+int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val);
#endif /* _INTEL_PCODE_H */
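
snb_pcode_read_p()/snb_pcode_write_p() build the mailbox word by packing the command and two parameters into bitfields with REG_FIELD_PREP(). A self-contained sketch of that packing; the field masks below are hypothetical and do not reflect the real GEN6_PCODE_MB_* layout:

#include <stdint.h>
#include <stdio.h>

/* simplified FIELD_PREP: shift the value to the mask's lowest set bit
 * (__builtin_ctz is a GCC/Clang builtin) and clamp it to the mask */
#define FIELD_PREP(mask, val) \
    (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define MB_COMMAND 0x000000ffu /* hypothetical 8-bit command field */
#define MB_PARAM1  0x0000ff00u /* hypothetical param1 field */
#define MB_PARAM2  0x00ff0000u /* hypothetical param2 field */

int main(void)
{
    uint32_t mbox = FIELD_PREP(MB_COMMAND, 0x1a)
                  | FIELD_PREP(MB_PARAM1, 0x02)
                  | FIELD_PREP(MB_PARAM2, 0x01);

    printf("mbox = 0x%08x\n", mbox); /* 0x0001021a */
    return 0;
}
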
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 5735915facc5..f06babdb3a8c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,6 +30,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
@@ -2874,7 +2875,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
- ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY,
+ ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
&val, NULL);
if (ret) {
@@ -2893,7 +2894,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
- ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY,
+ ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
&val, NULL);
if (ret) {
drm_err(&dev_priv->drm,
@@ -3679,7 +3680,7 @@ intel_sagv_block_time(struct drm_i915_private *dev_priv)
u32 val = 0;
int ret;
- ret = snb_pcode_read(dev_priv,
+ ret = snb_pcode_read(&dev_priv->uncore,
GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
&val, NULL);
if (ret) {
@@ -3748,7 +3749,7 @@ static void skl_sagv_enable(struct drm_i915_private *dev_priv)
return;
drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
- ret = snb_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ ret = snb_pcode_write(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
/* We don't need to wait for SAGV when enabling */
@@ -3781,7 +3782,7 @@ static void skl_sagv_disable(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ ret = skl_pcode_request(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
1);
@@ -4100,8 +4101,8 @@ static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->dbuf.size /
- hweight8(INTEL_INFO(dev_priv)->dbuf.slice_mask);
+ return INTEL_INFO(dev_priv)->display.dbuf.size /
+ hweight8(INTEL_INFO(dev_priv)->display.dbuf.slice_mask);
}
static void
@@ -4120,7 +4121,7 @@ skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
ddb->end = fls(slice_mask) * slice_size;
WARN_ON(ddb->start >= ddb->end);
- WARN_ON(ddb->end > INTEL_INFO(dev_priv)->dbuf.size);
+ WARN_ON(ddb->end > INTEL_INFO(dev_priv)->display.dbuf.size);
}
static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
@@ -4368,9 +4369,9 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
skl_ddb_entry_init_from_hw(ddb_y, val);
}
-void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
- struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
+static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
@@ -4950,7 +4951,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
return data_rate;
}
-const struct skl_wm_level *
+static const struct skl_wm_level *
skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id,
int level)
@@ -4963,7 +4964,7 @@ skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
return &wm->wm[level];
}
-const struct skl_wm_level *
+static const struct skl_wm_level *
skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id)
{
@@ -5915,8 +5916,8 @@ void skl_write_cursor_wm(struct intel_plane *plane,
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
-bool skl_wm_level_equals(const struct skl_wm_level *l1,
- const struct skl_wm_level *l2)
+static bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2)
{
return l1->enable == l2->enable &&
l1->ignore_lines == l2->ignore_lines &&
@@ -6095,7 +6096,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
"Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
- INTEL_INFO(dev_priv)->dbuf.slice_mask,
+ INTEL_INFO(dev_priv)->display.dbuf.slice_mask,
str_yes_no(old_dbuf_state->joined_mbus),
str_yes_no(new_dbuf_state->joined_mbus));
}
@@ -6488,8 +6489,8 @@ static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
}
-void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
- struct skl_pipe_wm *out)
+static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
+ struct skl_pipe_wm *out)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -7166,6 +7167,126 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
+void intel_wm_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ struct skl_pipe_wm wm;
+ } *hw;
+ const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
+ int level, max_level = ilk_wm_max_level(dev_priv);
+ struct intel_plane *plane;
+ u8 hw_enabled_slices;
+
+ if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
+ return;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
+
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+
+ if (DISPLAY_VER(dev_priv) >= 11 &&
+ hw_enabled_slices != dev_priv->dbuf.enabled_slices)
+ drm_err(&dev_priv->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ dev_priv->dbuf.enabled_slices,
+ hw_enabled_slices);
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_wm_level *hw_wm_level, *sw_wm_level;
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ hw_wm_level = &hw->wm.planes[plane->id].wm[level];
+ sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
+
+ if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
+ continue;
+
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name, level,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
+ sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
+
+ if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
+
+ if (HAS_HW_SAGV_WM(dev_priv) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
+
+ if (HAS_HW_SAGV_WM(dev_priv) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ drm_err(&dev_priv->drm,
+ "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
+ plane->base.base.id, plane->base.name,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
+ }
+
+ kfree(hw);
+}
+
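
intel_wm_state_verify() reads the watermark state back from hardware into a heap-allocated snapshot and compares it level-by-level against the computed software state, logging any divergence. A toy version of that snapshot-and-compare pattern, with types and values invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct wm_level { int enable; unsigned int blocks, lines; };

int main(void)
{
    struct wm_level sw = { .enable = 1, .blocks = 32, .lines = 4 };
    struct wm_level *hw = calloc(1, sizeof(*hw));

    if (!hw)
        return 1;
    /* pretend the hardware readback differs in one field */
    *hw = (struct wm_level){ .enable = 1, .blocks = 32, .lines = 5 };

    if (hw->enable != sw.enable || hw->blocks != sw.blocks ||
        hw->lines != sw.lines)
        fprintf(stderr,
                "mismatch in WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                sw.enable, sw.blocks, sw.lines,
                hw->enable, hw->blocks, hw->lines);
    free(hw);
    return 0;
}
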
void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -7513,10 +7634,9 @@ static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv)
static void dg2_init_clock_gating(struct drm_i915_private *i915)
{
- /* Wa_22010954014:dg2_g10 */
- if (IS_DG2_G10(i915))
- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
- SGSI_SIDECLK_DIS);
+ /* Wa_22010954014:dg2 */
+ intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
+ SGSI_SIDECLK_DIS);
/*
* Wa_14010733611:dg2_g10
@@ -7527,6 +7647,17 @@ static void dg2_init_clock_gating(struct drm_i915_private *i915)
SGR_DIS | SGGI_DIS);
}
+static void pvc_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ /* Wa_14012385139:pvc */
+ if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
+
+ /* Wa_22010954014:pvc */
+ if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
+}
+
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (!HAS_PCH_CNP(dev_priv))
@@ -7943,6 +8074,7 @@ static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs =
.init_clock_gating = platform##_init_clock_gating, \
}
+CG_FUNCS(pvc);
CG_FUNCS(dg2);
CG_FUNCS(xehpsdv);
CG_FUNCS(adlp);
@@ -7981,7 +8113,9 @@ CG_FUNCS(nop);
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_DG2(dev_priv))
+ if (IS_PONTEVECCHIO(dev_priv))
+ dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs;
+ else if (IS_DG2(dev_priv))
dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs;
else if (IS_XEHPSDV(dev_priv))
dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 50604cf7398c..945503ae493e 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -35,15 +35,12 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void intel_wm_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state);
u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
-void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
- struct skl_ddb_entry *ddb_y,
- struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry);
-void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
- struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
void skl_wm_sanitize(struct drm_i915_private *dev_priv);
@@ -51,13 +48,6 @@ bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
const struct intel_bw_state *bw_state);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
-const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id,
- int level);
-const struct skl_wm_level *skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id);
-bool skl_wm_level_equals(const struct skl_wm_level *l1,
- const struct skl_wm_level *l2);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx);
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index 74e8e4680028..42b3133d8387 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -135,6 +135,8 @@ static const struct intel_step_info adlp_n_revids[] = {
[0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_D0 },
};
+static void pvc_step_init(struct drm_i915_private *i915, int pci_revid);
+
void intel_step_init(struct drm_i915_private *i915)
{
const struct intel_step_info *revids = NULL;
@@ -142,7 +144,10 @@ void intel_step_init(struct drm_i915_private *i915)
int revid = INTEL_REVID(i915);
struct intel_step_info step = {};
- if (IS_DG2_G10(i915)) {
+ if (IS_PONTEVECCHIO(i915)) {
+ pvc_step_init(i915, revid);
+ return;
+ } else if (IS_DG2_G10(i915)) {
revids = dg2_g10_revid_step_tbl;
size = ARRAY_SIZE(dg2_g10_revid_step_tbl);
} else if (IS_DG2_G11(i915)) {
@@ -235,6 +240,69 @@ void intel_step_init(struct drm_i915_private *i915)
RUNTIME_INFO(i915)->step = step;
}
+#define PVC_BD_REVID GENMASK(5, 3)
+#define PVC_CT_REVID GENMASK(2, 0)
+
+static const int pvc_bd_subids[] = {
+ [0x0] = STEP_A0,
+ [0x3] = STEP_B0,
+ [0x4] = STEP_B1,
+ [0x5] = STEP_B3,
+};
+
+static const int pvc_ct_subids[] = {
+ [0x3] = STEP_A0,
+ [0x5] = STEP_B0,
+ [0x6] = STEP_B1,
+ [0x7] = STEP_C0,
+};
+
+static int
+pvc_step_lookup(struct drm_i915_private *i915, const char *type,
+ const int *table, int size, int subid)
+{
+ if (subid < size && table[subid] != STEP_NONE)
+ return table[subid];
+
+ drm_warn(&i915->drm, "Unknown %s id 0x%02x\n", type, subid);
+
+ /*
+ * As on other platforms, try to use the next higher ID if we land on a
+ * gap in the table.
+ */
+ while (subid < size && table[subid] == STEP_NONE)
+ subid++;
+
+ if (subid < size) {
+ drm_dbg(&i915->drm, "Using steppings for %s id 0x%02x\n",
+ type, subid);
+ return table[subid];
+ }
+
+ drm_dbg(&i915->drm, "Using future steppings\n");
+ return STEP_FUTURE;
+}
+
+/*
+ * PVC needs special handling since we don't look up the whole
+ * revid in a table, but instead decode specific bitfields within
+ * the revid for the various components.
+ */
+static void pvc_step_init(struct drm_i915_private *i915, int pci_revid)
+{
+ int ct_subid, bd_subid;
+
+ bd_subid = FIELD_GET(PVC_BD_REVID, pci_revid);
+ ct_subid = FIELD_GET(PVC_CT_REVID, pci_revid);
+
+ RUNTIME_INFO(i915)->step.basedie_step =
+ pvc_step_lookup(i915, "Base Die", pvc_bd_subids,
+ ARRAY_SIZE(pvc_bd_subids), bd_subid);
+ RUNTIME_INFO(i915)->step.graphics_step =
+ pvc_step_lookup(i915, "Compute Tile", pvc_ct_subids,
+ ARRAY_SIZE(pvc_ct_subids), ct_subid);
+}
+
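
pvc_step_init() splits one PCI revision ID into two subfields — bits 5:3 select the base-die stepping and bits 2:0 the compute-tile stepping — and feeds each through pvc_step_lookup(). A sketch of that decode, using simplified userspace re-implementations of GENMASK()/FIELD_GET() and a made-up revid:

#include <stdio.h>

/* simplified 32-bit GENMASK/FIELD_GET; the kernel versions are generic */
#define GENMASK(h, l)        (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, val) (((val) & (mask)) >> __builtin_ctz(mask))

#define BD_REVID GENMASK(5, 3) /* mirrors PVC_BD_REVID */
#define CT_REVID GENMASK(2, 0) /* mirrors PVC_CT_REVID */

int main(void)
{
    unsigned int pci_revid = 0x1d; /* hypothetical: BD subid 0x3, CT subid 0x5 */

    printf("base die subid = 0x%x\n", FIELD_GET(BD_REVID, pci_revid));
    printf("compute subid  = 0x%x\n", FIELD_GET(CT_REVID, pci_revid));
    return 0;
}
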
#define STEP_NAME_CASE(name) \
case STEP_##name: \
return #name;
diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h
index d71a99bd5179..a6b12bfa9744 100644
--- a/drivers/gpu/drm/i915/intel_step.h
+++ b/drivers/gpu/drm/i915/intel_step.h
@@ -11,9 +11,10 @@
struct drm_i915_private;
struct intel_step_info {
- u8 graphics_step;
+ u8 graphics_step; /* Represents the compute tile on Xe_HPC */
u8 display_step;
u8 media_step;
+ u8 basedie_step;
};
#define STEP_ENUM_VAL(name) STEP_##name,
@@ -25,6 +26,7 @@ struct intel_step_info {
func(B0) \
func(B1) \
func(B2) \
+ func(B3) \
func(C0) \
func(C1) \
func(D0) \
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 83517a703eb6..a852c471d1b3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -938,36 +938,32 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
return entry->domains;
}
-#define GEN_FW_RANGE(s, e, d) \
- { .start = (s), .end = (e), .domains = (d) }
-
-/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
-static const struct intel_forcewake_range __vlv_fw_ranges[] = {
- GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
- GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
- GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
-};
-
-#define __fwtable_reg_read_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- if (NEEDS_FORCE_WAKE((offset))) \
- __fwd = find_fw_domain(uncore, offset); \
- __fwd; \
-})
+/*
+ * Shadowed register tables describe special register ranges that i915 is
+ * allowed to write to without acquiring forcewake. If these registers' power
+ * wells are down, the hardware will save values written by i915 to a shadow
+ * copy and automatically transfer them into the real register the next time
+ * the power well is woken up. Shadowing only applies to writes; forcewake
+ * must still be acquired when reading from registers in these ranges.
+ *
+ * The documentation for shadowed registers is somewhat spotty on older
+ * platforms. However, omitting registers from these lists is non-fatal; it just
+ * means we'll wake up the hardware for some register accesses where we didn't
+ * really need to.
+ *
+ * The ranges listed in these tables must be sorted by offset.
+ *
+ * When adding new tables here, please also add them to
+ * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
+ * scanned for obvious mistakes or typos by the selftests.
+ */
-/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const struct i915_range gen8_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0xA008, .end = 0xA00C },
{ .start = 0x12030, .end = 0x12030 },
{ .start = 0x1a030, .end = 0x1a030 },
{ .start = 0x22030, .end = 0x22030 },
- /* TODO: Other registers are not yet used */
};
static const struct i915_range gen11_shadowed_regs[] = {
@@ -1080,6 +1076,45 @@ static const struct i915_range dg2_shadowed_regs[] = {
{ .start = 0x1F8510, .end = 0x1F8550 },
};
+static const struct i915_range pvc_shadowed_regs[] = {
+ { .start = 0x2030, .end = 0x2030 },
+ { .start = 0x2510, .end = 0x2550 },
+ { .start = 0xA008, .end = 0xA00C },
+ { .start = 0xA188, .end = 0xA188 },
+ { .start = 0xA278, .end = 0xA278 },
+ { .start = 0xA540, .end = 0xA56C },
+ { .start = 0xC4C8, .end = 0xC4C8 },
+ { .start = 0xC4E0, .end = 0xC4E0 },
+ { .start = 0xC600, .end = 0xC600 },
+ { .start = 0xC658, .end = 0xC658 },
+ { .start = 0x22030, .end = 0x22030 },
+ { .start = 0x22510, .end = 0x22550 },
+ { .start = 0x1C0030, .end = 0x1C0030 },
+ { .start = 0x1C0510, .end = 0x1C0550 },
+ { .start = 0x1C4030, .end = 0x1C4030 },
+ { .start = 0x1C4510, .end = 0x1C4550 },
+ { .start = 0x1C8030, .end = 0x1C8030 },
+ { .start = 0x1C8510, .end = 0x1C8550 },
+ { .start = 0x1D0030, .end = 0x1D0030 },
+ { .start = 0x1D0510, .end = 0x1D0550 },
+ { .start = 0x1D4030, .end = 0x1D4030 },
+ { .start = 0x1D4510, .end = 0x1D4550 },
+ { .start = 0x1D8030, .end = 0x1D8030 },
+ { .start = 0x1D8510, .end = 0x1D8550 },
+ { .start = 0x1E0030, .end = 0x1E0030 },
+ { .start = 0x1E0510, .end = 0x1E0550 },
+ { .start = 0x1E4030, .end = 0x1E4030 },
+ { .start = 0x1E4510, .end = 0x1E4550 },
+ { .start = 0x1E8030, .end = 0x1E8030 },
+ { .start = 0x1E8510, .end = 0x1E8550 },
+ { .start = 0x1F0030, .end = 0x1F0030 },
+ { .start = 0x1F0510, .end = 0x1F0550 },
+ { .start = 0x1F4030, .end = 0x1F4030 },
+ { .start = 0x1F4510, .end = 0x1F4550 },
+ { .start = 0x1F8030, .end = 0x1F8030 },
+ { .start = 0x1F8510, .end = 0x1F8550 },
+};
+
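
The requirement that these tables be sorted exists so that membership can be decided by binary search; mmio_range_cmp() below is shaped as the comparator for exactly that. A compilable sketch of the same lookup over sorted, non-overlapping [start, end] ranges; the struct and function names here are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint32_t start, end; };

static bool offset_in_ranges(uint32_t key, const struct range *tbl, size_t n)
{
    size_t lo = 0, hi = n;

    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;

        if (key < tbl[mid].start)
            hi = mid;
        else if (key > tbl[mid].end)
            lo = mid + 1;
        else
            return true;
    }
    return false;
}

int main(void)
{
    /* first few entries of a shadow table, as above */
    static const struct range shadow[] = {
        { 0x2030, 0x2030 }, { 0x2510, 0x2550 }, { 0xA008, 0xA00C },
    };

    printf("%d %d\n",
           offset_in_ranges(0x2534, shadow, 3),  /* 1: inside 0x2510-0x2550 */
           offset_in_ranges(0x3000, shadow, 3)); /* 0: not shadowed */
    return 0;
}
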
static int mmio_range_cmp(u32 key, const struct i915_range *range)
{
if (key < range->start)
@@ -1107,11 +1142,70 @@ gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
return FORCEWAKE_RENDER;
}
+#define __fwtable_reg_read_fw_domains(uncore, offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ if (NEEDS_FORCE_WAKE((offset))) \
+ __fwd = find_fw_domain(uncore, offset); \
+ __fwd; \
+})
+
+#define __fwtable_reg_write_fw_domains(uncore, offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ const u32 __offset = (offset); \
+ if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
+ __fwd = find_fw_domain(uncore, __offset); \
+ __fwd; \
+})
+
+#define GEN_FW_RANGE(s, e, d) \
+ { .start = (s), .end = (e), .domains = (d) }
+
+/*
+ * All platforms' forcewake tables below must be sorted by offset ranges.
+ * Furthermore, new forcewake tables added should be "watertight" and have
+ * no gaps between ranges.
+ *
+ * When there are multiple consecutive ranges listed in the bspec with
+ * the same forcewake domain, it is customary to combine them into a single
+ * row in the tables below to keep the tables small and lookups fast.
+ * Likewise, reserved/unused ranges may be combined with the preceding and/or
+ * following ranges since the driver will never be making MMIO accesses in
+ * those ranges.
+ *
+ * For example, if the bspec were to list:
+ *
+ * ...
+ * 0x1000 - 0x1fff: GT
+ * 0x2000 - 0x2cff: GT
+ * 0x2d00 - 0x2fff: unused/reserved
+ * 0x3000 - 0xffff: GT
+ * ...
+ *
+ * these could all be represented by a single line in the code:
+ *
+ * GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
+ *
+ * When adding new forcewake tables here, please also add them to
+ * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
+ * scanned for obvious mistakes or typos by the selftests.
+ */
+
static const struct intel_forcewake_range __gen6_fw_ranges[] = {
GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
};
-/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __vlv_fw_ranges[] = {
+ GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
+ GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
+};
+
static const struct intel_forcewake_range __chv_fw_ranges[] = {
GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
@@ -1131,16 +1225,6 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};
-#define __fwtable_reg_write_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- const u32 __offset = (offset); \
- if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
- __fwd = find_fw_domain(uncore, __offset); \
- __fwd; \
-})
-
-/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
@@ -1176,7 +1260,6 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = {
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
-/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
@@ -1215,14 +1298,6 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};
-/*
- * *Must* be sorted by offset ranges! See intel_fw_table_check().
- *
- * Note that the spec lists several reserved/unused ranges that don't
- * actually contain any registers. In the table below we'll combine those
- * reserved ranges with either the preceding or following range to keep the
- * table small and lookups fast.
- */
static const struct intel_forcewake_range __gen12_fw_ranges[] = {
GEN_FW_RANGE(0x0, 0x1fff, 0), /*
0x0 - 0xaff: reserved
@@ -1327,8 +1402,6 @@ static const struct intel_forcewake_range __gen12_fw_ranges[] = {
/*
* Graphics IP version 12.55 brings a slight change to the 0xd800 range,
* switching it from the GT domain to the render domain.
- *
- * *Must* be sorted by offset ranges! See intel_fw_table_check().
*/
#define XEHP_FWRANGES(FW_RANGE_D800) \
GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
@@ -1490,6 +1563,103 @@ static const struct intel_forcewake_range __dg2_fw_ranges[] = {
XEHP_FWRANGES(FORCEWAKE_RENDER)
};
+static const struct intel_forcewake_range __pvc_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0xaff, 0),
+ GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0xc00, 0xfff, 0),
+ GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT), /*
+ 0x4000 - 0x4aff: gt
+ 0x4b00 - 0x4fff: reserved
+ 0x5000 - 0x51ff: gt
+ 0x5200 - 0x52ff: reserved
+ 0x5300 - 0x53ff: gt
+ 0x5400 - 0x7fff: reserved
+ 0x8000 - 0x813f: gt */
+ GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8180, 0x81ff, 0),
+ GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
+ 0x8200 - 0x82ff: gt
+ 0x8300 - 0x84ff: reserved
+ 0x8500 - 0x887f: gt
+ 0x8880 - 0x8a7f: reserved
+ 0x8a80 - 0x8aff: gt
+ 0x8b00 - 0x8fff: reserved
+ 0x9000 - 0x947f: gt
+ 0x9480 - 0x94cf: reserved */
+ GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x9560, 0x967f, 0), /*
+ 0x9560 - 0x95ff: always on
+ 0x9600 - 0x967f: reserved */
+ GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
+ 0x9680 - 0x96ff: render
+ 0x9700 - 0x97ff: reserved */
+ GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
+ 0x9800 - 0xb4ff: gt
+ 0xb500 - 0xbfff: reserved
+ 0xc000 - 0xcfff: gt */
+ GEN_FW_RANGE(0xd000, 0xd3ff, 0),
+ GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT),
+ GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
+ 0xdd00 - 0xddff: gt
+ 0xde00 - 0xde7f: reserved */
+ GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
+ 0xde80 - 0xdeff: render
+ 0xdf00 - 0xe1ff: reserved
+ 0xe200 - 0xe7ff: render
+ 0xe800 - 0xe8ff: reserved */
+ GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), /*
+ 0xe900 - 0xe9ff: gt
+ 0xea00 - 0xebff: reserved
+ 0xec00 - 0xffff: gt
+ 0x10000 - 0x11fff: reserved */
+ GEN_FW_RANGE(0x12000, 0x12fff, 0), /*
+ 0x12000 - 0x127ff: always on
+ 0x12800 - 0x12fff: reserved */
+ GEN_FW_RANGE(0x13000, 0x23fff, FORCEWAKE_GT), /*
+ 0x13000 - 0x135ff: gt
+ 0x13600 - 0x147ff: reserved
+ 0x14800 - 0x153ff: gt
+ 0x15400 - 0x19fff: reserved
+ 0x1a000 - 0x1ffff: gt
+ 0x20000 - 0x21fff: reserved
+ 0x22000 - 0x23fff: gt */
+ GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
+ 0x24000 - 0x2407f: always on
+ 0x24080 - 0x2417f: reserved */
+ GEN_FW_RANGE(0x24180, 0x3ffff, FORCEWAKE_GT), /*
+ 0x24180 - 0x241ff: gt
+ 0x24200 - 0x251ff: reserved
+ 0x25200 - 0x252ff: gt
+ 0x25300 - 0x25fff: reserved
+ 0x26000 - 0x27fff: gt
+ 0x28000 - 0x2ffff: reserved
+ 0x30000 - 0x3ffff: gt */
+ GEN_FW_RANGE(0x40000, 0x1bffff, 0),
+ GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
+ 0x1c0000 - 0x1c2bff: VD0
+ 0x1c2c00 - 0x1c2cff: reserved
+ 0x1c2d00 - 0x1c2dff: VD0
+ 0x1c2e00 - 0x1c3eff: reserved
+ 0x1c3f00 - 0x1c3fff: VD0 */
+ GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1), /*
+ 0x1c4000 - 0x1c6aff: VD1
+ 0x1c6b00 - 0x1c7eff: reserved
+ 0x1c7f00 - 0x1c7fff: VD1
+ 0x1c8000 - 0x1cffff: reserved */
+ GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2), /*
+ 0x1d0000 - 0x1d2aff: VD2
+ 0x1d2b00 - 0x1d3eff: reserved
+ 0x1d3f00 - 0x1d3fff: VD2
+ 0x1d4000 - 0x23ffff: reserved */
+ GEN_FW_RANGE(0x240000, 0x3dffff, 0),
+ GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT),
+};
+
static void
ilk_dummy_write(struct intel_uncore *uncore)
{
@@ -2125,7 +2295,11 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges);
+ ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+ } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
@@ -2470,118 +2644,6 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
return fw_domains;
}
-/**
- * uncore_rw_with_mcr_steering_fw - Access a register after programming
- * the MCR selector register.
- * @uncore: pointer to struct intel_uncore
- * @reg: register being accessed
- * @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
- * @slice: slice number (ignored for multi-cast write)
- * @subslice: sub-slice number (ignored for multi-cast write)
- * @value: register value to be written (ignored for read)
- *
- * Return: 0 for write access. register value for read access.
- *
- * Caller needs to make sure the relevant forcewake wells are up.
- */
-static u32 uncore_rw_with_mcr_steering_fw(struct intel_uncore *uncore,
- i915_reg_t reg, u8 rw_flag,
- int slice, int subslice, u32 value)
-{
- u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;
-
- lockdep_assert_held(&uncore->lock);
-
- if (GRAPHICS_VER(uncore->i915) >= 11) {
- mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
- mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
-
- /*
- * Wa_22013088509
- *
- * The setting of the multicast/unicast bit usually wouldn't
- * matter for read operations (which always return the value
- * from a single register instance regardless of how that bit
- * is set), but some platforms have a workaround requiring us
- * to remain in multicast mode for reads. There's no real
- * downside to this, so we'll just go ahead and do so on all
- * platforms; we'll only clear the multicast bit from the mask
- * when exlicitly doing a write operation.
- */
- if (rw_flag == FW_REG_WRITE)
- mcr_mask |= GEN11_MCR_MULTICAST;
- } else {
- mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
- mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
- }
-
- old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
-
- mcr &= ~mcr_mask;
- mcr |= mcr_ss;
- intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
-
- if (rw_flag == FW_REG_READ)
- val = intel_uncore_read_fw(uncore, reg);
- else
- intel_uncore_write_fw(uncore, reg, value);
-
- mcr &= ~mcr_mask;
- mcr |= old_mcr & mcr_mask;
-
- intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
-
- return val;
-}
-
-static u32 uncore_rw_with_mcr_steering(struct intel_uncore *uncore,
- i915_reg_t reg, u8 rw_flag,
- int slice, int subslice,
- u32 value)
-{
- enum forcewake_domains fw_domains;
- u32 val;
-
- fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
- rw_flag);
- fw_domains |= intel_uncore_forcewake_for_reg(uncore,
- GEN8_MCR_SELECTOR,
- FW_REG_READ | FW_REG_WRITE);
-
- spin_lock_irq(&uncore->lock);
- intel_uncore_forcewake_get__locked(uncore, fw_domains);
-
- val = uncore_rw_with_mcr_steering_fw(uncore, reg, rw_flag,
- slice, subslice, value);
-
- intel_uncore_forcewake_put__locked(uncore, fw_domains);
- spin_unlock_irq(&uncore->lock);
-
- return val;
-}
-
-u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
- i915_reg_t reg, int slice, int subslice)
-{
- return uncore_rw_with_mcr_steering_fw(uncore, reg, FW_REG_READ,
- slice, subslice, 0);
-}
-
-u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
- i915_reg_t reg, int slice, int subslice)
-{
- return uncore_rw_with_mcr_steering(uncore, reg, FW_REG_READ,
- slice, subslice, 0);
-}
-
-void intel_uncore_write_with_mcr_steering(struct intel_uncore *uncore,
- i915_reg_t reg, u32 value,
- int slice, int subslice)
-{
- uncore_rw_with_mcr_steering(uncore, reg, FW_REG_WRITE,
- slice, subslice, value);
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 52fe3d89dd2b..b1fa912a65e7 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -210,14 +210,6 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
return uncore->flags & UNCORE_HAS_FIFO;
}
-u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
- i915_reg_t reg,
- int slice, int subslice);
-u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
- i915_reg_t reg, int slice, int subslice);
-void intel_uncore_write_with_mcr_steering(struct intel_uncore *uncore,
- i915_reg_t reg, u32 value,
- int slice, int subslice);
void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
index c9da1015eb42..e888b5124a07 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -9,9 +9,10 @@
#include <drm/drm_print.h>
#include "gt/intel_gt_debugfs.h"
-#include "pxp/intel_pxp.h"
-#include "pxp/intel_pxp_irq.h"
#include "i915_drv.h"
+#include "intel_pxp.h"
+#include "intel_pxp_debugfs.h"
+#include "intel_pxp_irq.h"
static int pxp_info_show(struct seq_file *m, void *data)
{
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index cdd196783535..fda9bb79c049 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -69,6 +69,7 @@ static int intel_shadow_table_check(void)
{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
{ dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
+ { pvc_shadowed_regs, ARRAY_SIZE(pvc_shadowed_regs) },
};
const struct i915_range *range;
unsigned int i, j;
@@ -115,6 +116,7 @@ int intel_uncore_mock_selftests(void)
{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
{ __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
{ __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true },
+ { __pvc_fw_ranges, ARRAY_SIZE(__pvc_fw_ranges), true },
};
int err, i;